vendor aws and openstack providers

For the remote state backends
James Bardin 2017-06-12 10:30:22 -04:00
parent 808b504bcf
commit 5b58c79051
411 changed files with 107097 additions and 0 deletions


@@ -0,0 +1,373 @@
Mozilla Public License Version 2.0
==================================
1. Definitions
--------------
1.1. "Contributor"
means each individual or legal entity that creates, contributes to
the creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used
by a Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached
the notice in Exhibit A, the Executable Form of such Source Code
Form, and Modifications of such Source Code Form, in each case
including portions thereof.
1.5. "Incompatible With Secondary Licenses"
means
(a) that the initial Contributor has attached the notice described
in Exhibit B to the Covered Software; or
(b) that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the
terms of a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in
a separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible,
whether at the time of the initial grant or subsequently, any and
all of the rights conveyed by this License.
1.10. "Modifications"
means any of the following:
(a) any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered
Software; or
(b) any new file in Source Code Form that contains any Covered
Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the
License, by the making, using, selling, offering for sale, having
made, import, or transfer of either its Contributions or its
Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU
Lesser General Public License, Version 2.1, the GNU Affero General
Public License, Version 3.0, or any later versions of those
licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that
controls, is controlled by, or is under common control with You. For
purposes of this definition, "control" means (a) the power, direct
or indirect, to cause the direction or management of such entity,
whether by contract or otherwise, or (b) ownership of more than
fifty percent (50%) of the outstanding shares or beneficial
ownership of such entity.
2. License Grants and Conditions
--------------------------------
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
(a) under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
(b) under Patent Claims of such Contributor to make, use, sell, offer
for sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
(a) for any code that a Contributor has removed from Covered Software;
or
(b) for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
(c) under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.
3. Responsibilities
-------------------
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
(a) such Covered Software must also be made available in Source Code
Form, as described in Section 3.1, and You must inform recipients of
the Executable Form how they can obtain a copy of such Source Code
Form by reasonable means in a timely manner, at a charge no more
than the cost of distribution to the recipient; and
(b) You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter
the recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------
If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.
5. Termination
--------------
5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.
************************************************************************
* *
* 6. Disclaimer of Warranty *
* ------------------------- *
* *
* Covered Software is provided under this License on an "as is" *
* basis, without warranty of any kind, either expressed, implied, or *
* statutory, including, without limitation, warranties that the *
* Covered Software is free of defects, merchantable, fit for a *
* particular purpose or non-infringing. The entire risk as to the *
* quality and performance of the Covered Software is with You. *
* Should any Covered Software prove defective in any respect, You *
* (not any Contributor) assume the cost of any necessary servicing, *
* repair, or correction. This disclaimer of warranty constitutes an *
* essential part of this License. No use of any Covered Software is *
* authorized under this License except under this disclaimer. *
* *
************************************************************************
************************************************************************
* *
* 7. Limitation of Liability *
* -------------------------- *
* *
* Under no circumstances and under no legal theory, whether tort *
* (including negligence), contract, or otherwise, shall any *
* Contributor, or anyone who distributes Covered Software as *
* permitted above, be liable to You for any direct, indirect, *
* special, incidental, or consequential damages of any character *
* including, without limitation, damages for lost profits, loss of *
* goodwill, work stoppage, computer failure or malfunction, or any *
* and all other commercial damages or losses, even if such party *
* shall have been informed of the possibility of such damages. This *
* limitation of liability shall not apply to liability for death or *
* personal injury resulting from such party's negligence to the *
* extent applicable law prohibits such limitation. Some *
* jurisdictions do not allow the exclusion or limitation of *
* incidental or consequential damages, so this exclusion and *
* limitation may not apply to You. *
* *
************************************************************************
8. Litigation
-------------
Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.
9. Miscellaneous
----------------
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.
10. Versions of the License
---------------------------
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
-------------------------------------------
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------
This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.

View File

@ -0,0 +1,217 @@
package aws
import (
"errors"
"fmt"
"log"
"os"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
awsCredentials "github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-cleanhttp"
)
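// GetAccountInfo returns the AWS partition and account ID for the calling
// identity, trying the EC2 instance metadata API (when running with an
// instance profile), iam:GetUser, sts:GetCallerIdentity, and finally
// iam:ListRoles, in that order.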
func GetAccountInfo(iamconn *iam.IAM, stsconn *sts.STS, authProviderName string) (string, string, error) {
// If we have creds from instance profile, we can use metadata API
if authProviderName == ec2rolecreds.ProviderName {
log.Println("[DEBUG] Trying to get account ID via AWS Metadata API")
cfg := &aws.Config{}
setOptionalEndpoint(cfg)
sess, err := session.NewSession(cfg)
if err != nil {
return "", "", errwrap.Wrapf("Error creating AWS session: {{err}}", err)
}
metadataClient := ec2metadata.New(sess)
info, err := metadataClient.IAMInfo()
if err != nil {
// This can be triggered when no IAM Role is assigned
// or AWS just happens to return invalid response
return "", "", fmt.Errorf("Failed getting EC2 IAM info: %s", err)
}
return parseAccountInfoFromArn(info.InstanceProfileArn)
}
// Then try IAM GetUser
log.Println("[DEBUG] Trying to get account ID via iam:GetUser")
outUser, err := iamconn.GetUser(nil)
if err == nil {
return parseAccountInfoFromArn(*outUser.User.Arn)
}
awsErr, ok := err.(awserr.Error)
// AccessDenied and ValidationError can be raised
// if credentials belong to federated profile, so we ignore these
if !ok || (awsErr.Code() != "AccessDenied" && awsErr.Code() != "ValidationError" && awsErr.Code() != "InvalidClientTokenId") {
return "", "", fmt.Errorf("Failed getting account ID via 'iam:GetUser': %s", err)
}
log.Printf("[DEBUG] Getting account ID via iam:GetUser failed: %s", err)
// Then try STS GetCallerIdentity
log.Println("[DEBUG] Trying to get account ID via sts:GetCallerIdentity")
outCallerIdentity, err := stsconn.GetCallerIdentity(&sts.GetCallerIdentityInput{})
if err == nil {
return parseAccountInfoFromArn(*outCallerIdentity.Arn)
}
log.Printf("[DEBUG] Getting account ID via sts:GetCallerIdentity failed: %s", err)
// Then try IAM ListRoles
log.Println("[DEBUG] Trying to get account ID via iam:ListRoles")
outRoles, err := iamconn.ListRoles(&iam.ListRolesInput{
MaxItems: aws.Int64(int64(1)),
})
if err != nil {
return "", "", fmt.Errorf("Failed getting account ID via 'iam:ListRoles': %s", err)
}
if len(outRoles.Roles) < 1 {
return "", "", errors.New("Failed getting account ID via 'iam:ListRoles': No roles available")
}
return parseAccountInfoFromArn(*outRoles.Roles[0].Arn)
}
func parseAccountInfoFromArn(arn string) (string, string, error) {
parts := strings.Split(arn, ":")
if len(parts) < 5 {
return "", "", fmt.Errorf("Unable to parse ID from invalid ARN: %q", arn)
}
return parts[1], parts[4], nil
}
// This function is responsible for reading credentials from the
// environment in the case that they're not explicitly specified
// in the Terraform configuration.
func GetCredentials(c *Config) (*awsCredentials.Credentials, error) {
// build a chain provider, lazy-evaluated by aws-sdk
providers := []awsCredentials.Provider{
&awsCredentials.StaticProvider{Value: awsCredentials.Value{
AccessKeyID: c.AccessKey,
SecretAccessKey: c.SecretKey,
SessionToken: c.Token,
}},
&awsCredentials.EnvProvider{},
&awsCredentials.SharedCredentialsProvider{
Filename: c.CredsFilename,
Profile: c.Profile,
},
}
// Build isolated HTTP client to avoid issues with globally-shared settings
client := cleanhttp.DefaultClient()
// Keep the timeout low as we don't want to wait in non-EC2 environments
client.Timeout = 100 * time.Millisecond
cfg := &aws.Config{
HTTPClient: client,
}
usedEndpoint := setOptionalEndpoint(cfg)
if !c.SkipMetadataApiCheck {
// Real AWS should reply to a simple metadata request.
// We check it actually does to ensure something else didn't just
// happen to be listening on the same IP:Port
metadataClient := ec2metadata.New(session.New(cfg))
if metadataClient.Available() {
providers = append(providers, &ec2rolecreds.EC2RoleProvider{
Client: metadataClient,
})
log.Print("[INFO] AWS EC2 instance detected via default metadata" +
" API endpoint, EC2RoleProvider added to the auth chain")
} else {
if usedEndpoint == "" {
usedEndpoint = "default location"
}
log.Printf("[INFO] Ignoring AWS metadata API endpoint at %s "+
"as it doesn't return any instance-id", usedEndpoint)
}
}
// This is the "normal" flow (i.e. not assuming a role)
if c.AssumeRoleARN == "" {
return awsCredentials.NewChainCredentials(providers), nil
}
// Otherwise we need to construct an STS client with the main credentials, and verify
// that we can assume the defined role.
log.Printf("[INFO] Attempting to AssumeRole %s (SessionName: %q, ExternalId: %q, Policy: %q)",
c.AssumeRoleARN, c.AssumeRoleSessionName, c.AssumeRoleExternalID, c.AssumeRolePolicy)
creds := awsCredentials.NewChainCredentials(providers)
cp, err := creds.Get()
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" {
return nil, errors.New(`No valid credential sources found for AWS Provider.
Please see https://terraform.io/docs/providers/aws/index.html for more information on
providing credentials for the AWS Provider`)
}
return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err)
}
log.Printf("[INFO] AWS Auth provider used: %q", cp.ProviderName)
awsConfig := &aws.Config{
Credentials: creds,
Region: aws.String(c.Region),
MaxRetries: aws.Int(c.MaxRetries),
HTTPClient: cleanhttp.DefaultClient(),
S3ForcePathStyle: aws.Bool(c.S3ForcePathStyle),
}
stsclient := sts.New(session.New(awsConfig))
assumeRoleProvider := &stscreds.AssumeRoleProvider{
Client: stsclient,
RoleARN: c.AssumeRoleARN,
}
if c.AssumeRoleSessionName != "" {
assumeRoleProvider.RoleSessionName = c.AssumeRoleSessionName
}
if c.AssumeRoleExternalID != "" {
assumeRoleProvider.ExternalID = aws.String(c.AssumeRoleExternalID)
}
if c.AssumeRolePolicy != "" {
assumeRoleProvider.Policy = aws.String(c.AssumeRolePolicy)
}
providers = []awsCredentials.Provider{assumeRoleProvider}
assumeRoleCreds := awsCredentials.NewChainCredentials(providers)
_, err = assumeRoleCreds.Get()
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" {
return nil, fmt.Errorf("The role %q cannot be assumed.\n\n"+
" There are a number of possible causes of this - the most common are:\n"+
" * The credentials used in order to assume the role are invalid\n"+
" * The credentials do not have appropriate permission to assume the role\n"+
" * The role ARN is not valid",
c.AssumeRoleARN)
}
return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err)
}
return assumeRoleCreds, nil
}
func setOptionalEndpoint(cfg *aws.Config) string {
endpoint := os.Getenv("AWS_METADATA_URL")
if endpoint != "" {
log.Printf("[INFO] Setting custom metadata endpoint: %q", endpoint)
cfg.Endpoint = aws.String(endpoint)
return endpoint
}
return ""
}
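For orientation, a minimal sketch (not part of the vendored file; it assumes it sits in this same package, and the Config values are made up) of resolving credentials through the chain above and logging which provider supplied them:
// Illustrative only: the Config values are placeholders.
func exampleResolveCredentials() error {
	cfg := &Config{
		Region:     "us-east-1",
		MaxRetries: 5,
		Profile:    "default", // picked up by the SharedCredentialsProvider
	}
	creds, err := GetCredentials(cfg)
	if err != nil {
		return err
	}
	// Get forces the lazy chain to evaluate, so missing credentials surface here.
	cp, err := creds.Get()
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] credentials supplied by %q", cp.ProviderName)
	return nil
}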


@@ -0,0 +1,317 @@
package aws
import (
"bytes"
"fmt"
"log"
"regexp"
"strconv"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
)
// autoscalingTagSchema returns the schema to use for the tag element.
func autoscalingTagSchema() *schema.Schema {
return &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"key": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"value": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"propagate_at_launch": &schema.Schema{
Type: schema.TypeBool,
Required: true,
},
},
},
Set: autoscalingTagToHash,
}
}
func autoscalingTagToHash(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})
buf.WriteString(fmt.Sprintf("%s-", m["key"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["value"].(string)))
buf.WriteString(fmt.Sprintf("%t-", m["propagate_at_launch"].(bool)))
return hashcode.String(buf.String())
}
// setAutoscalingTags is a helper to set the tags for a resource. It
// expects the tags field to be named "tag"
func setAutoscalingTags(conn *autoscaling.AutoScaling, d *schema.ResourceData) error {
resourceID := d.Get("name").(string)
var createTags, removeTags []*autoscaling.Tag
if d.HasChange("tag") || d.HasChange("tags") {
oraw, nraw := d.GetChange("tag")
o := setToMapByKey(oraw.(*schema.Set), "key")
n := setToMapByKey(nraw.(*schema.Set), "key")
old, err := autoscalingTagsFromMap(o, resourceID)
if err != nil {
return err
}
new, err := autoscalingTagsFromMap(n, resourceID)
if err != nil {
return err
}
c, r, err := diffAutoscalingTags(old, new, resourceID)
if err != nil {
return err
}
createTags = append(createTags, c...)
removeTags = append(removeTags, r...)
oraw, nraw = d.GetChange("tags")
old, err = autoscalingTagsFromList(oraw.([]interface{}), resourceID)
if err != nil {
return err
}
new, err = autoscalingTagsFromList(nraw.([]interface{}), resourceID)
if err != nil {
return err
}
c, r, err = diffAutoscalingTags(old, new, resourceID)
if err != nil {
return err
}
createTags = append(createTags, c...)
removeTags = append(removeTags, r...)
}
// Set tags
if len(removeTags) > 0 {
log.Printf("[DEBUG] Removing autoscaling tags: %#v", removeTags)
remove := autoscaling.DeleteTagsInput{
Tags: removeTags,
}
if _, err := conn.DeleteTags(&remove); err != nil {
return err
}
}
if len(createTags) > 0 {
log.Printf("[DEBUG] Creating autoscaling tags: %#v", createTags)
create := autoscaling.CreateOrUpdateTagsInput{
Tags: createTags,
}
if _, err := conn.CreateOrUpdateTags(&create); err != nil {
return err
}
}
return nil
}
// diffAutoscalingTags takes our tags locally and the ones remotely and
// returns the set of tags that must be created, and the set of tags that
// must be destroyed.
func diffAutoscalingTags(oldTags, newTags []*autoscaling.Tag, resourceID string) ([]*autoscaling.Tag, []*autoscaling.Tag, error) {
// First, we're creating everything we have
create := make(map[string]interface{})
for _, t := range newTags {
tag := map[string]interface{}{
"key": *t.Key,
"value": *t.Value,
"propagate_at_launch": *t.PropagateAtLaunch,
}
create[*t.Key] = tag
}
// Build the list of what to remove
var remove []*autoscaling.Tag
for _, t := range oldTags {
old, ok := create[*t.Key].(map[string]interface{})
if !ok || old["value"] != *t.Value || old["propagate_at_launch"] != *t.PropagateAtLaunch {
// Delete it!
remove = append(remove, t)
}
}
createTags, err := autoscalingTagsFromMap(create, resourceID)
if err != nil {
return nil, nil, err
}
return createTags, remove, nil
}
func autoscalingTagsFromList(vs []interface{}, resourceID string) ([]*autoscaling.Tag, error) {
result := make([]*autoscaling.Tag, 0, len(vs))
for _, tag := range vs {
attr, ok := tag.(map[string]interface{})
if !ok {
continue
}
t, err := autoscalingTagFromMap(attr, resourceID)
if err != nil {
return nil, err
}
if t != nil {
result = append(result, t)
}
}
return result, nil
}
// autoscalingTagsFromMap returns the tags for the given map of data.
func autoscalingTagsFromMap(m map[string]interface{}, resourceID string) ([]*autoscaling.Tag, error) {
result := make([]*autoscaling.Tag, 0, len(m))
for _, v := range m {
attr, ok := v.(map[string]interface{})
if !ok {
continue
}
t, err := autoscalingTagFromMap(attr, resourceID)
if err != nil {
return nil, err
}
if t != nil {
result = append(result, t)
}
}
return result, nil
}
func autoscalingTagFromMap(attr map[string]interface{}, resourceID string) (*autoscaling.Tag, error) {
if _, ok := attr["key"]; !ok {
return nil, fmt.Errorf("%s: invalid tag attributes: key missing", resourceID)
}
if _, ok := attr["value"]; !ok {
return nil, fmt.Errorf("%s: invalid tag attributes: value missing", resourceID)
}
if _, ok := attr["propagate_at_launch"]; !ok {
return nil, fmt.Errorf("%s: invalid tag attributes: propagate_at_launch missing", resourceID)
}
var propagateAtLaunch bool
var err error
if v, ok := attr["propagate_at_launch"].(bool); ok {
propagateAtLaunch = v
}
if v, ok := attr["propagate_at_launch"].(string); ok {
if propagateAtLaunch, err = strconv.ParseBool(v); err != nil {
return nil, fmt.Errorf(
"%s: invalid tag attribute: invalid value for propagate_at_launch: %s",
resourceID,
v,
)
}
}
t := &autoscaling.Tag{
Key: aws.String(attr["key"].(string)),
Value: aws.String(attr["value"].(string)),
PropagateAtLaunch: aws.Bool(propagateAtLaunch),
ResourceId: aws.String(resourceID),
ResourceType: aws.String("auto-scaling-group"),
}
if tagIgnoredAutoscaling(t) {
return nil, nil
}
return t, nil
}
// autoscalingTagsToMap turns the list of tags into a map.
func autoscalingTagsToMap(ts []*autoscaling.Tag) map[string]interface{} {
tags := make(map[string]interface{})
for _, t := range ts {
tag := map[string]interface{}{
"key": *t.Key,
"value": *t.Value,
"propagate_at_launch": *t.PropagateAtLaunch,
}
tags[*t.Key] = tag
}
return tags
}
// autoscalingTagDescriptionsToMap turns the list of tags into a map.
func autoscalingTagDescriptionsToMap(ts *[]*autoscaling.TagDescription) map[string]map[string]interface{} {
tags := make(map[string]map[string]interface{})
for _, t := range *ts {
tag := map[string]interface{}{
"key": *t.Key,
"value": *t.Value,
"propagate_at_launch": *t.PropagateAtLaunch,
}
tags[*t.Key] = tag
}
return tags
}
// autoscalingTagDescriptionsToSlice turns the list of tags into a slice.
func autoscalingTagDescriptionsToSlice(ts []*autoscaling.TagDescription) []map[string]interface{} {
tags := make([]map[string]interface{}, 0, len(ts))
for _, t := range ts {
tags = append(tags, map[string]interface{}{
"key": *t.Key,
"value": *t.Value,
"propagate_at_launch": *t.PropagateAtLaunch,
})
}
return tags
}
func setToMapByKey(s *schema.Set, key string) map[string]interface{} {
result := make(map[string]interface{})
for _, rawData := range s.List() {
data := rawData.(map[string]interface{})
result[data[key].(string)] = data
}
return result
}
// tagIgnoredAutoscaling compares a tag against a list of patterns and
// checks whether it should be ignored
func tagIgnoredAutoscaling(t *autoscaling.Tag) bool {
filter := []string{"^aws:"}
for _, v := range filter {
log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key)
if r, _ := regexp.MatchString(v, *t.Key); r {
log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value)
return true
}
}
return false
}
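For orientation, a minimal sketch (not part of the vendored file; same package assumed, tag values made up) of how diffAutoscalingTags splits old and new tags into create and remove sets:
// Illustrative only: a changed value shows up in both result slices.
func exampleDiffTags() (create, remove []*autoscaling.Tag, err error) {
	oldTags := []*autoscaling.Tag{{
		Key:               aws.String("Name"),
		Value:             aws.String("old"),
		PropagateAtLaunch: aws.Bool(true),
	}}
	newTags := []*autoscaling.Tag{{
		Key:               aws.String("Name"),
		Value:             aws.String("new"),
		PropagateAtLaunch: aws.Bool(true),
	}}
	// "Name" changed value, so the old tag lands in remove and the new
	// one in create.
	return diffAutoscalingTags(oldTags, newTags, "example-asg")
}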


@@ -0,0 +1,14 @@
package aws
import (
"strings"
"github.com/aws/aws-sdk-go/aws/awserr"
)
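// isAWSErr returns true only when err is an awserr.Error whose Code matches
// code exactly and whose Message contains message as a substring.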
func isAWSErr(err error, code string, message string) bool {
if err, ok := err.(awserr.Error); ok {
return err.Code() == code && strings.Contains(err.Message(), message)
}
return false
}
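A minimal sketch (not part of the vendored file; same package assumed, error constructed by hand) of how the helper matches on both code and message substring:
// Illustrative only.
func exampleIsAWSErr() bool {
	err := awserr.New("ThrottlingException", "Rate exceeded", nil)
	// True: the code matches and "Rate" is a substring of the message.
	return isAWSErr(err, "ThrottlingException", "Rate")
}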


@@ -0,0 +1,510 @@
package aws
import (
"crypto/tls"
"errors"
"fmt"
"log"
"net/http"
"os"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/acm"
"github.com/aws/aws-sdk-go/service/apigateway"
"github.com/aws/aws-sdk-go/service/applicationautoscaling"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/aws/aws-sdk-go/service/cloudformation"
"github.com/aws/aws-sdk-go/service/cloudfront"
"github.com/aws/aws-sdk-go/service/cloudtrail"
"github.com/aws/aws-sdk-go/service/cloudwatch"
"github.com/aws/aws-sdk-go/service/cloudwatchevents"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"github.com/aws/aws-sdk-go/service/codebuild"
"github.com/aws/aws-sdk-go/service/codecommit"
"github.com/aws/aws-sdk-go/service/codedeploy"
"github.com/aws/aws-sdk-go/service/codepipeline"
"github.com/aws/aws-sdk-go/service/cognitoidentity"
"github.com/aws/aws-sdk-go/service/configservice"
"github.com/aws/aws-sdk-go/service/databasemigrationservice"
"github.com/aws/aws-sdk-go/service/devicefarm"
"github.com/aws/aws-sdk-go/service/directoryservice"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/ecr"
"github.com/aws/aws-sdk-go/service/ecs"
"github.com/aws/aws-sdk-go/service/efs"
"github.com/aws/aws-sdk-go/service/elasticache"
"github.com/aws/aws-sdk-go/service/elasticbeanstalk"
elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice"
"github.com/aws/aws-sdk-go/service/elastictranscoder"
"github.com/aws/aws-sdk-go/service/elb"
"github.com/aws/aws-sdk-go/service/elbv2"
"github.com/aws/aws-sdk-go/service/emr"
"github.com/aws/aws-sdk-go/service/firehose"
"github.com/aws/aws-sdk-go/service/glacier"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/aws/aws-sdk-go/service/inspector"
"github.com/aws/aws-sdk-go/service/kinesis"
"github.com/aws/aws-sdk-go/service/kms"
"github.com/aws/aws-sdk-go/service/lambda"
"github.com/aws/aws-sdk-go/service/lightsail"
"github.com/aws/aws-sdk-go/service/opsworks"
"github.com/aws/aws-sdk-go/service/rds"
"github.com/aws/aws-sdk-go/service/redshift"
"github.com/aws/aws-sdk-go/service/route53"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/ses"
"github.com/aws/aws-sdk-go/service/sfn"
"github.com/aws/aws-sdk-go/service/simpledb"
"github.com/aws/aws-sdk-go/service/sns"
"github.com/aws/aws-sdk-go/service/sqs"
"github.com/aws/aws-sdk-go/service/ssm"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/aws/aws-sdk-go/service/waf"
"github.com/aws/aws-sdk-go/service/wafregional"
"github.com/davecgh/go-spew/spew"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/terraform/helper/logging"
"github.com/hashicorp/terraform/terraform"
)
type Config struct {
AccessKey string
SecretKey string
CredsFilename string
Profile string
Token string
Region string
MaxRetries int
AssumeRoleARN string
AssumeRoleExternalID string
AssumeRoleSessionName string
AssumeRolePolicy string
AllowedAccountIds []interface{}
ForbiddenAccountIds []interface{}
CloudFormationEndpoint string
CloudWatchEndpoint string
CloudWatchEventsEndpoint string
CloudWatchLogsEndpoint string
DynamoDBEndpoint string
DeviceFarmEndpoint string
Ec2Endpoint string
ElbEndpoint string
IamEndpoint string
KinesisEndpoint string
KmsEndpoint string
RdsEndpoint string
S3Endpoint string
SnsEndpoint string
SqsEndpoint string
Insecure bool
SkipCredsValidation bool
SkipGetEC2Platforms bool
SkipRegionValidation bool
SkipRequestingAccountId bool
SkipMetadataApiCheck bool
S3ForcePathStyle bool
}
type AWSClient struct {
cfconn *cloudformation.CloudFormation
cloudfrontconn *cloudfront.CloudFront
cloudtrailconn *cloudtrail.CloudTrail
cloudwatchconn *cloudwatch.CloudWatch
cloudwatchlogsconn *cloudwatchlogs.CloudWatchLogs
cloudwatcheventsconn *cloudwatchevents.CloudWatchEvents
cognitoconn *cognitoidentity.CognitoIdentity
configconn *configservice.ConfigService
devicefarmconn *devicefarm.DeviceFarm
dmsconn *databasemigrationservice.DatabaseMigrationService
dsconn *directoryservice.DirectoryService
dynamodbconn *dynamodb.DynamoDB
ec2conn *ec2.EC2
ecrconn *ecr.ECR
ecsconn *ecs.ECS
efsconn *efs.EFS
elbconn *elb.ELB
elbv2conn *elbv2.ELBV2
emrconn *emr.EMR
esconn *elasticsearch.ElasticsearchService
acmconn *acm.ACM
apigateway *apigateway.APIGateway
appautoscalingconn *applicationautoscaling.ApplicationAutoScaling
autoscalingconn *autoscaling.AutoScaling
s3conn *s3.S3
sesConn *ses.SES
simpledbconn *simpledb.SimpleDB
sqsconn *sqs.SQS
snsconn *sns.SNS
stsconn *sts.STS
redshiftconn *redshift.Redshift
r53conn *route53.Route53
partition string
accountid string
supportedplatforms []string
region string
rdsconn *rds.RDS
iamconn *iam.IAM
kinesisconn *kinesis.Kinesis
kmsconn *kms.KMS
firehoseconn *firehose.Firehose
inspectorconn *inspector.Inspector
elasticacheconn *elasticache.ElastiCache
elasticbeanstalkconn *elasticbeanstalk.ElasticBeanstalk
elastictranscoderconn *elastictranscoder.ElasticTranscoder
lambdaconn *lambda.Lambda
lightsailconn *lightsail.Lightsail
opsworksconn *opsworks.OpsWorks
glacierconn *glacier.Glacier
codebuildconn *codebuild.CodeBuild
codedeployconn *codedeploy.CodeDeploy
codecommitconn *codecommit.CodeCommit
codepipelineconn *codepipeline.CodePipeline
sfnconn *sfn.SFN
ssmconn *ssm.SSM
wafconn *waf.WAF
wafregionalconn *wafregional.WAFRegional
}
func (c *AWSClient) S3() *s3.S3 {
return c.s3conn
}
func (c *AWSClient) DynamoDB() *dynamodb.DynamoDB {
return c.dynamodbconn
}
func (c *AWSClient) IsGovCloud() bool {
return c.region == "us-gov-west-1"
}
func (c *AWSClient) IsChinaCloud() bool {
return c.region == "cn-north-1"
}
// Client configures and returns a fully initialized AWSClient
func (c *Config) Client() (interface{}, error) {
// Get the auth and region. This can fail if keys/regions were not
// specified and we're attempting to use the environment.
if c.SkipRegionValidation {
log.Println("[INFO] Skipping region validation")
} else {
log.Println("[INFO] Building AWS region structure")
err := c.ValidateRegion()
if err != nil {
return nil, err
}
}
var client AWSClient
// store AWS region in client struct, for region specific operations such as
// bucket storage in S3
client.region = c.Region
log.Println("[INFO] Building AWS auth structure")
creds, err := GetCredentials(c)
if err != nil {
return nil, err
}
// Call Get to check for credential provider. If nothing found, we'll get an
// error, and we can present it nicely to the user
cp, err := creds.Get()
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" {
return nil, errors.New(`No valid credential sources found for AWS Provider.
Please see https://terraform.io/docs/providers/aws/index.html for more information on
providing credentials for the AWS Provider`)
}
return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err)
}
log.Printf("[INFO] AWS Auth provider used: %q", cp.ProviderName)
awsConfig := &aws.Config{
Credentials: creds,
Region: aws.String(c.Region),
MaxRetries: aws.Int(c.MaxRetries),
HTTPClient: cleanhttp.DefaultClient(),
S3ForcePathStyle: aws.Bool(c.S3ForcePathStyle),
}
if logging.IsDebugOrHigher() {
awsConfig.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody)
awsConfig.Logger = awsLogger{}
}
if c.Insecure {
transport := awsConfig.HTTPClient.Transport.(*http.Transport)
transport.TLSClientConfig = &tls.Config{
InsecureSkipVerify: true,
}
}
// Set up base session
sess, err := session.NewSession(awsConfig)
if err != nil {
return nil, errwrap.Wrapf("Error creating AWS session: {{err}}", err)
}
sess.Handlers.Build.PushBackNamed(addTerraformVersionToUserAgent)
if extraDebug := os.Getenv("TERRAFORM_AWS_AUTHFAILURE_DEBUG"); extraDebug != "" {
sess.Handlers.UnmarshalError.PushFrontNamed(debugAuthFailure)
}
// This restriction should only be used for Route53 sessions.
// Other resources that have restrictions should allow the API to fail, rather
// than Terraform abstracting the region for the user. This can lead to breaking
// changes if that resource is ever opened up to more regions.
r53Sess := sess.Copy(&aws.Config{Region: aws.String("us-east-1")})
// Some services have user-configurable endpoints
awsCfSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.CloudFormationEndpoint)})
awsCwSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.CloudWatchEndpoint)})
awsCweSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.CloudWatchEventsEndpoint)})
awsCwlSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.CloudWatchLogsEndpoint)})
awsDynamoSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.DynamoDBEndpoint)})
awsEc2Sess := sess.Copy(&aws.Config{Endpoint: aws.String(c.Ec2Endpoint)})
awsElbSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.ElbEndpoint)})
awsIamSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.IamEndpoint)})
awsKinesisSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.KinesisEndpoint)})
awsKmsSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.KmsEndpoint)})
awsRdsSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.RdsEndpoint)})
awsS3Sess := sess.Copy(&aws.Config{Endpoint: aws.String(c.S3Endpoint)})
awsSnsSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.SnsEndpoint)})
awsSqsSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.SqsEndpoint)})
awsDeviceFarmSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.DeviceFarmEndpoint)})
log.Println("[INFO] Initializing DeviceFarm SDK connection")
client.devicefarmconn = devicefarm.New(awsDeviceFarmSess)
// These two services need to be set up early so we can check on AccountID
client.iamconn = iam.New(awsIamSess)
client.stsconn = sts.New(sess)
if !c.SkipCredsValidation {
err = c.ValidateCredentials(client.stsconn)
if err != nil {
return nil, err
}
}
if !c.SkipRequestingAccountId {
partition, accountId, err := GetAccountInfo(client.iamconn, client.stsconn, cp.ProviderName)
if err == nil {
client.partition = partition
client.accountid = accountId
}
}
authErr := c.ValidateAccountId(client.accountid)
if authErr != nil {
return nil, authErr
}
client.ec2conn = ec2.New(awsEc2Sess)
if !c.SkipGetEC2Platforms {
supportedPlatforms, err := GetSupportedEC2Platforms(client.ec2conn)
if err != nil {
// We intentionally fail *silently* because there's a chance
// user just doesn't have ec2:DescribeAccountAttributes permissions
log.Printf("[WARN] Unable to get supported EC2 platforms: %s", err)
} else {
client.supportedplatforms = supportedPlatforms
}
}
client.acmconn = acm.New(sess)
client.apigateway = apigateway.New(sess)
client.appautoscalingconn = applicationautoscaling.New(sess)
client.autoscalingconn = autoscaling.New(sess)
client.cfconn = cloudformation.New(awsCfSess)
client.cloudfrontconn = cloudfront.New(sess)
client.cloudtrailconn = cloudtrail.New(sess)
client.cloudwatchconn = cloudwatch.New(awsCwSess)
client.cloudwatcheventsconn = cloudwatchevents.New(awsCweSess)
client.cloudwatchlogsconn = cloudwatchlogs.New(awsCwlSess)
client.codecommitconn = codecommit.New(sess)
client.codebuildconn = codebuild.New(sess)
client.codedeployconn = codedeploy.New(sess)
client.configconn = configservice.New(sess)
client.cognitoconn = cognitoidentity.New(sess)
client.dmsconn = databasemigrationservice.New(sess)
client.codepipelineconn = codepipeline.New(sess)
client.dsconn = directoryservice.New(sess)
client.dynamodbconn = dynamodb.New(awsDynamoSess)
client.ecrconn = ecr.New(sess)
client.ecsconn = ecs.New(sess)
client.efsconn = efs.New(sess)
client.elasticacheconn = elasticache.New(sess)
client.elasticbeanstalkconn = elasticbeanstalk.New(sess)
client.elastictranscoderconn = elastictranscoder.New(sess)
client.elbconn = elb.New(awsElbSess)
client.elbv2conn = elbv2.New(awsElbSess)
client.emrconn = emr.New(sess)
client.esconn = elasticsearch.New(sess)
client.firehoseconn = firehose.New(sess)
client.inspectorconn = inspector.New(sess)
client.glacierconn = glacier.New(sess)
client.kinesisconn = kinesis.New(awsKinesisSess)
client.kmsconn = kms.New(awsKmsSess)
client.lambdaconn = lambda.New(sess)
client.lightsailconn = lightsail.New(sess)
client.opsworksconn = opsworks.New(sess)
client.r53conn = route53.New(r53Sess)
client.rdsconn = rds.New(awsRdsSess)
client.redshiftconn = redshift.New(sess)
client.simpledbconn = simpledb.New(sess)
client.s3conn = s3.New(awsS3Sess)
client.sesConn = ses.New(sess)
client.sfnconn = sfn.New(sess)
client.snsconn = sns.New(awsSnsSess)
client.sqsconn = sqs.New(awsSqsSess)
client.ssmconn = ssm.New(sess)
client.wafconn = waf.New(sess)
client.wafregionalconn = wafregional.New(sess)
return &client, nil
}
// ValidateRegion returns an error if the configured region is not a
// valid aws region and nil otherwise.
func (c *Config) ValidateRegion() error {
var regions = []string{
"ap-northeast-1",
"ap-northeast-2",
"ap-south-1",
"ap-southeast-1",
"ap-southeast-2",
"ca-central-1",
"cn-north-1",
"eu-central-1",
"eu-west-1",
"eu-west-2",
"sa-east-1",
"us-east-1",
"us-east-2",
"us-gov-west-1",
"us-west-1",
"us-west-2",
}
for _, valid := range regions {
if c.Region == valid {
return nil
}
}
return fmt.Errorf("Not a valid region: %s", c.Region)
}
// Validate credentials early and fail before we do any graph walking.
func (c *Config) ValidateCredentials(stsconn *sts.STS) error {
_, err := stsconn.GetCallerIdentity(&sts.GetCallerIdentityInput{})
return err
}
// ValidateAccountId returns a context-specific error if the configured account
// id is explicitly forbidden or not authorised; and nil if it is authorised.
func (c *Config) ValidateAccountId(accountId string) error {
if c.AllowedAccountIds == nil && c.ForbiddenAccountIds == nil {
return nil
}
log.Println("[INFO] Validating account ID")
if c.ForbiddenAccountIds != nil {
for _, id := range c.ForbiddenAccountIds {
if id == accountId {
return fmt.Errorf("Forbidden account ID (%s)", id)
}
}
}
if c.AllowedAccountIds != nil {
for _, id := range c.AllowedAccountIds {
if id == accountId {
return nil
}
}
return fmt.Errorf("Account ID not allowed (%s)", accountId)
}
return nil
}
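// GetSupportedEC2Platforms returns the account's supported EC2 platforms by
// reading the "supported-platforms" account attribute via
// ec2:DescribeAccountAttributes.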
func GetSupportedEC2Platforms(conn *ec2.EC2) ([]string, error) {
attrName := "supported-platforms"
input := ec2.DescribeAccountAttributesInput{
AttributeNames: []*string{aws.String(attrName)},
}
attributes, err := conn.DescribeAccountAttributes(&input)
if err != nil {
return nil, err
}
var platforms []string
for _, attr := range attributes.AccountAttributes {
if *attr.AttributeName == attrName {
for _, v := range attr.AttributeValues {
platforms = append(platforms, *v.AttributeValue)
}
break
}
}
if len(platforms) == 0 {
return nil, fmt.Errorf("No EC2 platforms detected")
}
return platforms, nil
}
// addTerraformVersionToUserAgent is a named handler that will add Terraform's
// version information to requests made by the AWS SDK.
var addTerraformVersionToUserAgent = request.NamedHandler{
Name: "terraform.TerraformVersionUserAgentHandler",
Fn: request.MakeAddToUserAgentHandler(
"APN/1.0 HashiCorp/1.0 Terraform", terraform.VersionString()),
}
var debugAuthFailure = request.NamedHandler{
Name: "terraform.AuthFailureAdditionalDebugHandler",
Fn: func(req *request.Request) {
if isAWSErr(req.Error, "AuthFailure", "AWS was not able to validate the provided access credentials") {
log.Printf("[INFO] Additional AuthFailure Debugging Context")
log.Printf("[INFO] Current system UTC time: %s", time.Now().UTC())
log.Printf("[INFO] Request object: %s", spew.Sdump(req))
}
},
}
type awsLogger struct{}
func (l awsLogger) Log(args ...interface{}) {
tokens := make([]string, 0, len(args))
for _, arg := range args {
if token, ok := arg.(string); ok {
tokens = append(tokens, token)
}
}
log.Printf("[DEBUG] [aws-sdk-go] %s", strings.Join(tokens, " "))
}
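For orientation, a minimal sketch (not part of the vendored file; same package assumed, values made up) of how a remote state backend might build a client from a Config and assert the concrete type returned by Client():
// Illustrative only.
func exampleNewClient() (*AWSClient, error) {
	cfg := &Config{
		Region:     "us-west-2", // must pass ValidateRegion unless SkipRegionValidation is set
		MaxRetries: 5,
	}
	raw, err := cfg.Client()
	if err != nil {
		return nil, err
	}
	// Client returns interface{}, so callers assert back to *AWSClient to
	// reach the service connections, e.g. S3() and DynamoDB() for the S3
	// remote state backend.
	client := raw.(*AWSClient)
	_ = client.S3()
	_ = client.DynamoDB()
	return client, nil
}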


@@ -0,0 +1,103 @@
package aws
import (
"fmt"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/acm"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsAcmCertificate() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsAcmCertificateRead,
Schema: map[string]*schema.Schema{
"domain": {
Type: schema.TypeString,
Required: true,
},
"arn": {
Type: schema.TypeString,
Computed: true,
},
"statuses": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"types": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
},
}
}
func dataSourceAwsAcmCertificateRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).acmconn
params := &acm.ListCertificatesInput{}
target := d.Get("domain")
statuses, ok := d.GetOk("statuses")
if ok {
statusStrings := statuses.([]interface{})
params.CertificateStatuses = expandStringList(statusStrings)
} else {
params.CertificateStatuses = []*string{aws.String("ISSUED")}
}
var arns []string
err := conn.ListCertificatesPages(params, func(page *acm.ListCertificatesOutput, lastPage bool) bool {
for _, cert := range page.CertificateSummaryList {
if *cert.DomainName == target {
arns = append(arns, *cert.CertificateArn)
}
}
return true
})
if err != nil {
return errwrap.Wrapf("Error describing certificates: {{err}}", err)
}
// filter based on certificate type (imported or aws-issued)
types, ok := d.GetOk("types")
if ok {
typesStrings := expandStringList(types.([]interface{}))
var matchedArns []string
for _, arn := range arns {
params := &acm.DescribeCertificateInput{}
params.CertificateArn = &arn
description, err := conn.DescribeCertificate(params)
if err != nil {
return errwrap.Wrapf("Error describing certificates: {{err}}", err)
}
for _, certType := range typesStrings {
if *description.Certificate.Type == *certType {
matchedArns = append(matchedArns, arn)
break
}
}
}
arns = matchedArns
}
if len(arns) == 0 {
return fmt.Errorf("No certificate for domain %q found in this region.", target)
}
if len(arns) > 1 {
return fmt.Errorf("Multiple certificates for domain %q found in this region.", target)
}
d.SetId(time.Now().UTC().String())
d.Set("arn", arns[0])
return nil
}


@@ -0,0 +1,127 @@
package aws
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/elbv2"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsAlb() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsAlbRead,
Schema: map[string]*schema.Schema{
"arn": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"arn_suffix": {
Type: schema.TypeString,
Computed: true,
},
"name": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"internal": {
Type: schema.TypeBool,
Computed: true,
},
"security_groups": {
Type: schema.TypeSet,
Elem: &schema.Schema{Type: schema.TypeString},
Computed: true,
Set: schema.HashString,
},
"subnets": {
Type: schema.TypeSet,
Elem: &schema.Schema{Type: schema.TypeString},
Computed: true,
Set: schema.HashString,
},
"access_logs": {
Type: schema.TypeList,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"bucket": {
Type: schema.TypeString,
Computed: true,
},
"prefix": {
Type: schema.TypeString,
Computed: true,
},
"enabled": {
Type: schema.TypeBool,
Computed: true,
},
},
},
},
"enable_deletion_protection": {
Type: schema.TypeBool,
Computed: true,
},
"idle_timeout": {
Type: schema.TypeInt,
Computed: true,
},
"vpc_id": {
Type: schema.TypeString,
Computed: true,
},
"zone_id": {
Type: schema.TypeString,
Computed: true,
},
"dns_name": {
Type: schema.TypeString,
Computed: true,
},
"tags": tagsSchemaComputed(),
},
}
}
func dataSourceAwsAlbRead(d *schema.ResourceData, meta interface{}) error {
elbconn := meta.(*AWSClient).elbv2conn
albArn := d.Get("arn").(string)
albName := d.Get("name").(string)
describeAlbOpts := &elbv2.DescribeLoadBalancersInput{}
switch {
case albArn != "":
describeAlbOpts.LoadBalancerArns = []*string{aws.String(albArn)}
case albName != "":
describeAlbOpts.Names = []*string{aws.String(albName)}
}
describeResp, err := elbconn.DescribeLoadBalancers(describeAlbOpts)
if err != nil {
return errwrap.Wrapf("Error retrieving ALB: {{err}}", err)
}
if len(describeResp.LoadBalancers) != 1 {
return fmt.Errorf("Search returned %d results, please revise so only one is returned", len(describeResp.LoadBalancers))
}
d.SetId(*describeResp.LoadBalancers[0].LoadBalancerArn)
return flattenAwsAlbResource(d, meta, describeResp.LoadBalancers[0])
}


@@ -0,0 +1,62 @@
package aws
import "github.com/hashicorp/terraform/helper/schema"
func dataSourceAwsAlbListener() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsAlbListenerRead,
Schema: map[string]*schema.Schema{
"arn": {
Type: schema.TypeString,
Required: true,
},
"load_balancer_arn": {
Type: schema.TypeString,
Computed: true,
},
"port": {
Type: schema.TypeInt,
Computed: true,
},
"protocol": {
Type: schema.TypeString,
Computed: true,
},
"ssl_policy": {
Type: schema.TypeString,
Computed: true,
},
"certificate_arn": {
Type: schema.TypeString,
Computed: true,
},
"default_action": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"target_group_arn": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
},
}
}
func dataSourceAwsAlbListenerRead(d *schema.ResourceData, meta interface{}) error {
d.SetId(d.Get("arn").(string))
return resourceAwsAlbListenerRead(d, meta)
}


@@ -0,0 +1,423 @@
package aws
import (
"bytes"
"fmt"
"log"
"regexp"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsAmi() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsAmiRead,
Schema: map[string]*schema.Schema{
"filter": dataSourceFiltersSchema(),
"executable_users": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"name_regex": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validateNameRegex,
},
"most_recent": {
Type: schema.TypeBool,
Optional: true,
Default: false,
ForceNew: true,
},
"owners": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
// Computed values.
"architecture": {
Type: schema.TypeString,
Computed: true,
},
"creation_date": {
Type: schema.TypeString,
Computed: true,
},
"description": {
Type: schema.TypeString,
Computed: true,
},
"hypervisor": {
Type: schema.TypeString,
Computed: true,
},
"image_id": {
Type: schema.TypeString,
Computed: true,
},
"image_location": {
Type: schema.TypeString,
Computed: true,
},
"image_owner_alias": {
Type: schema.TypeString,
Computed: true,
},
"image_type": {
Type: schema.TypeString,
Computed: true,
},
"kernel_id": {
Type: schema.TypeString,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
"owner_id": {
Type: schema.TypeString,
Computed: true,
},
"platform": {
Type: schema.TypeString,
Computed: true,
},
"public": {
Type: schema.TypeBool,
Computed: true,
},
"ramdisk_id": {
Type: schema.TypeString,
Computed: true,
},
"root_device_name": {
Type: schema.TypeString,
Computed: true,
},
"root_device_type": {
Type: schema.TypeString,
Computed: true,
},
"sriov_net_support": {
Type: schema.TypeString,
Computed: true,
},
"state": {
Type: schema.TypeString,
Computed: true,
},
"virtualization_type": {
Type: schema.TypeString,
Computed: true,
},
// Complex computed values
"block_device_mappings": {
Type: schema.TypeSet,
Computed: true,
Set: amiBlockDeviceMappingHash,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"device_name": {
Type: schema.TypeString,
Computed: true,
},
"no_device": {
Type: schema.TypeString,
Computed: true,
},
"virtual_name": {
Type: schema.TypeString,
Computed: true,
},
"ebs": {
Type: schema.TypeMap,
Computed: true,
},
},
},
},
"product_codes": {
Type: schema.TypeSet,
Computed: true,
Set: amiProductCodesHash,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"product_code_id": {
Type: schema.TypeString,
Computed: true,
},
"product_code_type": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"state_reason": {
Type: schema.TypeMap,
Computed: true,
},
"tags": dataSourceTagsSchema(),
},
}
}
// dataSourceAwsAmiRead performs the AMI lookup.
func dataSourceAwsAmiRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
executableUsers, executableUsersOk := d.GetOk("executable_users")
filters, filtersOk := d.GetOk("filter")
nameRegex, nameRegexOk := d.GetOk("name_regex")
owners, ownersOk := d.GetOk("owners")
if !executableUsersOk && !filtersOk && !nameRegexOk && !ownersOk {
return fmt.Errorf("One of executable_users, filters, name_regex, or owners must be assigned")
}
params := &ec2.DescribeImagesInput{}
if executableUsersOk {
params.ExecutableUsers = expandStringList(executableUsers.([]interface{}))
}
if filtersOk {
params.Filters = buildAwsDataSourceFilters(filters.(*schema.Set))
}
if ownersOk {
o := expandStringList(owners.([]interface{}))
if len(o) > 0 {
params.Owners = o
}
}
resp, err := conn.DescribeImages(params)
if err != nil {
return err
}
var filteredImages []*ec2.Image
if nameRegexOk {
r := regexp.MustCompile(nameRegex.(string))
for _, image := range resp.Images {
// Check for a very rare case where the response would include no
// image name. No name means nothing to attempt a match against,
// so we skip such images.
if image.Name == nil || *image.Name == "" {
log.Printf("[WARN] Unable to find AMI name to match against "+
"for image ID %q owned by %q, nothing to do.",
*image.ImageId, *image.OwnerId)
continue
}
if r.MatchString(*image.Name) {
filteredImages = append(filteredImages, image)
}
}
} else {
filteredImages = resp.Images[:]
}
var image *ec2.Image
if len(filteredImages) < 1 {
return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
}
if len(filteredImages) > 1 {
recent := d.Get("most_recent").(bool)
log.Printf("[DEBUG] aws_ami - multiple results found and `most_recent` is set to: %t", recent)
if recent {
image = mostRecentAmi(filteredImages)
} else {
return fmt.Errorf("Your query returned more than one result. Please try a more " +
"specific search criteria, or set `most_recent` attribute to true.")
}
} else {
// Query returned single result.
image = filteredImages[0]
}
log.Printf("[DEBUG] aws_ami - Single AMI found: %s", *image.ImageId)
return amiDescriptionAttributes(d, image)
}
// Returns the most recent AMI out of a slice of images.
func mostRecentAmi(images []*ec2.Image) *ec2.Image {
return sortImages(images)[0]
}
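// A minimal sketch, not part of the vendored source: mostRecentAmi assumes
// sortImages (defined elsewhere in this package) returns images newest first.
// The hypothetical helper below shows the equivalent selection directly, using
// the fact that CreationDate is an RFC3339 string, so lexicographic comparison
// matches chronological order.
func newestImageExample(images []*ec2.Image) *ec2.Image {
    if len(images) == 0 {
        return nil
    }
    newest := images[0]
    for _, image := range images[1:] {
        if image.CreationDate != nil && newest.CreationDate != nil &&
            *image.CreationDate > *newest.CreationDate {
            newest = image
        }
    }
    return newest
}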
// amiDescriptionAttributes populates the numerous fields that the image description returns.
func amiDescriptionAttributes(d *schema.ResourceData, image *ec2.Image) error {
// Simple attributes first
d.SetId(*image.ImageId)
d.Set("architecture", image.Architecture)
d.Set("creation_date", image.CreationDate)
if image.Description != nil {
d.Set("description", image.Description)
}
d.Set("hypervisor", image.Hypervisor)
d.Set("image_id", image.ImageId)
d.Set("image_location", image.ImageLocation)
if image.ImageOwnerAlias != nil {
d.Set("image_owner_alias", image.ImageOwnerAlias)
}
d.Set("image_type", image.ImageType)
if image.KernelId != nil {
d.Set("kernel_id", image.KernelId)
}
d.Set("name", image.Name)
d.Set("owner_id", image.OwnerId)
if image.Platform != nil {
d.Set("platform", image.Platform)
}
d.Set("public", image.Public)
if image.RamdiskId != nil {
d.Set("ramdisk_id", image.RamdiskId)
}
if image.RootDeviceName != nil {
d.Set("root_device_name", image.RootDeviceName)
}
d.Set("root_device_type", image.RootDeviceType)
if image.SriovNetSupport != nil {
d.Set("sriov_net_support", image.SriovNetSupport)
}
d.Set("state", image.State)
d.Set("virtualization_type", image.VirtualizationType)
// Complex types get their own functions
if err := d.Set("block_device_mappings", amiBlockDeviceMappings(image.BlockDeviceMappings)); err != nil {
return err
}
if err := d.Set("product_codes", amiProductCodes(image.ProductCodes)); err != nil {
return err
}
if err := d.Set("state_reason", amiStateReason(image.StateReason)); err != nil {
return err
}
if err := d.Set("tags", dataSourceTags(image.Tags)); err != nil {
return err
}
return nil
}
// Returns a set of block device mappings.
func amiBlockDeviceMappings(m []*ec2.BlockDeviceMapping) *schema.Set {
s := &schema.Set{
F: amiBlockDeviceMappingHash,
}
for _, v := range m {
mapping := map[string]interface{}{
"device_name": *v.DeviceName,
}
if v.Ebs != nil {
ebs := map[string]interface{}{
"delete_on_termination": fmt.Sprintf("%t", *v.Ebs.DeleteOnTermination),
"encrypted": fmt.Sprintf("%t", *v.Ebs.Encrypted),
"volume_size": fmt.Sprintf("%d", *v.Ebs.VolumeSize),
"volume_type": *v.Ebs.VolumeType,
}
// Iops is not always set
if v.Ebs.Iops != nil {
ebs["iops"] = fmt.Sprintf("%d", *v.Ebs.Iops)
} else {
ebs["iops"] = "0"
}
// snapshot id may not be set
if v.Ebs.SnapshotId != nil {
ebs["snapshot_id"] = *v.Ebs.SnapshotId
}
mapping["ebs"] = ebs
}
if v.VirtualName != nil {
mapping["virtual_name"] = *v.VirtualName
}
log.Printf("[DEBUG] aws_ami - adding block device mapping: %v", mapping)
s.Add(mapping)
}
return s
}
// Returns a set of product codes.
func amiProductCodes(m []*ec2.ProductCode) *schema.Set {
s := &schema.Set{
F: amiProductCodesHash,
}
for _, v := range m {
code := map[string]interface{}{
"product_code_id": *v.ProductCodeId,
"product_code_type": *v.ProductCodeType,
}
s.Add(code)
}
return s
}
// Returns the state reason.
func amiStateReason(m *ec2.StateReason) map[string]interface{} {
s := make(map[string]interface{})
if m != nil {
s["code"] = *m.Code
s["message"] = *m.Message
} else {
s["code"] = "UNSET"
s["message"] = "UNSET"
}
return s
}
// Generates a hash for the set hash function used by the block_device_mappings
// attribute.
func amiBlockDeviceMappingHash(v interface{}) int {
var buf bytes.Buffer
// All keys added in alphabetical order.
m := v.(map[string]interface{})
buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string)))
if d, ok := m["ebs"]; ok {
if len(d.(map[string]interface{})) > 0 {
e := d.(map[string]interface{})
buf.WriteString(fmt.Sprintf("%s-", e["delete_on_termination"].(string)))
buf.WriteString(fmt.Sprintf("%s-", e["encrypted"].(string)))
buf.WriteString(fmt.Sprintf("%s-", e["iops"].(string)))
buf.WriteString(fmt.Sprintf("%s-", e["volume_size"].(string)))
buf.WriteString(fmt.Sprintf("%s-", e["volume_type"].(string)))
}
}
if d, ok := m["no_device"]; ok {
buf.WriteString(fmt.Sprintf("%s-", d.(string)))
}
if d, ok := m["virtual_name"]; ok {
buf.WriteString(fmt.Sprintf("%s-", d.(string)))
}
if d, ok := m["snapshot_id"]; ok {
buf.WriteString(fmt.Sprintf("%s-", d.(string)))
}
return hashcode.String(buf.String())
}
// Generates a hash for the set hash function used by the product_codes
// attribute.
func amiProductCodesHash(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})
// All keys added in alphabetical order.
buf.WriteString(fmt.Sprintf("%s-", m["product_code_id"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["product_code_type"].(string)))
return hashcode.String(buf.String())
}
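// Illustrative only, not part of the vendored source: for a hypothetical
// product code element with id "abc123" and type "marketplace", the hash
// above is computed over the string "abc123-marketplace-".
func exampleProductCodeHash() int {
    return amiProductCodesHash(map[string]interface{}{
        "product_code_id":   "abc123",
        "product_code_type": "marketplace",
    })
}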
func validateNameRegex(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if _, err := regexp.Compile(value); err != nil {
errors = append(errors, fmt.Errorf(
"%q contains an invalid regular expression: %s",
k, err))
}
return
}
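// A small usage sketch, not part of the vendored source: helper/schema invokes
// a ValidateFunc such as validateNameRegex with the raw config value and the
// attribute key, and any returned errors fail validation. The hypothetical call
// below uses an unclosed group, which does not compile as a regular expression.
func exampleValidateNameRegex() {
    if _, errs := validateNameRegex("ubuntu-(", "name_regex"); len(errs) > 0 {
        log.Printf("[DEBUG] example name_regex rejected: %v", errs[0])
    }
}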


@ -0,0 +1,111 @@
package aws
import (
"fmt"
"log"
"regexp"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsAmiIds() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsAmiIdsRead,
Schema: map[string]*schema.Schema{
"filter": dataSourceFiltersSchema(),
"executable_users": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"name_regex": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validateNameRegex,
},
"owners": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"tags": dataSourceTagsSchema(),
"ids": &schema.Schema{
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
},
}
}
func dataSourceAwsAmiIdsRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
executableUsers, executableUsersOk := d.GetOk("executable_users")
filters, filtersOk := d.GetOk("filter")
nameRegex, nameRegexOk := d.GetOk("name_regex")
owners, ownersOk := d.GetOk("owners")
if !executableUsersOk && !filtersOk && !nameRegexOk && !ownersOk {
return fmt.Errorf("One of executable_users, filters, name_regex, or owners must be assigned")
}
params := &ec2.DescribeImagesInput{}
if executableUsersOk {
params.ExecutableUsers = expandStringList(executableUsers.([]interface{}))
}
if filtersOk {
params.Filters = buildAwsDataSourceFilters(filters.(*schema.Set))
}
if ownersOk {
o := expandStringList(owners.([]interface{}))
if len(o) > 0 {
params.Owners = o
}
}
resp, err := conn.DescribeImages(params)
if err != nil {
return err
}
var filteredImages []*ec2.Image
imageIds := make([]string, 0)
if nameRegexOk {
r := regexp.MustCompile(nameRegex.(string))
for _, image := range resp.Images {
// Check for a very rare case where the response would include no
// image name. No name means there is nothing to match against,
// so we skip such images.
if image.Name == nil || *image.Name == "" {
log.Printf("[WARN] Unable to find AMI name to match against "+
"for image ID %q owned by %q, nothing to do.",
*image.ImageId, *image.OwnerId)
continue
}
if r.MatchString(*image.Name) {
filteredImages = append(filteredImages, image)
}
}
} else {
filteredImages = resp.Images[:]
}
for _, image := range sortImages(filteredImages) {
imageIds = append(imageIds, *image.ImageId)
}
d.SetId(fmt.Sprintf("%d", hashcode.String(params.String())))
d.Set("ids", imageIds)
return nil
}


@ -0,0 +1,102 @@
package aws
import (
"fmt"
"log"
"sort"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsAutoscalingGroups() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsAutoscalingGroupsRead,
Schema: map[string]*schema.Schema{
"names": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"filter": {
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"values": &schema.Schema{
Type: schema.TypeSet,
Required: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
},
},
},
},
}
}
func dataSourceAwsAutoscalingGroupsRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).autoscalingconn
log.Printf("[DEBUG] Reading Autoscaling Groups.")
d.SetId(time.Now().UTC().String())
var raw []string
tf := d.Get("filter").(*schema.Set)
if tf.Len() > 0 {
out, err := conn.DescribeTags(&autoscaling.DescribeTagsInput{
Filters: expandAsgTagFilters(tf.List()),
})
if err != nil {
return err
}
raw = make([]string, len(out.Tags))
for i, v := range out.Tags {
raw[i] = *v.ResourceId
}
} else {
resp, err := conn.DescribeAutoScalingGroups(&autoscaling.DescribeAutoScalingGroupsInput{})
if err != nil {
return fmt.Errorf("Error fetching Autoscaling Groups: %s", err)
}
raw = make([]string, len(resp.AutoScalingGroups))
for i, v := range resp.AutoScalingGroups {
raw[i] = *v.AutoScalingGroupName
}
}
sort.Strings(raw)
if err := d.Set("names", raw); err != nil {
return fmt.Errorf("[WARN] Error setting Autoscaling Group Names: %s", err)
}
return nil
}
func expandAsgTagFilters(in []interface{}) []*autoscaling.Filter {
out := make([]*autoscaling.Filter, len(in))
for i, filter := range in {
m := filter.(map[string]interface{})
values := expandStringList(m["values"].(*schema.Set).List())
out[i] = &autoscaling.Filter{
Name: aws.String(m["name"].(string)),
Values: values,
}
}
return out
}
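// A minimal sketch, not part of the vendored source, of the input shape
// expandAsgTagFilters expects: each filter block arrives as a map holding a
// string "name" and a *schema.Set of "values". The names used below are
// hypothetical.
func exampleExpandAsgTagFilters() []*autoscaling.Filter {
    raw := []interface{}{
        map[string]interface{}{
            "name":   "key",
            "values": schema.NewSet(schema.HashString, []interface{}{"Environment"}),
        },
    }
    // Expands to a single filter with Name "key" and Values ["Environment"].
    return expandAsgTagFilters(raw)
}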


@ -0,0 +1,89 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsAvailabilityZone() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsAvailabilityZoneRead,
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"region": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"name_suffix": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"state": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
},
}
}
func dataSourceAwsAvailabilityZoneRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
req := &ec2.DescribeAvailabilityZonesInput{}
if name := d.Get("name"); name != "" {
req.ZoneNames = []*string{aws.String(name.(string))}
}
req.Filters = buildEC2AttributeFilterList(
map[string]string{
"state": d.Get("state").(string),
},
)
if len(req.Filters) == 0 {
// Don't send an empty filters list; the EC2 API won't accept it.
req.Filters = nil
}
log.Printf("[DEBUG] DescribeAvailabilityZones %s\n", req)
resp, err := conn.DescribeAvailabilityZones(req)
if err != nil {
return err
}
if resp == nil || len(resp.AvailabilityZones) == 0 {
return fmt.Errorf("no matching AZ found")
}
if len(resp.AvailabilityZones) > 1 {
return fmt.Errorf("multiple AZs matched; use additional constraints to reduce matches to a single AZ")
}
az := resp.AvailabilityZones[0]
// As a convenience when working with AZs generically, we expose
// the AZ suffix alone, without the region name.
// This can be used e.g. to create lookup tables by AZ letter that
// work regardless of region.
nameSuffix := (*az.ZoneName)[len(*az.RegionName):]
d.SetId(*az.ZoneName)
d.Set("id", az.ZoneName)
d.Set("name", az.ZoneName)
d.Set("name_suffix", nameSuffix)
d.Set("region", az.RegionName)
d.Set("state", az.State)
return nil
}
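// Illustrative only, not part of the vendored source: the suffix computed
// above slices the region name off the front of the zone name, so for zone
// "us-west-2a" in region "us-west-2" the name_suffix is "a".
func exampleNameSuffix() string {
    zoneName := "us-west-2a"
    regionName := "us-west-2"
    return zoneName[len(regionName):] // "a"
}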


@ -0,0 +1,87 @@
package aws
import (
"fmt"
"log"
"sort"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsAvailabilityZones() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsAvailabilityZonesRead,
Schema: map[string]*schema.Schema{
"names": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"state": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validateStateType,
},
},
}
}
func dataSourceAwsAvailabilityZonesRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
log.Printf("[DEBUG] Reading Availability Zones.")
d.SetId(time.Now().UTC().String())
request := &ec2.DescribeAvailabilityZonesInput{}
if v, ok := d.GetOk("state"); ok {
request.Filters = []*ec2.Filter{
{
Name: aws.String("state"),
Values: []*string{aws.String(v.(string))},
},
}
}
log.Printf("[DEBUG] Availability Zones request options: %#v", *request)
resp, err := conn.DescribeAvailabilityZones(request)
if err != nil {
return fmt.Errorf("Error fetching Availability Zones: %s", err)
}
raw := make([]string, len(resp.AvailabilityZones))
for i, v := range resp.AvailabilityZones {
raw[i] = *v.ZoneName
}
sort.Strings(raw)
if err := d.Set("names", raw); err != nil {
return fmt.Errorf("[WARN] Error setting Availability Zones: %s", err)
}
return nil
}
func validateStateType(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
validState := map[string]bool{
"available": true,
"information": true,
"impaired": true,
"unavailable": true,
}
if !validState[value] {
errors = append(errors, fmt.Errorf(
"%q contains an invalid Availability Zone state %q. Valid states are: %q, %q, %q and %q.",
k, value, "available", "information", "impaired", "unavailable"))
}
return
}


@ -0,0 +1,31 @@
package aws
import (
"fmt"
"github.com/hashicorp/terraform/helper/schema"
)
// See http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/billing-getting-started.html#step-2
var billingAccountId = "386209384616"
func dataSourceAwsBillingServiceAccount() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsBillingServiceAccountRead,
Schema: map[string]*schema.Schema{
"arn": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func dataSourceAwsBillingServiceAccountRead(d *schema.ResourceData, meta interface{}) error {
d.SetId(billingAccountId)
d.Set("arn", fmt.Sprintf("arn:%s:iam::%s:root", meta.(*AWSClient).partition, billingAccountId))
return nil
}
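// Illustrative only, not part of the vendored source: in the standard "aws"
// partition the ARN set above renders as "arn:aws:iam::386209384616:root".
func exampleBillingServiceAccountArn() string {
    return fmt.Sprintf("arn:%s:iam::%s:root", "aws", billingAccountId)
}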


@ -0,0 +1,50 @@
package aws
import (
"fmt"
"log"
"time"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsCallerIdentity() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsCallerIdentityRead,
Schema: map[string]*schema.Schema{
"account_id": {
Type: schema.TypeString,
Computed: true,
},
"arn": {
Type: schema.TypeString,
Computed: true,
},
"user_id": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func dataSourceAwsCallerIdentityRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*AWSClient).stsconn
res, err := client.GetCallerIdentity(&sts.GetCallerIdentityInput{})
if err != nil {
return fmt.Errorf("Error getting Caller Identity: %v", err)
}
log.Printf("[DEBUG] Received Caller Identity: %s", res)
d.SetId(time.Now().UTC().String())
d.Set("account_id", res.Account)
d.Set("arn", res.Arn)
d.Set("user_id", res.UserId)
return nil
}


@ -0,0 +1,48 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsCanonicalUserId() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsCanonicalUserIdRead,
Schema: map[string]*schema.Schema{
"id": {
Type: schema.TypeString,
Computed: true,
},
"display_name": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func dataSourceAwsCanonicalUserIdRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).s3conn
log.Printf("[DEBUG] Listing S3 buckets.")
req := &s3.ListBucketsInput{}
resp, err := conn.ListBuckets(req)
if err != nil {
return err
}
if resp == nil || resp.Owner == nil {
return fmt.Errorf("no canonical user ID found")
}
d.SetId(aws.StringValue(resp.Owner.ID))
d.Set("id", resp.Owner.ID)
d.Set("display_name", resp.Owner.DisplayName)
return nil
}


@ -0,0 +1,122 @@
package aws
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cloudformation"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsCloudFormationStack() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsCloudFormationStackRead,
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
},
"template_body": {
Type: schema.TypeString,
Computed: true,
StateFunc: func(v interface{}) string {
template, _ := normalizeCloudFormationTemplate(v)
return template
},
},
"capabilities": {
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"description": {
Type: schema.TypeString,
Computed: true,
},
"disable_rollback": {
Type: schema.TypeBool,
Computed: true,
},
"notification_arns": {
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"parameters": {
Type: schema.TypeMap,
Computed: true,
},
"outputs": {
Type: schema.TypeMap,
Computed: true,
},
"timeout_in_minutes": {
Type: schema.TypeInt,
Computed: true,
},
"iam_role_arn": {
Type: schema.TypeString,
Computed: true,
},
"tags": {
Type: schema.TypeMap,
Computed: true,
},
},
}
}
func dataSourceAwsCloudFormationStackRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).cfconn
name := d.Get("name").(string)
input := cloudformation.DescribeStacksInput{
StackName: aws.String(name),
}
out, err := conn.DescribeStacks(&input)
if err != nil {
return fmt.Errorf("Failed describing CloudFormation stack (%s): %s", name, err)
}
if l := len(out.Stacks); l != 1 {
return fmt.Errorf("Expected 1 CloudFormation stack (%s), found %d", name, l)
}
stack := out.Stacks[0]
d.SetId(*stack.StackId)
d.Set("description", stack.Description)
d.Set("disable_rollback", stack.DisableRollback)
d.Set("timeout_in_minutes", stack.TimeoutInMinutes)
d.Set("iam_role_arn", stack.RoleARN)
if len(stack.NotificationARNs) > 0 {
d.Set("notification_arns", schema.NewSet(schema.HashString, flattenStringList(stack.NotificationARNs)))
}
d.Set("parameters", flattenAllCloudFormationParameters(stack.Parameters))
d.Set("tags", flattenCloudFormationTags(stack.Tags))
d.Set("outputs", flattenCloudFormationOutputs(stack.Outputs))
if len(stack.Capabilities) > 0 {
d.Set("capabilities", schema.NewSet(schema.HashString, flattenStringList(stack.Capabilities)))
}
tInput := cloudformation.GetTemplateInput{
StackName: aws.String(name),
}
tOut, err := conn.GetTemplate(&tInput)
if err != nil {
return err
}
template, err := normalizeCloudFormationTemplate(*tOut.TemplateBody)
if err != nil {
return errwrap.Wrapf("template body contains an invalid JSON or YAML: {{err}}", err)
}
d.Set("template_body", template)
return nil
}


@ -0,0 +1,91 @@
package aws
import (
"bytes"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceTagsHash(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})
buf.WriteString(fmt.Sprintf("%s-", m["key"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["value"].(string)))
return hashcode.String(buf.String())
}
func dataSourceTags(m []*ec2.Tag) *schema.Set {
s := &schema.Set{
F: dataSourceTagsHash,
}
for _, v := range m {
tag := map[string]interface{}{
"key": *v.Key,
"value": *v.Value,
}
s.Add(tag)
}
return s
}
func buildAwsDataSourceFilters(set *schema.Set) []*ec2.Filter {
var filters []*ec2.Filter
for _, v := range set.List() {
m := v.(map[string]interface{})
var filterValues []*string
for _, e := range m["values"].([]interface{}) {
filterValues = append(filterValues, aws.String(e.(string)))
}
filters = append(filters, &ec2.Filter{
Name: aws.String(m["name"].(string)),
Values: filterValues,
})
}
return filters
}
func dataSourceFiltersSchema() *schema.Schema {
return &schema.Schema{
Type: schema.TypeSet,
Optional: true,
ForceNew: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
},
"values": {
Type: schema.TypeList,
Required: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
},
},
}
}
func dataSourceTagsSchema() *schema.Schema {
return &schema.Schema{
Type: schema.TypeSet,
Computed: true,
Set: dataSourceTagsHash,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"key": {
Type: schema.TypeString,
Computed: true,
},
"value": {
Type: schema.TypeString,
Computed: true,
},
},
},
}
}
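// A minimal usage sketch, not part of the vendored source: a slice of EC2 tags
// converts into the computed set shape declared by dataSourceTagsSchema, one
// {key, value} map per tag. The tag values below are hypothetical.
func exampleDataSourceTags() *schema.Set {
    return dataSourceTags([]*ec2.Tag{
        {Key: aws.String("Name"), Value: aws.String("example")},
        {Key: aws.String("Environment"), Value: aws.String("dev")},
    })
}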


@ -0,0 +1,290 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/rds"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsDbInstance() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsDbInstanceRead,
Schema: map[string]*schema.Schema{
"db_instance_identifier": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"address": {
Type: schema.TypeString,
Computed: true,
},
"allocated_storage": {
Type: schema.TypeInt,
Computed: true,
},
"auto_minor_version_upgrade": {
Type: schema.TypeBool,
Computed: true,
},
"availability_zone": {
Type: schema.TypeString,
Computed: true,
},
"backup_retention_period": {
Type: schema.TypeInt,
Computed: true,
},
"db_cluster_identifier": {
Type: schema.TypeString,
Computed: true,
},
"db_instance_arn": {
Type: schema.TypeString,
Computed: true,
},
"db_instance_class": {
Type: schema.TypeString,
Computed: true,
},
"db_name": {
Type: schema.TypeString,
Computed: true,
},
"db_parameter_groups": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"db_security_groups": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"db_subnet_group": {
Type: schema.TypeString,
Computed: true,
},
"db_instance_port": {
Type: schema.TypeInt,
Computed: true,
},
"endpoint": {
Type: schema.TypeString,
Computed: true,
},
"engine": {
Type: schema.TypeString,
Computed: true,
},
"engine_version": {
Type: schema.TypeString,
Computed: true,
},
"hosted_zone_id": {
Type: schema.TypeString,
Computed: true,
},
"iops": {
Type: schema.TypeInt,
Computed: true,
},
"kms_key_id": {
Type: schema.TypeString,
Computed: true,
},
"license_model": {
Type: schema.TypeString,
Computed: true,
},
"master_username": {
Type: schema.TypeString,
Computed: true,
},
"monitoring_interval": {
Type: schema.TypeInt,
Computed: true,
},
"monitoring_role_arn": {
Type: schema.TypeString,
Computed: true,
},
"multi_az": {
Type: schema.TypeBool,
Computed: true,
},
"option_group_memberships": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"port": {
Type: schema.TypeInt,
Computed: true,
},
"preferred_backup_window": {
Type: schema.TypeString,
Computed: true,
},
"preferred_maintenance_window": {
Type: schema.TypeString,
Computed: true,
},
"publicly_accessible": {
Type: schema.TypeBool,
Computed: true,
},
"storage_encrypted": {
Type: schema.TypeBool,
Computed: true,
},
"storage_type": {
Type: schema.TypeString,
Computed: true,
},
"timezone": {
Type: schema.TypeString,
Computed: true,
},
"vpc_security_groups": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"replicate_source_db": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func dataSourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).rdsconn
opts := rds.DescribeDBInstancesInput{
DBInstanceIdentifier: aws.String(d.Get("db_instance_identifier").(string)),
}
log.Printf("[DEBUG] DB Instance describe configuration: %#v", opts)
resp, err := conn.DescribeDBInstances(&opts)
if err != nil {
return err
}
if len(resp.DBInstances) < 1 {
return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
}
if len(resp.DBInstances) > 1 {
return fmt.Errorf("Your query returned more than one result. Please try a more specific search criteria.")
}
dbInstance := *resp.DBInstances[0]
d.SetId(d.Get("db_instance_identifier").(string))
d.Set("allocated_storage", dbInstance.AllocatedStorage)
d.Set("auto_minor_upgrade_enabled", dbInstance.AutoMinorVersionUpgrade)
d.Set("availability_zone", dbInstance.AvailabilityZone)
d.Set("backup_retention_period", dbInstance.BackupRetentionPeriod)
d.Set("db_cluster_identifier", dbInstance.DBClusterIdentifier)
d.Set("db_instance_arn", dbInstance.DBClusterIdentifier)
d.Set("db_instance_class", dbInstance.DBInstanceClass)
d.Set("db_name", dbInstance.DBName)
var parameterGroups []string
for _, v := range dbInstance.DBParameterGroups {
parameterGroups = append(parameterGroups, *v.DBParameterGroupName)
}
if err := d.Set("db_parameter_groups", parameterGroups); err != nil {
return fmt.Errorf("[DEBUG] Error setting db_parameter_groups attribute: %#v, error: %#v", parameterGroups, err)
}
var dbSecurityGroups []string
for _, v := range dbInstance.DBSecurityGroups {
dbSecurityGroups = append(dbSecurityGroups, *v.DBSecurityGroupName)
}
if err := d.Set("db_security_groups", dbSecurityGroups); err != nil {
return fmt.Errorf("[DEBUG] Error setting db_security_groups attribute: %#v, error: %#v", dbSecurityGroups, err)
}
d.Set("db_subnet_group", dbInstance.DBSubnetGroup)
d.Set("db_instance_port", dbInstance.DbInstancePort)
d.Set("engine", dbInstance.Engine)
d.Set("engine_version", dbInstance.EngineVersion)
d.Set("iops", dbInstance.Iops)
d.Set("kms_key_id", dbInstance.KmsKeyId)
d.Set("license_model", dbInstance.LicenseModel)
d.Set("master_username", dbInstance.MasterUsername)
d.Set("monitoring_interval", dbInstance.MonitoringInterval)
d.Set("monitoring_role_arn", dbInstance.MonitoringRoleArn)
d.Set("address", dbInstance.Endpoint.Address)
d.Set("port", dbInstance.Endpoint.Port)
d.Set("hosted_zone_id", dbInstance.Endpoint.HostedZoneId)
d.Set("endpoint", fmt.Sprintf("%s:%d", *dbInstance.Endpoint.Address, *dbInstance.Endpoint.Port))
var optionGroups []string
for _, v := range dbInstance.OptionGroupMemberships {
optionGroups = append(optionGroups, *v.OptionGroupName)
}
if err := d.Set("option_group_memberships", optionGroups); err != nil {
return fmt.Errorf("[DEBUG] Error setting option_group_memberships attribute: %#v, error: %#v", optionGroups, err)
}
d.Set("preferred_backup_window", dbInstance.PreferredBackupWindow)
d.Set("preferred_maintenance_window", dbInstance.PreferredMaintenanceWindow)
d.Set("publicly_accessible", dbInstance.PubliclyAccessible)
d.Set("storage_encrypted", dbInstance.StorageEncrypted)
d.Set("storage_type", dbInstance.StorageType)
d.Set("timezone", dbInstance.Timezone)
d.Set("replicate_source_db", dbInstance.ReadReplicaSourceDBInstanceIdentifier)
var vpcSecurityGroups []string
for _, v := range dbInstance.VpcSecurityGroups {
vpcSecurityGroups = append(vpcSecurityGroups, *v.VpcSecurityGroupId)
}
if err := d.Set("vpc_security_groups", vpcSecurityGroups); err != nil {
return fmt.Errorf("[DEBUG] Error setting vpc_security_groups attribute: %#v, error: %#v", vpcSecurityGroups, err)
}
return nil
}


@ -0,0 +1,217 @@
package aws
import (
"fmt"
"log"
"sort"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/rds"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsDbSnapshot() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsDbSnapshotRead,
Schema: map[string]*schema.Schema{
// Selection criteria
"db_instance_identifier": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"db_snapshot_identifier": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"snapshot_type": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"include_shared": {
Type: schema.TypeBool,
Optional: true,
ForceNew: true,
Default: false,
},
"include_public": {
Type: schema.TypeBool,
Optional: true,
ForceNew: true,
Default: false,
},
"most_recent": {
Type: schema.TypeBool,
Optional: true,
Default: false,
ForceNew: true,
},
// Computed values returned
"allocated_storage": {
Type: schema.TypeInt,
Computed: true,
},
"availability_zone": {
Type: schema.TypeString,
Computed: true,
},
"db_snapshot_arn": {
Type: schema.TypeString,
Computed: true,
},
"encrypted": {
Type: schema.TypeBool,
Computed: true,
},
"engine": {
Type: schema.TypeString,
Computed: true,
},
"engine_version": {
Type: schema.TypeString,
Computed: true,
},
"iops": {
Type: schema.TypeInt,
Computed: true,
},
"kms_key_id": {
Type: schema.TypeString,
Computed: true,
},
"license_model": {
Type: schema.TypeString,
Computed: true,
},
"option_group_name": {
Type: schema.TypeString,
Computed: true,
},
"port": {
Type: schema.TypeInt,
Computed: true,
},
"source_db_snapshot_identifier": {
Type: schema.TypeString,
Computed: true,
},
"source_region": {
Type: schema.TypeString,
Computed: true,
},
"snapshot_create_time": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"storage_type": {
Type: schema.TypeString,
Computed: true,
},
"vpc_id": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func dataSourceAwsDbSnapshotRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).rdsconn
instanceIdentifier, instanceIdentifierOk := d.GetOk("db_instance_identifier")
snapshotIdentifier, snapshotIdentifierOk := d.GetOk("db_snapshot_identifier")
if !instanceIdentifierOk && !snapshotIdentifierOk {
return fmt.Errorf("One of db_snapshot_indentifier or db_instance_identifier must be assigned")
}
params := &rds.DescribeDBSnapshotsInput{
IncludePublic: aws.Bool(d.Get("include_public").(bool)),
IncludeShared: aws.Bool(d.Get("include_shared").(bool)),
}
if v, ok := d.GetOk("snapshot_type"); ok {
params.SnapshotType = aws.String(v.(string))
}
if instanceIdentifierOk {
params.DBInstanceIdentifier = aws.String(instanceIdentifier.(string))
}
if snapshotIdentifierOk {
params.DBSnapshotIdentifier = aws.String(snapshotIdentifier.(string))
}
resp, err := conn.DescribeDBSnapshots(params)
if err != nil {
return err
}
if len(resp.DBSnapshots) < 1 {
return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
}
var snapshot *rds.DBSnapshot
if len(resp.DBSnapshots) > 1 {
recent := d.Get("most_recent").(bool)
log.Printf("[DEBUG] aws_db_snapshot - multiple results found and `most_recent` is set to: %t", recent)
if recent {
snapshot = mostRecentDbSnapshot(resp.DBSnapshots)
} else {
return fmt.Errorf("Your query returned more than one result. Please try a more specific search criteria.")
}
} else {
snapshot = resp.DBSnapshots[0]
}
return dbSnapshotDescriptionAttributes(d, snapshot)
}
type rdsSnapshotSort []*rds.DBSnapshot
func (a rdsSnapshotSort) Len() int { return len(a) }
func (a rdsSnapshotSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a rdsSnapshotSort) Less(i, j int) bool {
return (*a[i].SnapshotCreateTime).Before(*a[j].SnapshotCreateTime)
}
func mostRecentDbSnapshot(snapshots []*rds.DBSnapshot) *rds.DBSnapshot {
sortedSnapshots := snapshots
sort.Sort(rdsSnapshotSort(sortedSnapshots))
return sortedSnapshots[len(sortedSnapshots)-1]
}
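// Note, not part of the vendored source: rdsSnapshotSort orders snapshots by
// SnapshotCreateTime ascending, so mostRecentDbSnapshot takes the last element.
// The sort runs in place on the slice passed by the caller.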
func dbSnapshotDescriptionAttributes(d *schema.ResourceData, snapshot *rds.DBSnapshot) error {
d.SetId(*snapshot.DBInstanceIdentifier)
d.Set("db_instance_identifier", snapshot.DBInstanceIdentifier)
d.Set("db_snapshot_identifier", snapshot.DBSnapshotIdentifier)
d.Set("snapshot_type", snapshot.SnapshotType)
d.Set("allocated_storage", snapshot.AllocatedStorage)
d.Set("availability_zone", snapshot.AvailabilityZone)
d.Set("db_snapshot_arn", snapshot.DBSnapshotArn)
d.Set("encrypted", snapshot.Encrypted)
d.Set("engine", snapshot.Engine)
d.Set("engine_version", snapshot.EngineVersion)
d.Set("iops", snapshot.Iops)
d.Set("kms_key_id", snapshot.KmsKeyId)
d.Set("license_model", snapshot.LicenseModel)
d.Set("option_group_name", snapshot.OptionGroupName)
d.Set("port", snapshot.Port)
d.Set("source_db_snapshot_identifier", snapshot.SourceDBSnapshotIdentifier)
d.Set("source_region", snapshot.SourceRegion)
d.Set("status", snapshot.Status)
d.Set("vpc_id", snapshot.VpcId)
d.Set("snapshot_create_time", snapshot.SnapshotCreateTime.Format(time.RFC3339))
return nil
}


@ -0,0 +1,162 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsEbsSnapshot() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsEbsSnapshotRead,
Schema: map[string]*schema.Schema{
// Selection criteria
"filter": dataSourceFiltersSchema(),
"most_recent": {
Type: schema.TypeBool,
Optional: true,
Default: false,
ForceNew: true,
},
"owners": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"snapshot_ids": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"restorable_by_user_ids": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
// Computed values returned
"snapshot_id": {
Type: schema.TypeString,
Computed: true,
},
"volume_id": {
Type: schema.TypeString,
Computed: true,
},
"state": {
Type: schema.TypeString,
Computed: true,
},
"owner_id": {
Type: schema.TypeString,
Computed: true,
},
"owner_alias": {
Type: schema.TypeString,
Computed: true,
},
"encrypted": {
Type: schema.TypeBool,
Computed: true,
},
"description": {
Type: schema.TypeString,
Computed: true,
},
"volume_size": {
Type: schema.TypeInt,
Computed: true,
},
"kms_key_id": {
Type: schema.TypeString,
Computed: true,
},
"data_encryption_key_id": {
Type: schema.TypeString,
Computed: true,
},
"tags": dataSourceTagsSchema(),
},
}
}
func dataSourceAwsEbsSnapshotRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
restorableUsers, restorableUsersOk := d.GetOk("restorable_by_user_ids")
filters, filtersOk := d.GetOk("filter")
snapshotIds, snapshotIdsOk := d.GetOk("snapshot_ids")
owners, ownersOk := d.GetOk("owners")
if !restorableUsersOk && !filtersOk && !snapshotIdsOk && !ownersOk {
return fmt.Errorf("One of snapshot_ids, filters, restorable_by_user_ids, or owners must be assigned")
}
params := &ec2.DescribeSnapshotsInput{}
if restorableUsersOk {
params.RestorableByUserIds = expandStringList(restorableUsers.([]interface{}))
}
if filtersOk {
params.Filters = buildAwsDataSourceFilters(filters.(*schema.Set))
}
if ownersOk {
params.OwnerIds = expandStringList(owners.([]interface{}))
}
if snapshotIdsOk {
params.SnapshotIds = expandStringList(snapshotIds.([]interface{}))
}
resp, err := conn.DescribeSnapshots(params)
if err != nil {
return err
}
var snapshot *ec2.Snapshot
if len(resp.Snapshots) < 1 {
return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
}
if len(resp.Snapshots) > 1 {
recent := d.Get("most_recent").(bool)
log.Printf("[DEBUG] aws_ebs_snapshot - multiple results found and `most_recent` is set to: %t", recent)
if recent {
snapshot = mostRecentSnapshot(resp.Snapshots)
} else {
return fmt.Errorf("Your query returned more than one result. Please try a more specific search criteria.")
}
} else {
snapshot = resp.Snapshots[0]
}
// A single snapshot was selected, so set its attributes in state.
return snapshotDescriptionAttributes(d, snapshot)
}
func mostRecentSnapshot(snapshots []*ec2.Snapshot) *ec2.Snapshot {
return sortSnapshots(snapshots)[0]
}
func snapshotDescriptionAttributes(d *schema.ResourceData, snapshot *ec2.Snapshot) error {
d.SetId(*snapshot.SnapshotId)
d.Set("snapshot_id", snapshot.SnapshotId)
d.Set("volume_id", snapshot.VolumeId)
d.Set("data_encryption_key_id", snapshot.DataEncryptionKeyId)
d.Set("description", snapshot.Description)
d.Set("encrypted", snapshot.Encrypted)
d.Set("kms_key_id", snapshot.KmsKeyId)
d.Set("volume_size", snapshot.VolumeSize)
d.Set("state", snapshot.State)
d.Set("owner_id", snapshot.OwnerId)
d.Set("owner_alias", snapshot.OwnerAlias)
if err := d.Set("tags", dataSourceTags(snapshot.Tags)); err != nil {
return err
}
return nil
}


@ -0,0 +1,77 @@
package aws
import (
"fmt"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsEbsSnapshotIds() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsEbsSnapshotIdsRead,
Schema: map[string]*schema.Schema{
"filter": dataSourceFiltersSchema(),
"owners": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"restorable_by_user_ids": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"tags": dataSourceTagsSchema(),
"ids": &schema.Schema{
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
},
}
}
func dataSourceAwsEbsSnapshotIdsRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
restorableUsers, restorableUsersOk := d.GetOk("restorable_by_user_ids")
filters, filtersOk := d.GetOk("filter")
owners, ownersOk := d.GetOk("owners")
if !restorableUsersOk && !filtersOk && !ownersOk {
return fmt.Errorf("One of filters, restorable_by_user_ids, or owners must be assigned")
}
params := &ec2.DescribeSnapshotsInput{}
if restorableUsersOk {
params.RestorableByUserIds = expandStringList(restorableUsers.([]interface{}))
}
if filtersOk {
params.Filters = buildAwsDataSourceFilters(filters.(*schema.Set))
}
if ownersOk {
params.OwnerIds = expandStringList(owners.([]interface{}))
}
resp, err := conn.DescribeSnapshots(params)
if err != nil {
return err
}
snapshotIds := make([]string, 0)
for _, snapshot := range sortSnapshots(resp.Snapshots) {
snapshotIds = append(snapshotIds, *snapshot.SnapshotId)
}
d.SetId(fmt.Sprintf("%d", hashcode.String(params.String())))
d.Set("ids", snapshotIds)
return nil
}


@ -0,0 +1,136 @@
package aws
import (
"fmt"
"log"
"sort"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/davecgh/go-spew/spew"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsEbsVolume() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsEbsVolumeRead,
Schema: map[string]*schema.Schema{
"filter": dataSourceFiltersSchema(),
"most_recent": {
Type: schema.TypeBool,
Optional: true,
Default: false,
ForceNew: true,
},
"availability_zone": {
Type: schema.TypeString,
Computed: true,
},
"encrypted": {
Type: schema.TypeBool,
Computed: true,
},
"iops": {
Type: schema.TypeInt,
Computed: true,
},
"volume_type": {
Type: schema.TypeString,
Computed: true,
},
"size": {
Type: schema.TypeInt,
Computed: true,
},
"snapshot_id": {
Type: schema.TypeString,
Computed: true,
},
"kms_key_id": {
Type: schema.TypeString,
Computed: true,
},
"volume_id": {
Type: schema.TypeString,
Computed: true,
},
"tags": dataSourceTagsSchema(),
},
}
}
func dataSourceAwsEbsVolumeRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
filters, filtersOk := d.GetOk("filter")
params := &ec2.DescribeVolumesInput{}
if filtersOk {
params.Filters = buildAwsDataSourceFilters(filters.(*schema.Set))
}
resp, err := conn.DescribeVolumes(params)
if err != nil {
return err
}
log.Printf("Found These Volumes %s", spew.Sdump(resp.Volumes))
filteredVolumes := resp.Volumes[:]
var volume *ec2.Volume
if len(filteredVolumes) < 1 {
return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
}
if len(filteredVolumes) > 1 {
recent := d.Get("most_recent").(bool)
log.Printf("[DEBUG] aws_ebs_volume - multiple results found and `most_recent` is set to: %t", recent)
if recent {
volume = mostRecentVolume(filteredVolumes)
} else {
return fmt.Errorf("Your query returned more than one result. Please try a more " +
"specific search criteria, or set `most_recent` attribute to true.")
}
} else {
// Query returned single result.
volume = filteredVolumes[0]
}
log.Printf("[DEBUG] aws_ebs_volume - Single Volume found: %s", *volume.VolumeId)
return volumeDescriptionAttributes(d, volume)
}
type volumeSort []*ec2.Volume
func (a volumeSort) Len() int { return len(a) }
func (a volumeSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a volumeSort) Less(i, j int) bool {
itime := *a[i].CreateTime
jtime := *a[j].CreateTime
return itime.Unix() < jtime.Unix()
}
func mostRecentVolume(volumes []*ec2.Volume) *ec2.Volume {
sortedVolumes := volumes
sort.Sort(volumeSort(sortedVolumes))
return sortedVolumes[len(sortedVolumes)-1]
}
func volumeDescriptionAttributes(d *schema.ResourceData, volume *ec2.Volume) error {
d.SetId(*volume.VolumeId)
d.Set("volume_id", volume.VolumeId)
d.Set("availability_zone", volume.AvailabilityZone)
d.Set("encrypted", volume.Encrypted)
d.Set("iops", volume.Iops)
d.Set("kms_key_id", volume.KmsKeyId)
d.Set("size", volume.Size)
d.Set("snapshot_id", volume.SnapshotId)
d.Set("volume_type", volume.VolumeType)
if err := d.Set("tags", dataSourceTags(volume.Tags)); err != nil {
return err
}
return nil
}


@ -0,0 +1,77 @@
package aws
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ecs"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsEcsCluster() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsEcsClusterRead,
Schema: map[string]*schema.Schema{
"cluster_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"arn": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"pending_tasks_count": {
Type: schema.TypeInt,
Computed: true,
},
"running_tasks_count": {
Type: schema.TypeInt,
Computed: true,
},
"registered_container_instances_count": {
Type: schema.TypeInt,
Computed: true,
},
},
}
}
func dataSourceAwsEcsClusterRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ecsconn
desc, err := conn.DescribeClusters(&ecs.DescribeClustersInput{
Clusters: []*string{aws.String(d.Get("cluster_name").(string))},
})
if err != nil {
return err
}
for _, cluster := range desc.Clusters {
if aws.StringValue(cluster.ClusterName) != d.Get("cluster_name").(string) {
continue
}
d.SetId(aws.StringValue(cluster.ClusterArn))
d.Set("status", cluster.Status)
d.Set("pending_tasks_count", cluster.PendingTasksCount)
d.Set("running_tasks_count", cluster.RunningTasksCount)
d.Set("registered_container_instances_count", cluster.RegisteredContainerInstancesCount)
}
if d.Id() == "" {
return fmt.Errorf("cluster with name %q not found", d.Get("cluster_name").(string))
}
return nil
}


@ -0,0 +1,107 @@
package aws
import (
"fmt"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ecs"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsEcsContainerDefinition() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsEcsContainerDefinitionRead,
Schema: map[string]*schema.Schema{
"task_definition": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"container_name": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
// Computed values.
"image": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"image_digest": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"cpu": &schema.Schema{
Type: schema.TypeInt,
Computed: true,
},
"memory": &schema.Schema{
Type: schema.TypeInt,
Computed: true,
},
"memory_reservation": &schema.Schema{
Type: schema.TypeInt,
Computed: true,
},
"disable_networking": &schema.Schema{
Type: schema.TypeBool,
Computed: true,
},
"docker_labels": &schema.Schema{
Type: schema.TypeMap,
Computed: true,
Elem: schema.TypeString,
},
"environment": &schema.Schema{
Type: schema.TypeMap,
Computed: true,
Elem: schema.TypeString,
},
},
}
}
func dataSourceAwsEcsContainerDefinitionRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ecsconn
desc, err := conn.DescribeTaskDefinition(&ecs.DescribeTaskDefinitionInput{
TaskDefinition: aws.String(d.Get("task_definition").(string)),
})
if err != nil {
return err
}
taskDefinition := *desc.TaskDefinition
for _, def := range taskDefinition.ContainerDefinitions {
if aws.StringValue(def.Name) != d.Get("container_name").(string) {
continue
}
d.SetId(fmt.Sprintf("%s/%s", aws.StringValue(taskDefinition.TaskDefinitionArn), d.Get("container_name").(string)))
d.Set("image", aws.StringValue(def.Image))
image := aws.StringValue(def.Image)
if strings.Contains(image, ":") {
d.Set("image_digest", strings.Split(image, ":")[1])
}
d.Set("cpu", aws.Int64Value(def.Cpu))
d.Set("memory", aws.Int64Value(def.Memory))
d.Set("memory_reservation", aws.Int64Value(def.MemoryReservation))
d.Set("disable_networking", aws.BoolValue(def.DisableNetworking))
d.Set("docker_labels", aws.StringValueMap(def.DockerLabels))
var environment = map[string]string{}
for _, keyValuePair := range def.Environment {
environment[aws.StringValue(keyValuePair.Name)] = aws.StringValue(keyValuePair.Value)
}
d.Set("environment", environment)
}
if d.Id() == "" {
return fmt.Errorf("container with name %q not found in task definition %q", d.Get("container_name").(string), d.Get("task_definition").(string))
}
return nil
}
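// Note, not part of the vendored source: image_digest is taken as whatever
// follows the first ":" in the image reference, so a digest reference such as
// "nginx@sha256:4ab..." yields the bare hex digest, while a tag reference such
// as "nginx:1.11" yields the tag "1.11". The image names here are hypothetical.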


@ -0,0 +1,71 @@
package aws
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ecs"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsEcsTaskDefinition() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsEcsTaskDefinitionRead,
Schema: map[string]*schema.Schema{
"task_definition": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
// Computed values.
"family": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"network_mode": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"revision": &schema.Schema{
Type: schema.TypeInt,
Computed: true,
},
"status": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"task_role_arn": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
},
}
}
func dataSourceAwsEcsTaskDefinitionRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ecsconn
desc, err := conn.DescribeTaskDefinition(&ecs.DescribeTaskDefinitionInput{
TaskDefinition: aws.String(d.Get("task_definition").(string)),
})
if err != nil {
return fmt.Errorf("Failed getting task definition %s %q", err, d.Get("task_definition").(string))
}
taskDefinition := *desc.TaskDefinition
d.SetId(aws.StringValue(taskDefinition.TaskDefinitionArn))
d.Set("family", aws.StringValue(taskDefinition.Family))
d.Set("network_mode", aws.StringValue(taskDefinition.NetworkMode))
d.Set("revision", aws.Int64Value(taskDefinition.Revision))
d.Set("status", aws.StringValue(taskDefinition.Status))
d.Set("task_role_arn", aws.StringValue(taskDefinition.TaskRoleArn))
if d.Id() == "" {
return fmt.Errorf("task definition %q not found", d.Get("task_definition").(string))
}
return nil
}


@ -0,0 +1,113 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/efs"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsEfsFileSystem() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsEfsFileSystemRead,
Schema: map[string]*schema.Schema{
"creation_token": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
ValidateFunc: validateMaxLength(64),
},
"file_system_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
},
"performance_mode": {
Type: schema.TypeString,
Computed: true,
},
"tags": tagsSchemaComputed(),
},
}
}
func dataSourceAwsEfsFileSystemRead(d *schema.ResourceData, meta interface{}) error {
efsconn := meta.(*AWSClient).efsconn
describeEfsOpts := &efs.DescribeFileSystemsInput{}
if v, ok := d.GetOk("creation_token"); ok {
describeEfsOpts.CreationToken = aws.String(v.(string))
}
if v, ok := d.GetOk("file_system_id"); ok {
describeEfsOpts.FileSystemId = aws.String(v.(string))
}
describeResp, err := efsconn.DescribeFileSystems(describeEfsOpts)
if err != nil {
return errwrap.Wrapf("Error retrieving EFS: {{err}}", err)
}
if len(describeResp.FileSystems) != 1 {
return fmt.Errorf("Search returned %d results, please revise so only one is returned", len(describeResp.FileSystems))
}
d.SetId(*describeResp.FileSystems[0].FileSystemId)
tags := make([]*efs.Tag, 0)
var marker string
for {
params := &efs.DescribeTagsInput{
FileSystemId: aws.String(d.Id()),
}
if marker != "" {
params.Marker = aws.String(marker)
}
tagsResp, err := efsconn.DescribeTags(params)
if err != nil {
return fmt.Errorf("Error retrieving EC2 tags for EFS file system (%q): %s",
d.Id(), err.Error())
}
for _, tag := range tagsResp.Tags {
tags = append(tags, tag)
}
if tagsResp.NextMarker != nil {
marker = *tagsResp.NextMarker
} else {
break
}
}
err = d.Set("tags", tagsToMapEFS(tags))
if err != nil {
return err
}
var fs *efs.FileSystemDescription
for _, f := range describeResp.FileSystems {
if d.Id() == *f.FileSystemId {
fs = f
break
}
}
if fs == nil {
log.Printf("[WARN] EFS (%s) not found, removing from state", d.Id())
d.SetId("")
return nil
}
d.Set("creation_token", fs.CreationToken)
d.Set("performance_mode", fs.PerformanceMode)
d.Set("file_system_id", fs.FileSystemId)
return nil
}


@ -0,0 +1,64 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsEip() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsEipRead,
Schema: map[string]*schema.Schema{
"id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"public_ip": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
},
}
}
func dataSourceAwsEipRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
req := &ec2.DescribeAddressesInput{}
if id := d.Get("id"); id != "" {
req.AllocationIds = []*string{aws.String(id.(string))}
}
if public_ip := d.Get("public_ip"); public_ip != "" {
req.PublicIps = []*string{aws.String(public_ip.(string))}
}
log.Printf("[DEBUG] DescribeAddresses %s\n", req)
resp, err := conn.DescribeAddresses(req)
if err != nil {
return err
}
if resp == nil || len(resp.Addresses) == 0 {
return fmt.Errorf("no matching Elastic IP found")
}
if len(resp.Addresses) > 1 {
return fmt.Errorf("multiple Elastic IPs matched; use additional constraints to reduce matches to a single Elastic IP")
}
eip := resp.Addresses[0]
d.SetId(*eip.AllocationId)
d.Set("id", eip.AllocationId)
d.Set("public_ip", eip.PublicIp)
return nil
}


@ -0,0 +1,105 @@
package aws
import (
"fmt"
"log"
"regexp"
"github.com/aws/aws-sdk-go/service/elasticbeanstalk"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsElasticBeanstalkSolutionStack() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsElasticBeanstalkSolutionStackRead,
Schema: map[string]*schema.Schema{
"name_regex": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateSolutionStackNameRegex,
},
"most_recent": {
Type: schema.TypeBool,
Optional: true,
Default: false,
ForceNew: true,
},
// Computed values.
"name": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
// dataSourceAwsElasticBeanstalkSolutionStackRead performs the API lookup.
func dataSourceAwsElasticBeanstalkSolutionStackRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).elasticbeanstalkconn
nameRegex := d.Get("name_regex")
var params *elasticbeanstalk.ListAvailableSolutionStacksInput
resp, err := conn.ListAvailableSolutionStacks(params)
if err != nil {
return err
}
var filteredSolutionStacks []*string
r := regexp.MustCompile(nameRegex.(string))
for _, solutionStack := range resp.SolutionStacks {
if r.MatchString(*solutionStack) {
filteredSolutionStacks = append(filteredSolutionStacks, solutionStack)
}
}
var solutionStack *string
if len(filteredSolutionStacks) < 1 {
return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
}
if len(filteredSolutionStacks) == 1 {
// Query returned single result.
solutionStack = filteredSolutionStacks[0]
} else {
recent := d.Get("most_recent").(bool)
log.Printf("[DEBUG] aws_elastic_beanstalk_solution_stack - multiple results found and `most_recent` is set to: %t", recent)
if recent {
solutionStack = mostRecentSolutionStack(filteredSolutionStacks)
} else {
return fmt.Errorf("Your query returned more than one result. Please try a more " +
"specific search criteria, or set `most_recent` attribute to true.")
}
}
log.Printf("[DEBUG] aws_elastic_beanstalk_solution_stack - Single solution stack found: %s", *solutionStack)
return solutionStackDescriptionAttributes(d, solutionStack)
}
// Returns the most recent solution stack out of a slice of stacks.
func mostRecentSolutionStack(solutionStacks []*string) *string {
return solutionStacks[0]
}
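// Note, not part of the vendored source: this simply takes the first entry and
// so appears to rely on ListAvailableSolutionStacks returning stacks in
// newest-first order rather than sorting them locally.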
// solutionStackDescriptionAttributes populates the fields returned for the solution stack.
func solutionStackDescriptionAttributes(d *schema.ResourceData, solutionStack *string) error {
// Simple attributes first
d.SetId(*solutionStack)
d.Set("name", solutionStack)
return nil
}
func validateSolutionStackNameRegex(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if _, err := regexp.Compile(value); err != nil {
errors = append(errors, fmt.Errorf(
"%q contains an invalid regular expression: %s",
k, err))
}
return
}


@ -0,0 +1,236 @@
package aws
import (
"fmt"
"log"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/elasticache"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsElastiCacheCluster() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsElastiCacheClusterRead,
Schema: map[string]*schema.Schema{
"cluster_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
StateFunc: func(v interface{}) string {
value := v.(string)
return strings.ToLower(value)
},
},
"node_type": {
Type: schema.TypeString,
Computed: true,
},
"num_cache_nodes": {
Type: schema.TypeInt,
Computed: true,
},
"subnet_group_name": {
Type: schema.TypeString,
Computed: true,
},
"engine": {
Type: schema.TypeString,
Computed: true,
},
"engine_version": {
Type: schema.TypeString,
Computed: true,
},
"parameter_group_name": {
Type: schema.TypeString,
Computed: true,
},
"replication_group_id": {
Type: schema.TypeString,
Computed: true,
},
"security_group_names": {
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"security_group_ids": {
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"maintenance_window": {
Type: schema.TypeString,
Computed: true,
},
"snapshot_window": {
Type: schema.TypeString,
Computed: true,
},
"snapshot_retention_limit": {
Type: schema.TypeInt,
Computed: true,
},
"availability_zone": {
Type: schema.TypeString,
Computed: true,
},
"notification_topic_arn": {
Type: schema.TypeString,
Computed: true,
},
"port": {
Type: schema.TypeInt,
Computed: true,
},
"configuration_endpoint": {
Type: schema.TypeString,
Computed: true,
},
"cluster_address": {
Type: schema.TypeString,
Computed: true,
},
"arn": {
Type: schema.TypeString,
Computed: true,
},
"cache_nodes": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"id": {
Type: schema.TypeString,
Computed: true,
},
"address": {
Type: schema.TypeString,
Computed: true,
},
"port": {
Type: schema.TypeInt,
Computed: true,
},
"availability_zone": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"tags": tagsSchemaComputed(),
},
}
}
func dataSourceAwsElastiCacheClusterRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).elasticacheconn
req := &elasticache.DescribeCacheClustersInput{
CacheClusterId: aws.String(d.Get("cluster_id").(string)),
ShowCacheNodeInfo: aws.Bool(true),
}
resp, err := conn.DescribeCacheClusters(req)
if err != nil {
return err
}
if len(resp.CacheClusters) < 1 {
return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
}
if len(resp.CacheClusters) > 1 {
return fmt.Errorf("Your query returned more than one result. Please try a more specific search criteria.")
}
cluster := resp.CacheClusters[0]
d.SetId(*cluster.CacheClusterId)
d.Set("cluster_id", cluster.CacheClusterId)
d.Set("node_type", cluster.CacheNodeType)
d.Set("num_cache_nodes", cluster.NumCacheNodes)
d.Set("subnet_group_name", cluster.CacheSubnetGroupName)
d.Set("engine", cluster.Engine)
d.Set("engine_version", cluster.EngineVersion)
d.Set("security_group_names", flattenElastiCacheSecurityGroupNames(cluster.CacheSecurityGroups))
d.Set("security_group_ids", flattenElastiCacheSecurityGroupIds(cluster.SecurityGroups))
if cluster.CacheParameterGroup != nil {
d.Set("parameter_group_name", cluster.CacheParameterGroup.CacheParameterGroupName)
}
if cluster.ReplicationGroupId != nil {
d.Set("replication_group_id", cluster.ReplicationGroupId)
}
d.Set("maintenance_window", cluster.PreferredMaintenanceWindow)
d.Set("snapshot_window", cluster.SnapshotWindow)
d.Set("snapshot_retention_limit", cluster.SnapshotRetentionLimit)
d.Set("availability_zone", cluster.PreferredAvailabilityZone)
if cluster.NotificationConfiguration != nil {
if *cluster.NotificationConfiguration.TopicStatus == "active" {
d.Set("notification_topic_arn", cluster.NotificationConfiguration.TopicArn)
}
}
if cluster.ConfigurationEndpoint != nil {
d.Set("port", cluster.ConfigurationEndpoint.Port)
d.Set("configuration_endpoint", aws.String(fmt.Sprintf("%s:%d", *cluster.ConfigurationEndpoint.Address, *cluster.ConfigurationEndpoint.Port)))
d.Set("cluster_address", aws.String(fmt.Sprintf("%s", *cluster.ConfigurationEndpoint.Address)))
}
if err := setCacheNodeData(d, cluster); err != nil {
return err
}
arn, err := buildECARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region)
if err != nil {
log.Printf("[DEBUG] Error building ARN for ElastiCache Cluster %s", *cluster.CacheClusterId)
}
d.Set("arn", arn)
tagResp, err := conn.ListTagsForResource(&elasticache.ListTagsForResourceInput{
ResourceName: aws.String(arn),
})
if err != nil {
log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn)
}
	var et []*elasticache.Tag
	if tagResp != nil && len(tagResp.TagList) > 0 {
		et = tagResp.TagList
	}
d.Set("tags", tagsToMapEC(et))
return nil
}

View File

@ -0,0 +1,56 @@
package aws
import (
"fmt"
"github.com/hashicorp/terraform/helper/schema"
)
// See https://github.com/fog/fog-aws/pull/332/files
// This list isn't exposed by AWS; it was assembled through
// troubleshooting (see the fog-aws PR above).
var elbHostedZoneIdPerRegionMap = map[string]string{
"ap-northeast-1": "Z14GRHDCWA56QT",
"ap-northeast-2": "ZWKZPGTI48KDX",
"ap-south-1": "ZP97RAFLXTNZK",
"ap-southeast-1": "Z1LMS91P8CMLE5",
"ap-southeast-2": "Z1GM3OXH4ZPM65",
"ca-central-1": "ZQSVJUPU6J1EY",
"eu-central-1": "Z215JYRZR1TBD5",
"eu-west-1": "Z32O12XQLNTSW2",
"eu-west-2": "ZHURV8PSTC4K8",
"us-east-1": "Z35SXDOTRQ7X7K",
"us-east-2": "Z3AADJGX6KTTL2",
"us-west-1": "Z368ELLRRE2KJ0",
"us-west-2": "Z1H1FL5HABSF5",
"sa-east-1": "Z2P70J7HTTTPLU",
"us-gov-west-1": "048591011584",
"cn-north-1": "638102146993",
}
func dataSourceAwsElbHostedZoneId() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsElbHostedZoneIdRead,
Schema: map[string]*schema.Schema{
"region": {
Type: schema.TypeString,
Optional: true,
},
},
}
}
func dataSourceAwsElbHostedZoneIdRead(d *schema.ResourceData, meta interface{}) error {
region := meta.(*AWSClient).region
if v, ok := d.GetOk("region"); ok {
region = v.(string)
}
if zoneId, ok := elbHostedZoneIdPerRegionMap[region]; ok {
d.SetId(zoneId)
return nil
}
return fmt.Errorf("Unknown region (%q)", region)
}

View File

@ -0,0 +1,61 @@
package aws
import (
"fmt"
"github.com/hashicorp/terraform/helper/schema"
)
// See http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html#attach-bucket-policy
var elbAccountIdPerRegionMap = map[string]string{
"ap-northeast-1": "582318560864",
"ap-northeast-2": "600734575887",
"ap-south-1": "718504428378",
"ap-southeast-1": "114774131450",
"ap-southeast-2": "783225319266",
"ca-central-1": "985666609251",
"cn-north-1": "638102146993",
"eu-central-1": "054676820928",
"eu-west-1": "156460612806",
"eu-west-2": "652711504416",
"sa-east-1": "507241528517",
"us-east-1": "127311923021",
"us-east-2": "033677994240",
"us-gov-west": "048591011584",
"us-west-1": "027434742980",
"us-west-2": "797873946194",
}
func dataSourceAwsElbServiceAccount() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsElbServiceAccountRead,
Schema: map[string]*schema.Schema{
"region": {
Type: schema.TypeString,
Optional: true,
},
"arn": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func dataSourceAwsElbServiceAccountRead(d *schema.ResourceData, meta interface{}) error {
region := meta.(*AWSClient).region
if v, ok := d.GetOk("region"); ok {
region = v.(string)
}
if accid, ok := elbAccountIdPerRegionMap[region]; ok {
d.SetId(accid)
d.Set("arn", fmt.Sprintf("arn:%s:iam::%s:root", meta.(*AWSClient).partition, accid))
return nil
}
return fmt.Errorf("Unknown region (%q)", region)
}

View File

@ -0,0 +1,48 @@
package aws
import (
"fmt"
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsIamAccountAlias() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsIamAccountAliasRead,
Schema: map[string]*schema.Schema{
"account_alias": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func dataSourceAwsIamAccountAliasRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).iamconn
log.Printf("[DEBUG] Reading IAM Account Aliases.")
d.SetId(time.Now().UTC().String())
req := &iam.ListAccountAliasesInput{}
resp, err := conn.ListAccountAliases(req)
if err != nil {
return err
}
// 'AccountAliases': [] if there is no alias.
if resp == nil || len(resp.AccountAliases) == 0 {
return fmt.Errorf("no IAM account alias found")
}
alias := aws.StringValue(resp.AccountAliases[0])
log.Printf("[DEBUG] Setting AWS IAM Account Alias to %s.", alias)
d.Set("account_alias", alias)
return nil
}

View File

@ -0,0 +1,232 @@
package aws
import (
"fmt"
"encoding/json"
"strings"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
"strconv"
)
var dataSourceAwsIamPolicyDocumentVarReplacer = strings.NewReplacer("&{", "${")
func dataSourceAwsIamPolicyDocument() *schema.Resource {
setOfString := &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
}
return &schema.Resource{
Read: dataSourceAwsIamPolicyDocumentRead,
Schema: map[string]*schema.Schema{
"policy_id": {
Type: schema.TypeString,
Optional: true,
},
"statement": {
Type: schema.TypeList,
Required: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sid": {
Type: schema.TypeString,
Optional: true,
},
"effect": {
Type: schema.TypeString,
Optional: true,
Default: "Allow",
ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
switch v.(string) {
case "Allow", "Deny":
return
default:
es = append(es, fmt.Errorf("%q must be either \"Allow\" or \"Deny\"", k))
return
}
},
},
"actions": setOfString,
"not_actions": setOfString,
"resources": setOfString,
"not_resources": setOfString,
"principals": dataSourceAwsIamPolicyPrincipalSchema(),
"not_principals": dataSourceAwsIamPolicyPrincipalSchema(),
"condition": {
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"test": {
Type: schema.TypeString,
Required: true,
},
"variable": {
Type: schema.TypeString,
Required: true,
},
"values": {
Type: schema.TypeSet,
Required: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
},
},
},
},
},
},
"json": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{}) error {
doc := &IAMPolicyDoc{
Version: "2012-10-17",
}
if policyId, hasPolicyId := d.GetOk("policy_id"); hasPolicyId {
doc.Id = policyId.(string)
}
var cfgStmts = d.Get("statement").([]interface{})
stmts := make([]*IAMPolicyStatement, len(cfgStmts))
doc.Statements = stmts
for i, stmtI := range cfgStmts {
cfgStmt := stmtI.(map[string]interface{})
stmt := &IAMPolicyStatement{
Effect: cfgStmt["effect"].(string),
}
if sid, ok := cfgStmt["sid"]; ok {
stmt.Sid = sid.(string)
}
if actions := cfgStmt["actions"].(*schema.Set).List(); len(actions) > 0 {
stmt.Actions = iamPolicyDecodeConfigStringList(actions)
}
if actions := cfgStmt["not_actions"].(*schema.Set).List(); len(actions) > 0 {
stmt.NotActions = iamPolicyDecodeConfigStringList(actions)
}
if resources := cfgStmt["resources"].(*schema.Set).List(); len(resources) > 0 {
stmt.Resources = dataSourceAwsIamPolicyDocumentReplaceVarsInList(
iamPolicyDecodeConfigStringList(resources),
)
}
if resources := cfgStmt["not_resources"].(*schema.Set).List(); len(resources) > 0 {
stmt.NotResources = dataSourceAwsIamPolicyDocumentReplaceVarsInList(
iamPolicyDecodeConfigStringList(resources),
)
}
if principals := cfgStmt["principals"].(*schema.Set).List(); len(principals) > 0 {
stmt.Principals = dataSourceAwsIamPolicyDocumentMakePrincipals(principals)
}
if principals := cfgStmt["not_principals"].(*schema.Set).List(); len(principals) > 0 {
stmt.NotPrincipals = dataSourceAwsIamPolicyDocumentMakePrincipals(principals)
}
if conditions := cfgStmt["condition"].(*schema.Set).List(); len(conditions) > 0 {
stmt.Conditions = dataSourceAwsIamPolicyDocumentMakeConditions(conditions)
}
stmts[i] = stmt
}
jsonDoc, err := json.MarshalIndent(doc, "", " ")
if err != nil {
// should never happen if the above code is correct
return err
}
jsonString := string(jsonDoc)
d.Set("json", jsonString)
d.SetId(strconv.Itoa(hashcode.String(jsonString)))
return nil
}
func dataSourceAwsIamPolicyDocumentReplaceVarsInList(in interface{}) interface{} {
switch v := in.(type) {
case string:
return dataSourceAwsIamPolicyDocumentVarReplacer.Replace(v)
case []string:
out := make([]string, len(v))
for i, item := range v {
out[i] = dataSourceAwsIamPolicyDocumentVarReplacer.Replace(item)
}
return out
default:
panic("dataSourceAwsIamPolicyDocumentReplaceVarsInList: input not string nor []string")
}
}
func dataSourceAwsIamPolicyDocumentMakeConditions(in []interface{}) IAMPolicyStatementConditionSet {
out := make([]IAMPolicyStatementCondition, len(in))
for i, itemI := range in {
item := itemI.(map[string]interface{})
out[i] = IAMPolicyStatementCondition{
Test: item["test"].(string),
Variable: item["variable"].(string),
Values: dataSourceAwsIamPolicyDocumentReplaceVarsInList(
iamPolicyDecodeConfigStringList(
item["values"].(*schema.Set).List(),
),
),
}
}
return IAMPolicyStatementConditionSet(out)
}
func dataSourceAwsIamPolicyDocumentMakePrincipals(in []interface{}) IAMPolicyStatementPrincipalSet {
out := make([]IAMPolicyStatementPrincipal, len(in))
for i, itemI := range in {
item := itemI.(map[string]interface{})
out[i] = IAMPolicyStatementPrincipal{
Type: item["type"].(string),
Identifiers: dataSourceAwsIamPolicyDocumentReplaceVarsInList(
iamPolicyDecodeConfigStringList(
item["identifiers"].(*schema.Set).List(),
),
),
}
}
return IAMPolicyStatementPrincipalSet(out)
}
func dataSourceAwsIamPolicyPrincipalSchema() *schema.Schema {
return &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"type": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"identifiers": &schema.Schema{
Type: schema.TypeSet,
Required: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
},
},
}
}
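A brief sketch of the &{...} escape handled by dataSourceAwsIamPolicyDocumentVarReplacer defined at the top of this file: writing &{aws:username} in configuration keeps Terraform's own ${...} interpolation out of the way, and the replacer turns it back into an IAM policy variable when the JSON document is rendered. The bucket ARN below is only an illustrative value.
func examplePolicyVariableReplacement() {
	in := "arn:aws:s3:::mybucket/&{aws:username}/*"
	// The replacer only rewrites the "&{" prefix back to "${".
	fmt.Println(dataSourceAwsIamPolicyDocumentVarReplacer.Replace(in))
	// arn:aws:s3:::mybucket/${aws:username}/*
}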

View File

@ -0,0 +1,67 @@
package aws
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsIAMRole() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsIAMRoleRead,
Schema: map[string]*schema.Schema{
"arn": {
Type: schema.TypeString,
Computed: true,
},
"assume_role_policy_document": {
Type: schema.TypeString,
Computed: true,
},
"path": {
Type: schema.TypeString,
Computed: true,
},
"role_id": {
Type: schema.TypeString,
Computed: true,
},
"role_name": {
Type: schema.TypeString,
Required: true,
},
},
}
}
func dataSourceAwsIAMRoleRead(d *schema.ResourceData, meta interface{}) error {
iamconn := meta.(*AWSClient).iamconn
roleName := d.Get("role_name").(string)
req := &iam.GetRoleInput{
RoleName: aws.String(roleName),
}
resp, err := iamconn.GetRole(req)
if err != nil {
return errwrap.Wrapf("Error getting roles: {{err}}", err)
}
if resp == nil {
return fmt.Errorf("no IAM role found")
}
role := resp.Role
d.SetId(*role.RoleId)
d.Set("arn", role.Arn)
d.Set("assume_role_policy_document", role.AssumeRolePolicyDocument)
d.Set("path", role.Path)
d.Set("role_id", role.RoleId)
return nil
}

View File

@ -0,0 +1,140 @@
package aws
import (
"fmt"
"sort"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsIAMServerCertificate() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsIAMServerCertificateRead,
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
ConflictsWith: []string{"name_prefix"},
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if len(value) > 128 {
errors = append(errors, fmt.Errorf(
"%q cannot be longer than 128 characters", k))
}
return
},
},
"name_prefix": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if len(value) > 30 {
errors = append(errors, fmt.Errorf(
"%q cannot be longer than 30 characters, name is limited to 128", k))
}
return
},
},
"latest": {
Type: schema.TypeBool,
Optional: true,
ForceNew: true,
Default: false,
},
"arn": {
Type: schema.TypeString,
Computed: true,
},
"id": {
Type: schema.TypeString,
Computed: true,
},
"path": {
Type: schema.TypeString,
Computed: true,
},
"expiration_date": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
type certificateByExpiration []*iam.ServerCertificateMetadata
func (m certificateByExpiration) Len() int {
return len(m)
}
func (m certificateByExpiration) Swap(i, j int) {
m[i], m[j] = m[j], m[i]
}
func (m certificateByExpiration) Less(i, j int) bool {
return m[i].Expiration.After(*m[j].Expiration)
}
func dataSourceAwsIAMServerCertificateRead(d *schema.ResourceData, meta interface{}) error {
iamconn := meta.(*AWSClient).iamconn
var matcher = func(cert *iam.ServerCertificateMetadata) bool {
return strings.HasPrefix(aws.StringValue(cert.ServerCertificateName), d.Get("name_prefix").(string))
}
if v, ok := d.GetOk("name"); ok {
matcher = func(cert *iam.ServerCertificateMetadata) bool {
return aws.StringValue(cert.ServerCertificateName) == v.(string)
}
}
var metadatas = []*iam.ServerCertificateMetadata{}
err := iamconn.ListServerCertificatesPages(&iam.ListServerCertificatesInput{}, func(p *iam.ListServerCertificatesOutput, lastPage bool) bool {
for _, cert := range p.ServerCertificateMetadataList {
if matcher(cert) {
metadatas = append(metadatas, cert)
}
}
return true
})
if err != nil {
return errwrap.Wrapf("Error describing certificates: {{err}}", err)
}
if len(metadatas) == 0 {
return fmt.Errorf("Search for AWS IAM server certificate returned no results")
}
if len(metadatas) > 1 {
if !d.Get("latest").(bool) {
return fmt.Errorf("Search for AWS IAM server certificate returned too many results")
}
sort.Sort(certificateByExpiration(metadatas))
}
metadata := metadatas[0]
d.SetId(*metadata.ServerCertificateId)
d.Set("arn", *metadata.Arn)
d.Set("path", *metadata.Path)
d.Set("id", *metadata.ServerCertificateId)
d.Set("name", *metadata.ServerCertificateName)
if metadata.Expiration != nil {
d.Set("expiration_date", metadata.Expiration.Format("2006-01-02T15:04:05"))
}
return nil
}
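A small sketch of the ordering that certificateByExpiration defines above: Less compares with Expiration.After, so sort.Sort puts the certificate expiring furthest in the future at index 0, which is what the latest flag relies on. The names and dates are made up, and the sketch assumes the "time" package is also imported.
func exampleCertificateOrdering() {
	older := time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC)
	newer := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)
	certs := certificateByExpiration{
		&iam.ServerCertificateMetadata{ServerCertificateName: aws.String("old"), Expiration: &older},
		&iam.ServerCertificateMetadata{ServerCertificateName: aws.String("new"), Expiration: &newer},
	}
	sort.Sort(certs)
	// The latest-expiring certificate sorts first.
	fmt.Println(aws.StringValue(certs[0].ServerCertificateName)) // new
}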

View File

@ -0,0 +1,356 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsInstance() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsInstanceRead,
Schema: map[string]*schema.Schema{
"filter": dataSourceFiltersSchema(),
"tags": dataSourceTagsSchema(),
"instance_tags": tagsSchemaComputed(),
"instance_id": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"ami": {
Type: schema.TypeString,
Computed: true,
},
"instance_type": {
Type: schema.TypeString,
Computed: true,
},
"instance_state": {
Type: schema.TypeString,
Computed: true,
},
"availability_zone": {
Type: schema.TypeString,
Computed: true,
},
"tenancy": {
Type: schema.TypeString,
Computed: true,
},
"key_name": {
Type: schema.TypeString,
Computed: true,
},
"public_dns": {
Type: schema.TypeString,
Computed: true,
},
"public_ip": {
Type: schema.TypeString,
Computed: true,
},
"private_dns": {
Type: schema.TypeString,
Computed: true,
},
"private_ip": {
Type: schema.TypeString,
Computed: true,
},
"iam_instance_profile": {
Type: schema.TypeString,
Computed: true,
},
"subnet_id": {
Type: schema.TypeString,
Computed: true,
},
"network_interface_id": {
Type: schema.TypeString,
Computed: true,
},
"associate_public_ip_address": {
Type: schema.TypeBool,
Computed: true,
},
"ebs_optimized": {
Type: schema.TypeBool,
Computed: true,
},
"source_dest_check": {
Type: schema.TypeBool,
Computed: true,
},
"monitoring": {
Type: schema.TypeBool,
Computed: true,
},
"user_data": {
Type: schema.TypeString,
Computed: true,
},
"security_groups": {
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"vpc_security_group_ids": {
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"ephemeral_block_device": {
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"device_name": {
Type: schema.TypeString,
Required: true,
},
"virtual_name": {
Type: schema.TypeString,
Optional: true,
},
"no_device": {
Type: schema.TypeBool,
Optional: true,
},
},
},
},
"ebs_block_device": {
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"delete_on_termination": {
Type: schema.TypeBool,
Computed: true,
},
"device_name": {
Type: schema.TypeString,
Computed: true,
},
"encrypted": {
Type: schema.TypeBool,
Computed: true,
},
"iops": {
Type: schema.TypeInt,
Computed: true,
},
"snapshot_id": {
Type: schema.TypeString,
Computed: true,
},
"volume_size": {
Type: schema.TypeInt,
Computed: true,
},
"volume_type": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"root_block_device": {
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"delete_on_termination": {
Type: schema.TypeBool,
Computed: true,
},
"iops": {
Type: schema.TypeInt,
Computed: true,
},
"volume_size": {
Type: schema.TypeInt,
Computed: true,
},
"volume_type": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
},
}
}
// dataSourceAwsInstanceRead looks up a single EC2 instance by ID, tags, and/or filters
func dataSourceAwsInstanceRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
filters, filtersOk := d.GetOk("filter")
instanceID, instanceIDOk := d.GetOk("instance_id")
tags, tagsOk := d.GetOk("instance_tags")
	if !filtersOk && !instanceIDOk && !tagsOk {
		return fmt.Errorf("One of filter, instance_tags, or instance_id must be assigned")
}
// Build up search parameters
params := &ec2.DescribeInstancesInput{}
if filtersOk {
params.Filters = buildAwsDataSourceFilters(filters.(*schema.Set))
}
if instanceIDOk {
params.InstanceIds = []*string{aws.String(instanceID.(string))}
}
if tagsOk {
params.Filters = append(params.Filters, buildEC2TagFilterList(
tagsFromMap(tags.(map[string]interface{})),
)...)
}
// Perform the lookup
resp, err := conn.DescribeInstances(params)
if err != nil {
return err
}
// If no instances were returned, return
if len(resp.Reservations) == 0 {
return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
}
var filteredInstances []*ec2.Instance
// loop through reservations, and remove terminated instances, populate instance slice
for _, res := range resp.Reservations {
for _, instance := range res.Instances {
if instance.State != nil && *instance.State.Name != "terminated" {
filteredInstances = append(filteredInstances, instance)
}
}
}
var instance *ec2.Instance
if len(filteredInstances) < 1 {
return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
}
// (TODO: Support a list of instances to be returned)
// Possibly with a different data source that returns a list of individual instance data sources
	if len(filteredInstances) > 1 {
		return fmt.Errorf("Your query returned more than one result. Please use more specific search criteria.")
	}
	instance = filteredInstances[0]
log.Printf("[DEBUG] aws_instance - Single Instance ID found: %s", *instance.InstanceId)
return instanceDescriptionAttributes(d, instance, conn)
}
// Populate instance attribute fields with the returned instance
func instanceDescriptionAttributes(d *schema.ResourceData, instance *ec2.Instance, conn *ec2.EC2) error {
d.SetId(*instance.InstanceId)
// Set the easy attributes
d.Set("instance_state", instance.State.Name)
	if instance.Placement != nil {
		d.Set("availability_zone", instance.Placement.AvailabilityZone)
		if instance.Placement.Tenancy != nil {
			d.Set("tenancy", instance.Placement.Tenancy)
		}
	}
d.Set("ami", instance.ImageId)
d.Set("instance_type", instance.InstanceType)
d.Set("key_name", instance.KeyName)
d.Set("public_dns", instance.PublicDnsName)
d.Set("public_ip", instance.PublicIpAddress)
d.Set("private_dns", instance.PrivateDnsName)
d.Set("private_ip", instance.PrivateIpAddress)
d.Set("iam_instance_profile", iamInstanceProfileArnToName(instance.IamInstanceProfile))
// iterate through network interfaces, and set subnet, network_interface, public_addr
if len(instance.NetworkInterfaces) > 0 {
for _, ni := range instance.NetworkInterfaces {
if *ni.Attachment.DeviceIndex == 0 {
d.Set("subnet_id", ni.SubnetId)
d.Set("network_interface_id", ni.NetworkInterfaceId)
d.Set("associate_public_ip_address", ni.Association != nil)
}
}
} else {
d.Set("subnet_id", instance.SubnetId)
d.Set("network_interface_id", "")
}
d.Set("ebs_optimized", instance.EbsOptimized)
if instance.SubnetId != nil && *instance.SubnetId != "" {
d.Set("source_dest_check", instance.SourceDestCheck)
}
if instance.Monitoring != nil && instance.Monitoring.State != nil {
monitoringState := *instance.Monitoring.State
d.Set("monitoring", monitoringState == "enabled" || monitoringState == "pending")
}
d.Set("tags", dataSourceTags(instance.Tags))
// Security Groups
if err := readSecurityGroups(d, instance); err != nil {
return err
}
// Block devices
if err := readBlockDevices(d, instance, conn); err != nil {
return err
}
if _, ok := d.GetOk("ephemeral_block_device"); !ok {
d.Set("ephemeral_block_device", []interface{}{})
}
// Lookup and Set Instance Attributes
{
attr, err := conn.DescribeInstanceAttribute(&ec2.DescribeInstanceAttributeInput{
Attribute: aws.String("disableApiTermination"),
InstanceId: aws.String(d.Id()),
})
if err != nil {
return err
}
d.Set("disable_api_termination", attr.DisableApiTermination.Value)
}
{
attr, err := conn.DescribeInstanceAttribute(&ec2.DescribeInstanceAttributeInput{
Attribute: aws.String(ec2.InstanceAttributeNameUserData),
InstanceId: aws.String(d.Id()),
})
if err != nil {
return err
}
if attr.UserData.Value != nil {
d.Set("user_data", userDataHashSum(*attr.UserData.Value))
}
}
return nil
}

View File

@ -0,0 +1,151 @@
package aws
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"sort"
"strconv"
"strings"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/terraform/helper/schema"
)
type dataSourceAwsIPRangesResult struct {
CreateDate string
Prefixes []dataSourceAwsIPRangesPrefix
SyncToken string
}
type dataSourceAwsIPRangesPrefix struct {
IpPrefix string `json:"ip_prefix"`
Region string
Service string
}
func dataSourceAwsIPRanges() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsIPRangesRead,
Schema: map[string]*schema.Schema{
"cidr_blocks": &schema.Schema{
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"create_date": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"regions": &schema.Schema{
Type: schema.TypeSet,
Elem: &schema.Schema{Type: schema.TypeString},
Optional: true,
},
"services": &schema.Schema{
Type: schema.TypeSet,
Required: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"sync_token": &schema.Schema{
Type: schema.TypeInt,
Computed: true,
},
},
}
}
func dataSourceAwsIPRangesRead(d *schema.ResourceData, meta interface{}) error {
conn := cleanhttp.DefaultClient()
log.Printf("[DEBUG] Reading IP ranges")
res, err := conn.Get("https://ip-ranges.amazonaws.com/ip-ranges.json")
if err != nil {
return fmt.Errorf("Error listing IP ranges: %s", err)
}
defer res.Body.Close()
data, err := ioutil.ReadAll(res.Body)
if err != nil {
return fmt.Errorf("Error reading response body: %s", err)
}
result := new(dataSourceAwsIPRangesResult)
if err := json.Unmarshal(data, result); err != nil {
return fmt.Errorf("Error parsing result: %s", err)
}
if err := d.Set("create_date", result.CreateDate); err != nil {
return fmt.Errorf("Error setting create date: %s", err)
}
syncToken, err := strconv.Atoi(result.SyncToken)
if err != nil {
return fmt.Errorf("Error while converting sync token: %s", err)
}
d.SetId(result.SyncToken)
if err := d.Set("sync_token", syncToken); err != nil {
return fmt.Errorf("Error setting sync token: %s", err)
}
get := func(key string) *schema.Set {
set := d.Get(key).(*schema.Set)
for _, e := range set.List() {
s := e.(string)
set.Remove(s)
set.Add(strings.ToLower(s))
}
return set
}
var (
regions = get("regions")
services = get("services")
noRegionFilter = regions.Len() == 0
prefixes []string
)
for _, e := range result.Prefixes {
var (
matchRegion = noRegionFilter || regions.Contains(strings.ToLower(e.Region))
matchService = services.Contains(strings.ToLower(e.Service))
)
if matchRegion && matchService {
prefixes = append(prefixes, e.IpPrefix)
}
}
if len(prefixes) == 0 {
return fmt.Errorf(" No IP ranges result from filters")
}
sort.Strings(prefixes)
if err := d.Set("cidr_blocks", prefixes); err != nil {
return fmt.Errorf("Error setting ip ranges: %s", err)
}
return nil
}
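A compact sketch of the matching rule used above, restated with plain string-keyed maps instead of schema.Set for clarity (examplePrefixMatches is illustrative and not part of the data source): regions and services are compared case-insensitively, and an empty regions filter matches every region while the service must always be one of those requested.
// examplePrefixMatches("eu-west-1", "EC2", nil, map[string]bool{"ec2": true}) == true
// examplePrefixMatches("eu-west-1", "EC2", map[string]bool{"us-east-1": true}, map[string]bool{"ec2": true}) == false
func examplePrefixMatches(prefixRegion, prefixService string, regions, services map[string]bool) bool {
	matchRegion := len(regions) == 0 || regions[strings.ToLower(prefixRegion)]
	matchService := services[strings.ToLower(prefixService)]
	return matchRegion && matchService
}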

View File

@ -0,0 +1,95 @@
package aws
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/kinesis"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsKinesisStream() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsKinesisStreamRead,
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"arn": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"creation_timestamp": &schema.Schema{
Type: schema.TypeInt,
Computed: true,
},
"status": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"retention_period": &schema.Schema{
Type: schema.TypeInt,
Computed: true,
},
"open_shards": &schema.Schema{
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"closed_shards": &schema.Schema{
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"shard_level_metrics": &schema.Schema{
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"tags": &schema.Schema{
Type: schema.TypeMap,
Computed: true,
},
},
}
}
func dataSourceAwsKinesisStreamRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).kinesisconn
sn := d.Get("name").(string)
state, err := readKinesisStreamState(conn, sn)
if err != nil {
return err
}
d.SetId(state.arn)
d.Set("arn", state.arn)
d.Set("name", sn)
d.Set("open_shards", state.openShards)
d.Set("closed_shards", state.closedShards)
d.Set("status", state.status)
d.Set("creation_timestamp", state.creationTimestamp)
d.Set("retention_period", state.retentionPeriod)
d.Set("shard_level_metrics", state.shardLevelMetrics)
tags, err := conn.ListTagsForStream(&kinesis.ListTagsForStreamInput{
StreamName: aws.String(sn),
})
if err != nil {
return err
}
d.Set("tags", tagsToMapKinesis(tags.Tags))
return nil
}

View File

@ -0,0 +1,62 @@
package aws
import (
"fmt"
"time"
"github.com/aws/aws-sdk-go/service/kms"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsKmsAlias() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsKmsAliasRead,
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validateAwsKmsName,
},
"arn": {
Type: schema.TypeString,
Computed: true,
},
"target_key_id": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func dataSourceAwsKmsAliasRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).kmsconn
params := &kms.ListAliasesInput{}
target := d.Get("name")
var alias *kms.AliasListEntry
err := conn.ListAliasesPages(params, func(page *kms.ListAliasesOutput, lastPage bool) bool {
for _, entity := range page.Aliases {
if *entity.AliasName == target {
alias = entity
return false
}
}
return true
})
if err != nil {
return errwrap.Wrapf("Error fetch KMS alias list: {{err}}", err)
}
if alias == nil {
return fmt.Errorf("No alias with name %q found in this region.", target)
}
d.SetId(time.Now().UTC().String())
d.Set("arn", alias.AliasArn)
d.Set("target_key_id", alias.TargetKeyId)
return nil
}

View File

@ -0,0 +1,66 @@
package aws
import (
"encoding/base64"
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/kms"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsKmsCiphetext() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsKmsCiphetextRead,
Schema: map[string]*schema.Schema{
"plaintext": {
Type: schema.TypeString,
Required: true,
},
"key_id": {
Type: schema.TypeString,
Required: true,
},
"context": &schema.Schema{
Type: schema.TypeMap,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"ciphertext_blob": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func dataSourceAwsKmsCiphetextRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).kmsconn
d.SetId(time.Now().UTC().String())
req := &kms.EncryptInput{
KeyId: aws.String(d.Get("key_id").(string)),
Plaintext: []byte(d.Get("plaintext").(string)),
}
if ec := d.Get("context"); ec != nil {
req.EncryptionContext = stringMapToPointers(ec.(map[string]interface{}))
}
log.Printf("[DEBUG] KMS encrypt for key: %s", d.Get("key_id").(string))
resp, err := conn.Encrypt(req)
if err != nil {
return err
}
d.Set("ciphertext_blob", base64.StdEncoding.EncodeToString(resp.CiphertextBlob))
return nil
}

View File

@ -0,0 +1,99 @@
package aws
import (
"encoding/base64"
"fmt"
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/kms"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsKmsSecret() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsKmsSecretRead,
Schema: map[string]*schema.Schema{
"secret": &schema.Schema{
Type: schema.TypeSet,
Required: true,
ForceNew: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"payload": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"context": &schema.Schema{
Type: schema.TypeMap,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"grant_tokens": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
},
},
},
"__has_dynamic_attributes": {
Type: schema.TypeString,
Optional: true,
},
},
}
}
// dataSourceAwsKmsSecretRead decrypts the specified secrets
func dataSourceAwsKmsSecretRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).kmsconn
secrets := d.Get("secret").(*schema.Set)
d.SetId(time.Now().UTC().String())
for _, v := range secrets.List() {
secret := v.(map[string]interface{})
// base64 decode the payload
payload, err := base64.StdEncoding.DecodeString(secret["payload"].(string))
if err != nil {
return fmt.Errorf("Invalid base64 value for secret '%s': %v", secret["name"].(string), err)
}
// build the kms decrypt params
params := &kms.DecryptInput{
CiphertextBlob: []byte(payload),
}
if context, exists := secret["context"]; exists {
params.EncryptionContext = make(map[string]*string)
for k, v := range context.(map[string]interface{}) {
params.EncryptionContext[k] = aws.String(v.(string))
}
}
		if grantTokens, exists := secret["grant_tokens"]; exists {
			params.GrantTokens = make([]*string, 0)
			for _, v := range grantTokens.([]interface{}) {
				params.GrantTokens = append(params.GrantTokens, aws.String(v.(string)))
}
}
// decrypt
resp, err := conn.Decrypt(params)
if err != nil {
return fmt.Errorf("Failed to decrypt '%s': %s", secret["name"].(string), err)
}
// Set the secret via the name
log.Printf("[DEBUG] aws_kms_secret - successfully decrypted secret: %s", secret["name"].(string))
d.UnsafeSetFieldRaw(secret["name"].(string), string(resp.Plaintext))
}
return nil
}

View File

@ -0,0 +1,33 @@
package aws
import (
"log"
"time"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsPartition() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsPartitionRead,
Schema: map[string]*schema.Schema{
"partition": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func dataSourceAwsPartitionRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*AWSClient)
log.Printf("[DEBUG] Reading Partition.")
d.SetId(time.Now().UTC().String())
log.Printf("[DEBUG] Setting AWS Partition to %s.", client.partition)
d.Set("partition", meta.(*AWSClient).partition)
return nil
}

View File

@ -0,0 +1,76 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsPrefixList() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsPrefixListRead,
Schema: map[string]*schema.Schema{
"prefix_list_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
// Computed values.
"id": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"cidr_blocks": &schema.Schema{
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
},
}
}
func dataSourceAwsPrefixListRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
req := &ec2.DescribePrefixListsInput{}
if prefixListID := d.Get("prefix_list_id"); prefixListID != "" {
req.PrefixListIds = aws.StringSlice([]string{prefixListID.(string)})
}
req.Filters = buildEC2AttributeFilterList(
map[string]string{
"prefix-list-name": d.Get("name").(string),
},
)
log.Printf("[DEBUG] DescribePrefixLists %s\n", req)
resp, err := conn.DescribePrefixLists(req)
if err != nil {
return err
}
if resp == nil || len(resp.PrefixLists) == 0 {
return fmt.Errorf("no matching prefix list found; the prefix list ID or name may be invalid or not exist in the current region")
}
pl := resp.PrefixLists[0]
d.SetId(*pl.PrefixListId)
d.Set("id", pl.PrefixListId)
d.Set("name", pl.PrefixListName)
cidrs := make([]string, len(pl.Cidrs))
for i, v := range pl.Cidrs {
cidrs[i] = *v
}
d.Set("cidr_blocks", cidrs)
return nil
}

View File

@ -0,0 +1,50 @@
package aws
import (
"fmt"
"github.com/hashicorp/terraform/helper/schema"
)
// See http://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html#db-auditing-enable-logging
var redshiftServiceAccountPerRegionMap = map[string]string{
"us-east-1": "193672423079",
"us-east-2": "391106570357",
"us-west-1": "262260360010",
"us-west-2": "902366379725",
"ap-south-1": "865932855811",
"ap-northeast-2": "760740231472",
"ap-southeast-1": "361669875840",
"ap-southeast-2": "762762565011",
"ap-northeast-1": "404641285394",
"ca-central-1": "907379612154",
"eu-central-1": "053454850223",
"eu-west-1": "210876761215",
}
func dataSourceAwsRedshiftServiceAccount() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsRedshiftServiceAccountRead,
Schema: map[string]*schema.Schema{
"region": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
},
}
}
func dataSourceAwsRedshiftServiceAccountRead(d *schema.ResourceData, meta interface{}) error {
region := meta.(*AWSClient).region
if v, ok := d.GetOk("region"); ok {
region = v.(string)
}
if accid, ok := redshiftServiceAccountPerRegionMap[region]; ok {
d.SetId(accid)
return nil
}
return fmt.Errorf("Unknown region (%q)", region)
}

View File

@ -0,0 +1,84 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsRegion() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsRegionRead,
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"current": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Computed: true,
},
"endpoint": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
},
}
}
func dataSourceAwsRegionRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
currentRegion := meta.(*AWSClient).region
req := &ec2.DescribeRegionsInput{}
req.RegionNames = make([]*string, 0, 2)
if name := d.Get("name").(string); name != "" {
req.RegionNames = append(req.RegionNames, aws.String(name))
}
if d.Get("current").(bool) {
req.RegionNames = append(req.RegionNames, aws.String(currentRegion))
}
req.Filters = buildEC2AttributeFilterList(
map[string]string{
"endpoint": d.Get("endpoint").(string),
},
)
if len(req.Filters) == 0 {
// Don't send an empty filters list; the EC2 API won't accept it.
req.Filters = nil
}
log.Printf("[DEBUG] DescribeRegions %s\n", req)
resp, err := conn.DescribeRegions(req)
if err != nil {
return err
}
if resp == nil || len(resp.Regions) == 0 {
return fmt.Errorf("no matching regions found")
}
if len(resp.Regions) > 1 {
return fmt.Errorf("multiple regions matched; use additional constraints to reduce matches to a single region")
}
region := resp.Regions[0]
d.SetId(*region.RegionName)
d.Set("id", region.RegionName)
d.Set("name", region.RegionName)
d.Set("endpoint", region.Endpoint)
d.Set("current", *region.RegionName == currentRegion)
return nil
}

View File

@ -0,0 +1,176 @@
package aws
import (
"fmt"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/route53"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsRoute53Zone() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsRoute53ZoneRead,
Schema: map[string]*schema.Schema{
"zone_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"name": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"private_zone": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"comment": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"caller_reference": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"vpc_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"tags": tagsSchemaComputed(),
"resource_record_set_count": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
},
}
}
func dataSourceAwsRoute53ZoneRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).r53conn
name, nameExists := d.GetOk("name")
name = hostedZoneName(name.(string))
id, idExists := d.GetOk("zone_id")
vpcId, vpcIdExists := d.GetOk("vpc_id")
tags := tagsFromMap(d.Get("tags").(map[string]interface{}))
if nameExists && idExists {
return fmt.Errorf("zone_id and name arguments can't be used together")
} else if !nameExists && !idExists {
return fmt.Errorf("Either name or zone_id must be set")
}
var nextMarker *string
var hostedZoneFound *route53.HostedZone
	// Loop through all hosted zones until a match is found or all pages are listed
for allHostedZoneListed := false; !allHostedZoneListed; {
req := &route53.ListHostedZonesInput{}
if nextMarker != nil {
req.Marker = nextMarker
}
resp, err := conn.ListHostedZones(req)
if err != nil {
return fmt.Errorf("Error finding Route 53 Hosted Zone: %v", err)
}
for _, hostedZone := range resp.HostedZones {
hostedZoneId := cleanZoneID(*hostedZone.Id)
if idExists && hostedZoneId == id.(string) {
hostedZoneFound = hostedZone
break
				// Otherwise, match on the requested name and require either the same
				// private_zone setting, or a private zone when a vpc_id was supplied.
			} else if *hostedZone.Name == name && (*hostedZone.Config.PrivateZone == d.Get("private_zone").(bool) || (*hostedZone.Config.PrivateZone && vpcIdExists)) {
matchingVPC := false
if vpcIdExists {
reqHostedZone := &route53.GetHostedZoneInput{}
reqHostedZone.Id = aws.String(hostedZoneId)
respHostedZone, errHostedZone := conn.GetHostedZone(reqHostedZone)
if errHostedZone != nil {
return fmt.Errorf("Error finding Route 53 Hosted Zone: %v", errHostedZone)
}
// we go through all VPCs
for _, vpc := range respHostedZone.VPCs {
if *vpc.VPCId == vpcId.(string) {
matchingVPC = true
break
}
}
} else {
matchingVPC = true
}
// we check if tags match
matchingTags := true
if len(tags) > 0 {
reqListTags := &route53.ListTagsForResourceInput{}
reqListTags.ResourceId = aws.String(hostedZoneId)
reqListTags.ResourceType = aws.String("hostedzone")
respListTags, errListTags := conn.ListTagsForResource(reqListTags)
if errListTags != nil {
return fmt.Errorf("Error finding Route 53 Hosted Zone: %v", errListTags)
}
for _, tag := range tags {
found := false
for _, tagRequested := range respListTags.ResourceTagSet.Tags {
if *tag.Key == *tagRequested.Key && *tag.Value == *tagRequested.Value {
found = true
}
}
if !found {
matchingTags = false
break
}
}
}
if matchingTags && matchingVPC {
if hostedZoneFound != nil {
return fmt.Errorf("multiple Route53Zone found please use vpc_id option to filter")
} else {
hostedZoneFound = hostedZone
}
}
}
}
if *resp.IsTruncated {
nextMarker = resp.NextMarker
} else {
allHostedZoneListed = true
}
}
if hostedZoneFound == nil {
return fmt.Errorf("no matching Route53Zone found")
}
idHostedZone := cleanZoneID(*hostedZoneFound.Id)
d.SetId(idHostedZone)
d.Set("zone_id", idHostedZone)
d.Set("name", hostedZoneFound.Name)
d.Set("comment", hostedZoneFound.Config.Comment)
d.Set("private_zone", hostedZoneFound.Config.PrivateZone)
d.Set("caller_reference", hostedZoneFound.CallerReference)
d.Set("resource_record_set_count", hostedZoneFound.ResourceRecordSetCount)
return nil
}
// hostedZoneName normalizes a zone name by ensuring it carries the trailing dot
// that Route 53 uses when storing zone names.
func hostedZoneName(name string) string {
if strings.HasSuffix(name, ".") {
return name
} else {
return name + "."
}
}
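A minimal sketch of the trailing-dot handling above: Route 53 stores zone names with a trailing dot, so hostedZoneName lets a configuration use either spelling and still match *hostedZone.Name. The domain is illustrative.
func exampleHostedZoneName() {
	fmt.Println(hostedZoneName("example.com"))  // example.com.
	fmt.Println(hostedZoneName("example.com.")) // example.com. (unchanged)
}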

View File

@ -0,0 +1,233 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsRouteTable() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsRouteTableRead,
Schema: map[string]*schema.Schema{
"subnet_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"route_table_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"vpc_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"filter": ec2CustomFiltersSchema(),
"tags": tagsSchemaComputed(),
"routes": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cidr_block": {
Type: schema.TypeString,
Computed: true,
},
"ipv6_cidr_block": {
Type: schema.TypeString,
Computed: true,
},
"egress_only_gateway_id": {
Type: schema.TypeString,
Computed: true,
},
"gateway_id": {
Type: schema.TypeString,
Computed: true,
},
"instance_id": {
Type: schema.TypeString,
Computed: true,
},
"nat_gateway_id": {
Type: schema.TypeString,
Computed: true,
},
"vpc_peering_connection_id": {
Type: schema.TypeString,
Computed: true,
},
"network_interface_id": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"associations": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"route_table_association_id": {
Type: schema.TypeString,
Computed: true,
},
"route_table_id": {
Type: schema.TypeString,
Computed: true,
},
"subnet_id": {
Type: schema.TypeString,
Computed: true,
},
"main": {
Type: schema.TypeBool,
Computed: true,
},
},
},
},
},
}
}
func dataSourceAwsRouteTableRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
req := &ec2.DescribeRouteTablesInput{}
vpcId, vpcIdOk := d.GetOk("vpc_id")
subnetId, subnetIdOk := d.GetOk("subnet_id")
rtbId, rtbOk := d.GetOk("route_table_id")
tags, tagsOk := d.GetOk("tags")
filter, filterOk := d.GetOk("filter")
if !vpcIdOk && !subnetIdOk && !tagsOk && !filterOk && !rtbOk {
return fmt.Errorf("One of route_table_id, vpc_id, subnet_id, filters, or tags must be assigned")
}
req.Filters = buildEC2AttributeFilterList(
map[string]string{
"route-table-id": rtbId.(string),
"vpc-id": vpcId.(string),
"association.subnet-id": subnetId.(string),
},
)
req.Filters = append(req.Filters, buildEC2TagFilterList(
tagsFromMap(tags.(map[string]interface{})),
)...)
req.Filters = append(req.Filters, buildEC2CustomFilterList(
filter.(*schema.Set),
)...)
log.Printf("[DEBUG] Describe Route Tables %v\n", req)
resp, err := conn.DescribeRouteTables(req)
if err != nil {
return err
}
if resp == nil || len(resp.RouteTables) == 0 {
return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
}
if len(resp.RouteTables) > 1 {
return fmt.Errorf("Multiple Route Table matched; use additional constraints to reduce matches to a single Route Table")
}
rt := resp.RouteTables[0]
d.SetId(aws.StringValue(rt.RouteTableId))
d.Set("route_table_id", rt.RouteTableId)
d.Set("vpc_id", rt.VpcId)
d.Set("tags", tagsToMap(rt.Tags))
if err := d.Set("routes", dataSourceRoutesRead(rt.Routes)); err != nil {
return err
}
if err := d.Set("associations", dataSourceAssociationsRead(rt.Associations)); err != nil {
return err
}
return nil
}
func dataSourceRoutesRead(ec2Routes []*ec2.Route) []map[string]interface{} {
routes := make([]map[string]interface{}, 0, len(ec2Routes))
	// Loop through the routes, skipping implicit and propagated entries, and collect the rest
for _, r := range ec2Routes {
if r.GatewayId != nil && *r.GatewayId == "local" {
continue
}
if r.Origin != nil && *r.Origin == "EnableVgwRoutePropagation" {
continue
}
if r.DestinationPrefixListId != nil {
// Skipping because VPC endpoint routes are handled separately
// See aws_vpc_endpoint
continue
}
m := make(map[string]interface{})
if r.DestinationCidrBlock != nil {
m["cidr_block"] = *r.DestinationCidrBlock
}
if r.DestinationIpv6CidrBlock != nil {
m["ipv6_cidr_block"] = *r.DestinationIpv6CidrBlock
}
if r.EgressOnlyInternetGatewayId != nil {
m["egress_only_gateway_id"] = *r.EgressOnlyInternetGatewayId
}
if r.GatewayId != nil {
m["gateway_id"] = *r.GatewayId
}
if r.NatGatewayId != nil {
m["nat_gateway_id"] = *r.NatGatewayId
}
if r.InstanceId != nil {
m["instance_id"] = *r.InstanceId
}
if r.VpcPeeringConnectionId != nil {
m["vpc_peering_connection_id"] = *r.VpcPeeringConnectionId
}
if r.NetworkInterfaceId != nil {
m["network_interface_id"] = *r.NetworkInterfaceId
}
routes = append(routes, m)
}
return routes
}
func dataSourceAssociationsRead(ec2Associations []*ec2.RouteTableAssociation) []map[string]interface{} {
	associations := make([]map[string]interface{}, 0, len(ec2Associations))
	// Loop through the associations and add them to the slice
	for _, a := range ec2Associations {
m := make(map[string]interface{})
m["route_table_id"] = *a.RouteTableId
m["route_table_association_id"] = *a.RouteTableAssociationId
		// GH-11134: SubnetId can be nil, e.g. for the main route table association
if a.SubnetId != nil {
m["subnet_id"] = *a.SubnetId
}
m["main"] = *a.Main
associations = append(associations, m)
}
return associations
}
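A short sketch of the filtering performed by dataSourceRoutesRead above, using made-up gateway and NAT gateway IDs: the implicit local route and VGW-propagated routes are dropped, so only the explicitly managed route survives.
func exampleRouteFiltering() {
	routes := dataSourceRoutesRead([]*ec2.Route{
		{DestinationCidrBlock: aws.String("10.0.0.0/16"), GatewayId: aws.String("local")}, // implicit local route: skipped
		{DestinationCidrBlock: aws.String("172.16.0.0/16"), GatewayId: aws.String("vgw-12345678"),
			Origin: aws.String("EnableVgwRoutePropagation")}, // propagated route: skipped
		{DestinationCidrBlock: aws.String("0.0.0.0/0"), NatGatewayId: aws.String("nat-12345678")},
	})
	fmt.Println(len(routes), routes[0]["nat_gateway_id"]) // 1 nat-12345678
}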

View File

@ -0,0 +1,239 @@
package aws
import (
"bytes"
"fmt"
"log"
"regexp"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsS3BucketObject() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsS3BucketObjectRead,
Schema: map[string]*schema.Schema{
"body": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"bucket": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"cache_control": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"content_disposition": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"content_encoding": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"content_language": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"content_length": &schema.Schema{
Type: schema.TypeInt,
Computed: true,
},
"content_type": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"etag": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"expiration": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"expires": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"key": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"last_modified": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"metadata": &schema.Schema{
Type: schema.TypeMap,
Computed: true,
},
"range": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"server_side_encryption": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"sse_kms_key_id": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"storage_class": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"version_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"website_redirect_location": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"tags": tagsSchemaComputed(),
},
}
}
func dataSourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).s3conn
bucket := d.Get("bucket").(string)
key := d.Get("key").(string)
input := s3.HeadObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
}
if v, ok := d.GetOk("range"); ok {
input.Range = aws.String(v.(string))
}
if v, ok := d.GetOk("version_id"); ok {
input.VersionId = aws.String(v.(string))
}
versionText := ""
uniqueId := bucket + "/" + key
if v, ok := d.GetOk("version_id"); ok {
versionText = fmt.Sprintf(" of version %q", v.(string))
uniqueId += "@" + v.(string)
}
log.Printf("[DEBUG] Reading S3 object: %s", input)
out, err := conn.HeadObject(&input)
if err != nil {
return fmt.Errorf("Failed getting S3 object: %s Bucket: %q Object: %q", err, bucket, key)
}
	if out.DeleteMarker != nil && *out.DeleteMarker {
		return fmt.Errorf("Requested S3 object %q%s has been deleted",
			bucket+"/"+key, versionText)
}
log.Printf("[DEBUG] Received S3 object: %s", out)
d.SetId(uniqueId)
d.Set("cache_control", out.CacheControl)
d.Set("content_disposition", out.ContentDisposition)
d.Set("content_encoding", out.ContentEncoding)
d.Set("content_language", out.ContentLanguage)
d.Set("content_length", out.ContentLength)
d.Set("content_type", out.ContentType)
// See https://forums.aws.amazon.com/thread.jspa?threadID=44003
d.Set("etag", strings.Trim(*out.ETag, `"`))
d.Set("expiration", out.Expiration)
d.Set("expires", out.Expires)
d.Set("last_modified", out.LastModified.Format(time.RFC1123))
d.Set("metadata", pointersMapToStringList(out.Metadata))
d.Set("server_side_encryption", out.ServerSideEncryption)
d.Set("sse_kms_key_id", out.SSEKMSKeyId)
d.Set("version_id", out.VersionId)
d.Set("website_redirect_location", out.WebsiteRedirectLocation)
// The "STANDARD" (which is also the default) storage
// class when set would not be included in the results.
d.Set("storage_class", s3.StorageClassStandard)
if out.StorageClass != nil {
d.Set("storage_class", out.StorageClass)
}
if isContentTypeAllowed(out.ContentType) {
input := s3.GetObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
}
if v, ok := d.GetOk("range"); ok {
input.Range = aws.String(v.(string))
}
if out.VersionId != nil {
input.VersionId = out.VersionId
}
out, err := conn.GetObject(&input)
if err != nil {
return fmt.Errorf("Failed getting S3 object: %s", err)
}
buf := new(bytes.Buffer)
bytesRead, err := buf.ReadFrom(out.Body)
if err != nil {
return fmt.Errorf("Failed reading content of S3 object (%s): %s",
uniqueId, err)
}
log.Printf("[INFO] Saving %d bytes from S3 object %s", bytesRead, uniqueId)
d.Set("body", buf.String())
} else {
contentType := ""
if out.ContentType == nil {
contentType = "<EMPTY>"
} else {
contentType = *out.ContentType
}
log.Printf("[INFO] Ignoring body of S3 object %s with Content-Type %q",
uniqueId, contentType)
}
tagResp, err := conn.GetObjectTagging(
&s3.GetObjectTaggingInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
})
if err != nil {
return err
}
d.Set("tags", tagsToMapS3(tagResp.TagSet))
return nil
}
// This is to prevent potential issues w/ binary files
// and generally unprintable characters
// See https://github.com/hashicorp/terraform/pull/3858#issuecomment-156856738
func isContentTypeAllowed(contentType *string) bool {
if contentType == nil {
return false
}
allowedContentTypes := []*regexp.Regexp{
regexp.MustCompile("^text/.+"),
regexp.MustCompile("^application/json$"),
}
for _, r := range allowedContentTypes {
if r.MatchString(*contentType) {
return true
}
}
return false
}
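A small sketch of the gate above: only text/* and application/json objects have their body copied into state; anything else, or a missing Content-Type, is skipped.
func exampleContentTypeGate() {
	fmt.Println(isContentTypeAllowed(aws.String("text/plain")))       // true
	fmt.Println(isContentTypeAllowed(aws.String("application/json"))) // true
	fmt.Println(isContentTypeAllowed(aws.String("application/zip")))  // false
	fmt.Println(isContentTypeAllowed(nil))                            // false
}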

View File

@ -0,0 +1,94 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsSecurityGroup() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsSecurityGroupRead,
Schema: map[string]*schema.Schema{
"vpc_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"name": {
Type: schema.TypeString,
Optional: true,
Computed: true,
			},
			"description": {
				Type:     schema.TypeString,
				Computed: true,
			},
"filter": ec2CustomFiltersSchema(),
"id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"arn": {
Type: schema.TypeString,
Computed: true,
},
"tags": tagsSchemaComputed(),
},
}
}
func dataSourceAwsSecurityGroupRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
req := &ec2.DescribeSecurityGroupsInput{}
if id, idExists := d.GetOk("id"); idExists {
req.GroupIds = []*string{aws.String(id.(string))}
}
req.Filters = buildEC2AttributeFilterList(
map[string]string{
"group-name": d.Get("name").(string),
"vpc-id": d.Get("vpc_id").(string),
},
)
req.Filters = append(req.Filters, buildEC2TagFilterList(
tagsFromMap(d.Get("tags").(map[string]interface{})),
)...)
req.Filters = append(req.Filters, buildEC2CustomFilterList(
d.Get("filter").(*schema.Set),
)...)
if len(req.Filters) == 0 {
// Don't send an empty filters list; the EC2 API won't accept it.
req.Filters = nil
}
log.Printf("[DEBUG] Describe Security Groups %v\n", req)
resp, err := conn.DescribeSecurityGroups(req)
if err != nil {
return err
}
if resp == nil || len(resp.SecurityGroups) == 0 {
return fmt.Errorf("no matching SecurityGroup found")
}
if len(resp.SecurityGroups) > 1 {
return fmt.Errorf("multiple Security Groups matched; use additional constraints to reduce matches to a single Security Group")
}
sg := resp.SecurityGroups[0]
d.SetId(*sg.GroupId)
d.Set("id", sg.VpcId)
d.Set("name", sg.GroupName)
d.Set("description", sg.Description)
d.Set("vpc_id", sg.VpcId)
d.Set("tags", tagsToMap(sg.Tags))
d.Set("arn", fmt.Sprintf("arn:%s:ec2:%s:%s/security-group/%s",
meta.(*AWSClient).partition, meta.(*AWSClient).region, *sg.OwnerId, *sg.GroupId))
return nil
}

View File

@ -0,0 +1,71 @@
package aws
import (
"fmt"
"regexp"
"time"
"github.com/aws/aws-sdk-go/service/sns"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsSnsTopic() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsSnsTopicsRead,
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
validNamePattern := "^[A-Za-z0-9_-]+$"
validName, nameMatchErr := regexp.MatchString(validNamePattern, value)
if !validName || nameMatchErr != nil {
errors = append(errors, fmt.Errorf(
"%q must match regex '%v'", k, validNamePattern))
}
return
},
},
"arn": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func dataSourceAwsSnsTopicsRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).snsconn
params := &sns.ListTopicsInput{}
target := d.Get("name")
var arns []string
err := conn.ListTopicsPages(params, func(page *sns.ListTopicsOutput, lastPage bool) bool {
for _, topic := range page.Topics {
topicPattern := fmt.Sprintf(".*:%v$", target)
matched, regexpErr := regexp.MatchString(topicPattern, *topic.TopicArn)
if matched && regexpErr == nil {
arns = append(arns, *topic.TopicArn)
}
}
return true
})
if err != nil {
return errwrap.Wrapf("Error describing topics: {{err}}", err)
}
if len(arns) == 0 {
return fmt.Errorf("No topic with name %q found in this region.", target)
}
if len(arns) > 1 {
return fmt.Errorf("Multiple topics with name %q found in this region.", target)
}
d.SetId(time.Now().UTC().String())
d.Set("arn", arns[0])
return nil
}

View File

@ -0,0 +1,63 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ssm"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsSsmParameter() *schema.Resource {
return &schema.Resource{
Read: dataAwsSsmParameterRead,
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
"value": {
Type: schema.TypeString,
Computed: true,
Sensitive: true,
},
},
}
}
func dataAwsSsmParameterRead(d *schema.ResourceData, meta interface{}) error {
ssmconn := meta.(*AWSClient).ssmconn
log.Printf("[DEBUG] Reading SSM Parameter: %s", d.Id())
paramInput := &ssm.GetParametersInput{
Names: []*string{
aws.String(d.Get("name").(string)),
},
WithDecryption: aws.Bool(true),
}
resp, err := ssmconn.GetParameters(paramInput)
if err != nil {
return errwrap.Wrapf("[ERROR] Error describing SSM parameter: {{err}}", err)
}
if len(resp.InvalidParameters) > 0 {
return fmt.Errorf("[ERROR] SSM Parameter %s is invalid", d.Get("name").(string))
}
param := resp.Parameters[0]
d.SetId(*param.Name)
d.Set("name", param.Name)
d.Set("type", param.Type)
d.Set("value", param.Value)
return nil
}

View File

@@ -0,0 +1,160 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsSubnet() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsSubnetRead,
Schema: map[string]*schema.Schema{
"availability_zone": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"cidr_block": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"ipv6_cidr_block": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"default_for_az": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
},
"filter": ec2CustomFiltersSchema(),
"id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"state": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"tags": tagsSchemaComputed(),
"vpc_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"assign_ipv6_address_on_creation": {
Type: schema.TypeBool,
Computed: true,
},
"map_public_ip_on_launch": {
Type: schema.TypeBool,
Computed: true,
},
"ipv6_cidr_block_association_id": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func dataSourceAwsSubnetRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
req := &ec2.DescribeSubnetsInput{}
if id := d.Get("id"); id != "" {
req.SubnetIds = []*string{aws.String(id.(string))}
}
// We specify default_for_az as boolean, but EC2 filters want
// it to be serialized as a string. Note that setting it to
// "false" here does not actually filter by it *not* being
// the default, because Terraform can't distinguish between
// "false" and "not set".
defaultForAzStr := ""
if d.Get("default_for_az").(bool) {
defaultForAzStr = "true"
}
filters := map[string]string{
"availabilityZone": d.Get("availability_zone").(string),
"defaultForAz": defaultForAzStr,
"state": d.Get("state").(string),
"vpc-id": d.Get("vpc_id").(string),
}
if v, ok := d.GetOk("cidr_block"); ok {
filters["cidrBlock"] = v.(string)
}
if v, ok := d.GetOk("ipv6_cidr_block"); ok {
filters["ipv6-cidr-block-association.ipv6-cidr-block"] = v.(string)
}
req.Filters = buildEC2AttributeFilterList(filters)
req.Filters = append(req.Filters, buildEC2TagFilterList(
tagsFromMap(d.Get("tags").(map[string]interface{})),
)...)
req.Filters = append(req.Filters, buildEC2CustomFilterList(
d.Get("filter").(*schema.Set),
)...)
if len(req.Filters) == 0 {
// Don't send an empty filters list; the EC2 API won't accept it.
req.Filters = nil
}
log.Printf("[DEBUG] DescribeSubnets %s\n", req)
resp, err := conn.DescribeSubnets(req)
if err != nil {
return err
}
if resp == nil || len(resp.Subnets) == 0 {
return fmt.Errorf("no matching subnet found")
}
if len(resp.Subnets) > 1 {
return fmt.Errorf("multiple subnets matched; use additional constraints to reduce matches to a single subnet")
}
subnet := resp.Subnets[0]
d.SetId(*subnet.SubnetId)
d.Set("id", subnet.SubnetId)
d.Set("vpc_id", subnet.VpcId)
d.Set("availability_zone", subnet.AvailabilityZone)
d.Set("cidr_block", subnet.CidrBlock)
d.Set("default_for_az", subnet.DefaultForAz)
d.Set("state", subnet.State)
d.Set("tags", tagsToMap(subnet.Tags))
d.Set("assign_ipv6_address_on_creation", subnet.AssignIpv6AddressOnCreation)
d.Set("map_public_ip_on_launch", subnet.MapPublicIpOnLaunch)
for _, a := range subnet.Ipv6CidrBlockAssociationSet {
if *a.Ipv6CidrBlockState.State == "associated" { // we can only ever have 1 IPv6 block associated at once
d.Set("ipv6_cidr_block_association_id", a.AssociationId)
d.Set("ipv6_cidr_block", a.Ipv6CidrBlock)
}
}
return nil
}

View File

@@ -0,0 +1,68 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsSubnetIDs() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsSubnetIDsRead,
Schema: map[string]*schema.Schema{
"tags": tagsSchemaComputed(),
"vpc_id": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"ids": &schema.Schema{
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
},
}
}
func dataSourceAwsSubnetIDsRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
req := &ec2.DescribeSubnetsInput{}
req.Filters = buildEC2AttributeFilterList(
map[string]string{
"vpc-id": d.Get("vpc_id").(string),
},
)
req.Filters = append(req.Filters, buildEC2TagFilterList(
tagsFromMap(d.Get("tags").(map[string]interface{})),
)...)
log.Printf("[DEBUG] DescribeSubnets %s\n", req)
resp, err := conn.DescribeSubnets(req)
if err != nil {
return err
}
if resp == nil || len(resp.Subnets) == 0 {
return fmt.Errorf("no matching subnet found for vpc with id %s", d.Get("vpc_id").(string))
}
subnets := make([]string, 0)
for _, subnet := range resp.Subnets {
subnets = append(subnets, *subnet.SubnetId)
}
d.SetId(d.Get("vpc_id").(string))
d.Set("ids", subnets)
return nil
}

View File

@@ -0,0 +1,136 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsVpc() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsVpcRead,
Schema: map[string]*schema.Schema{
"cidr_block": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"dhcp_options_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"default": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
},
"filter": ec2CustomFiltersSchema(),
"id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"instance_tenancy": {
Type: schema.TypeString,
Computed: true,
},
"ipv6_cidr_block": {
Type: schema.TypeString,
Computed: true,
},
"ipv6_association_id": {
Type: schema.TypeString,
Computed: true,
},
"state": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"tags": tagsSchemaComputed(),
},
}
}
func dataSourceAwsVpcRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
req := &ec2.DescribeVpcsInput{}
if id := d.Get("id"); id != "" {
req.VpcIds = []*string{aws.String(id.(string))}
}
// We specify "default" as boolean, but EC2 filters want
// it to be serialized as a string. Note that setting it to
// "false" here does not actually filter by it *not* being
// the default, because Terraform can't distinguish between
// "false" and "not set".
isDefaultStr := ""
if d.Get("default").(bool) {
isDefaultStr = "true"
}
req.Filters = buildEC2AttributeFilterList(
map[string]string{
"cidr": d.Get("cidr_block").(string),
"dhcp-options-id": d.Get("dhcp_options_id").(string),
"isDefault": isDefaultStr,
"state": d.Get("state").(string),
},
)
req.Filters = append(req.Filters, buildEC2TagFilterList(
tagsFromMap(d.Get("tags").(map[string]interface{})),
)...)
req.Filters = append(req.Filters, buildEC2CustomFilterList(
d.Get("filter").(*schema.Set),
)...)
if len(req.Filters) == 0 {
// Don't send an empty filters list; the EC2 API won't accept it.
req.Filters = nil
}
log.Printf("[DEBUG] DescribeVpcs %s\n", req)
resp, err := conn.DescribeVpcs(req)
if err != nil {
return err
}
if resp == nil || len(resp.Vpcs) == 0 {
return fmt.Errorf("no matching VPC found")
}
if len(resp.Vpcs) > 1 {
return fmt.Errorf("multiple VPCs matched; use additional constraints to reduce matches to a single VPC")
}
vpc := resp.Vpcs[0]
d.SetId(*vpc.VpcId)
d.Set("id", vpc.VpcId)
d.Set("cidr_block", vpc.CidrBlock)
d.Set("dhcp_options_id", vpc.DhcpOptionsId)
d.Set("instance_tenancy", vpc.InstanceTenancy)
d.Set("default", vpc.IsDefault)
d.Set("state", vpc.State)
d.Set("tags", tagsToMap(vpc.Tags))
if vpc.Ipv6CidrBlockAssociationSet != nil {
d.Set("ipv6_association_id", vpc.Ipv6CidrBlockAssociationSet[0].AssociationId)
d.Set("ipv6_cidr_block", vpc.Ipv6CidrBlockAssociationSet[0].Ipv6CidrBlock)
}
return nil
}

View File

@@ -0,0 +1,103 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsVpcEndpoint() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsVpcEndpointRead,
Schema: map[string]*schema.Schema{
"id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"state": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"vpc_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"service_name": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"policy": {
Type: schema.TypeString,
Computed: true,
},
"route_table_ids": &schema.Schema{
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
},
}
}
func dataSourceAwsVpcEndpointRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
log.Printf("[DEBUG] Reading VPC Endpoints.")
req := &ec2.DescribeVpcEndpointsInput{}
if id, ok := d.GetOk("id"); ok {
req.VpcEndpointIds = aws.StringSlice([]string{id.(string)})
}
req.Filters = buildEC2AttributeFilterList(
map[string]string{
"vpc-endpoint-state": d.Get("state").(string),
"vpc-id": d.Get("vpc_id").(string),
"service-name": d.Get("service_name").(string),
},
)
if len(req.Filters) == 0 {
// Don't send an empty filters list; the EC2 API won't accept it.
req.Filters = nil
}
resp, err := conn.DescribeVpcEndpoints(req)
if err != nil {
return err
}
if resp == nil || len(resp.VpcEndpoints) == 0 {
return fmt.Errorf("no matching VPC endpoint found")
}
if len(resp.VpcEndpoints) > 1 {
return fmt.Errorf("multiple VPC endpoints matched; use additional constraints to reduce matches to a single VPC endpoint")
}
vpce := resp.VpcEndpoints[0]
policy, err := normalizeJsonString(*vpce.PolicyDocument)
if err != nil {
return errwrap.Wrapf("policy contains an invalid JSON: {{err}}", err)
}
d.SetId(aws.StringValue(vpce.VpcEndpointId))
d.Set("id", vpce.VpcEndpointId)
d.Set("state", vpce.State)
d.Set("vpc_id", vpce.VpcId)
d.Set("service_name", vpce.ServiceName)
d.Set("policy", policy)
if err := d.Set("route_table_ids", aws.StringValueSlice(vpce.RouteTableIds)); err != nil {
return err
}
return nil
}

View File

@@ -0,0 +1,56 @@
package aws
import (
"fmt"
"log"
"strconv"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsVpcEndpointService() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsVpcEndpointServiceRead,
Schema: map[string]*schema.Schema{
"service": {
Type: schema.TypeString,
Required: true,
},
"service_name": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func dataSourceAwsVpcEndpointServiceRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
service := d.Get("service").(string)
log.Printf("[DEBUG] Reading VPC Endpoint Services.")
request := &ec2.DescribeVpcEndpointServicesInput{}
resp, err := conn.DescribeVpcEndpointServices(request)
if err != nil {
return fmt.Errorf("Error fetching VPC Endpoint Services: %s", err)
}
names := aws.StringValueSlice(resp.ServiceNames)
for _, name := range names {
if strings.HasSuffix(name, "."+service) {
d.SetId(strconv.Itoa(hashcode.String(name)))
d.Set("service_name", name)
return nil
}
}
return fmt.Errorf("VPC Endpoint Service (%s) not found", service)
}

View File

@@ -0,0 +1,143 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsVpcPeeringConnection() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsVpcPeeringConnectionRead,
Schema: map[string]*schema.Schema{
"id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"status": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"vpc_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"owner_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"cidr_block": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"peer_vpc_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"peer_owner_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"peer_cidr_block": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"accepter": {
Type: schema.TypeMap,
Computed: true,
Elem: schema.TypeBool,
},
"requester": {
Type: schema.TypeMap,
Computed: true,
Elem: schema.TypeBool,
},
"filter": ec2CustomFiltersSchema(),
"tags": tagsSchemaComputed(),
},
}
}
func dataSourceAwsVpcPeeringConnectionRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
log.Printf("[DEBUG] Reading VPC Peering Connections.")
req := &ec2.DescribeVpcPeeringConnectionsInput{}
if id, ok := d.GetOk("id"); ok {
req.VpcPeeringConnectionIds = aws.StringSlice([]string{id.(string)})
}
req.Filters = buildEC2AttributeFilterList(
map[string]string{
"status-code": d.Get("status").(string),
"requester-vpc-info.vpc-id": d.Get("vpc_id").(string),
"requester-vpc-info.owner-id": d.Get("owner_id").(string),
"requester-vpc-info.cidr-block": d.Get("cidr_block").(string),
"accepter-vpc-info.vpc-id": d.Get("peer_vpc_id").(string),
"accepter-vpc-info.owner-id": d.Get("peer_owner_id").(string),
"accepter-vpc-info.cidr-block": d.Get("peer_cidr_block").(string),
},
)
req.Filters = append(req.Filters, buildEC2TagFilterList(
tagsFromMap(d.Get("tags").(map[string]interface{})),
)...)
req.Filters = append(req.Filters, buildEC2CustomFilterList(
d.Get("filter").(*schema.Set),
)...)
if len(req.Filters) == 0 {
// Don't send an empty filters list; the EC2 API won't accept it.
req.Filters = nil
}
resp, err := conn.DescribeVpcPeeringConnections(req)
if err != nil {
return err
}
if resp == nil || len(resp.VpcPeeringConnections) == 0 {
return fmt.Errorf("no matching VPC peering connection found")
}
if len(resp.VpcPeeringConnections) > 1 {
return fmt.Errorf("multiple VPC peering connections matched; use additional constraints to reduce matches to a single VPC peering connection")
}
pcx := resp.VpcPeeringConnections[0]
d.SetId(aws.StringValue(pcx.VpcPeeringConnectionId))
d.Set("id", pcx.VpcPeeringConnectionId)
d.Set("status", pcx.Status.Code)
d.Set("vpc_id", pcx.RequesterVpcInfo.VpcId)
d.Set("owner_id", pcx.RequesterVpcInfo.OwnerId)
d.Set("cidr_block", pcx.RequesterVpcInfo.CidrBlock)
d.Set("peer_vpc_id", pcx.AccepterVpcInfo.VpcId)
d.Set("peer_owner_id", pcx.AccepterVpcInfo.OwnerId)
d.Set("peer_cidr_block", pcx.AccepterVpcInfo.CidrBlock)
d.Set("tags", tagsToMap(pcx.Tags))
if pcx.AccepterVpcInfo.PeeringOptions != nil {
if err := d.Set("accepter", flattenPeeringOptions(pcx.AccepterVpcInfo.PeeringOptions)[0]); err != nil {
return err
}
}
if pcx.RequesterVpcInfo.PeeringOptions != nil {
if err := d.Set("requester", flattenPeeringOptions(pcx.RequesterVpcInfo.PeeringOptions)[0]); err != nil {
return err
}
}
return nil
}

View File

@@ -0,0 +1,105 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsVpnGateway() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsVpnGatewayRead,
Schema: map[string]*schema.Schema{
"id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"state": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"attached_vpc_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"availability_zone": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"filter": ec2CustomFiltersSchema(),
"tags": tagsSchemaComputed(),
},
}
}
func dataSourceAwsVpnGatewayRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
log.Printf("[DEBUG] Reading VPN Gateways.")
req := &ec2.DescribeVpnGatewaysInput{}
if id, ok := d.GetOk("id"); ok {
req.VpnGatewayIds = aws.StringSlice([]string{id.(string)})
}
req.Filters = buildEC2AttributeFilterList(
map[string]string{
"state": d.Get("state").(string),
"availability-zone": d.Get("availability_zone").(string),
},
)
if id, ok := d.GetOk("attached_vpc_id"); ok {
req.Filters = append(req.Filters, buildEC2AttributeFilterList(
map[string]string{
"attachment.state": "attached",
"attachment.vpc-id": id.(string),
},
)...)
}
req.Filters = append(req.Filters, buildEC2TagFilterList(
tagsFromMap(d.Get("tags").(map[string]interface{})),
)...)
req.Filters = append(req.Filters, buildEC2CustomFilterList(
d.Get("filter").(*schema.Set),
)...)
if len(req.Filters) == 0 {
// Don't send an empty filters list; the EC2 API won't accept it.
req.Filters = nil
}
resp, err := conn.DescribeVpnGateways(req)
if err != nil {
return err
}
if resp == nil || len(resp.VpnGateways) == 0 {
return fmt.Errorf("no matching VPN gateway found: %#v", req)
}
if len(resp.VpnGateways) > 1 {
return fmt.Errorf("multiple VPN gateways matched; use additional constraints to reduce matches to a single VPN gateway")
}
vgw := resp.VpnGateways[0]
d.SetId(aws.StringValue(vgw.VpnGatewayId))
d.Set("state", vgw.State)
d.Set("availability_zone", vgw.AvailabilityZone)
d.Set("tags", tagsToMap(vgw.Tags))
for _, attachment := range vgw.VpcAttachments {
if *attachment.State == "attached" {
d.Set("attached_vpc_id", attachment.VpcId)
break
}
}
return nil
}

View File

@@ -0,0 +1,77 @@
package aws
import (
"bytes"
"encoding/json"
"log"
"net/url"
"strings"
"github.com/hashicorp/terraform/helper/schema"
"github.com/jen20/awspolicyequivalence"
)
func suppressEquivalentAwsPolicyDiffs(k, old, new string, d *schema.ResourceData) bool {
equivalent, err := awspolicy.PoliciesAreEquivalent(old, new)
if err != nil {
return false
}
return equivalent
}
// Suppresses minor version changes to the db_instance engine_version attribute
func suppressAwsDbEngineVersionDiffs(k, old, new string, d *schema.ResourceData) bool {
// First check whether the old/new values are empty.
// If both are empty, we have no prior state to compare against, so register a diff.
// This populates the attribute field during a plan/apply with fresh state, allowing
// the attribute to still be used in future resources.
// See https://github.com/hashicorp/terraform/issues/11881
if old == "" && new == "" {
return false
}
if v, ok := d.GetOk("auto_minor_version_upgrade"); ok {
if v.(bool) {
// If we're set to auto upgrade minor versions
// ignore a minor version diff between versions
if strings.HasPrefix(old, new) {
log.Printf("[DEBUG] Ignoring minor version diff")
return true
}
}
}
// Throw a diff by default
return false
}
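// For example (illustrative values, assuming d carries auto_minor_version_upgrade = true):
//
//   suppressAwsDbEngineVersionDiffs("engine_version", "5.6.27", "5.6", d) // true: "5.6.27" has prefix "5.6"
//   suppressAwsDbEngineVersionDiffs("engine_version", "5.6.27", "5.7", d) // false: still a diff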
func suppressEquivalentJsonDiffs(k, old, new string, d *schema.ResourceData) bool {
ob := bytes.NewBufferString("")
if err := json.Compact(ob, []byte(old)); err != nil {
return false
}
nb := bytes.NewBufferString("")
if err := json.Compact(nb, []byte(new)); err != nil {
return false
}
return jsonBytesEqual(ob.Bytes(), nb.Bytes())
}
func suppressOpenIdURL(k, old, new string, d *schema.ResourceData) bool {
oldUrl, err := url.Parse(old)
if err != nil {
return false
}
newUrl, err := url.Parse(new)
if err != nil {
return false
}
oldUrl.Scheme = "https"
return oldUrl.String() == newUrl.String()
}

View File

@@ -0,0 +1,163 @@
package aws
import (
"fmt"
"sort"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
// buildEC2AttributeFilterList takes a flat map of scalar attributes (most
// likely values extracted from a *schema.ResourceData on an EC2-querying
// data source) and produces a []*ec2.Filter representing an exact match
// for each of the given non-empty attributes.
//
// The keys of the given attributes map are the attribute names expected
// by the EC2 API, which are usually either in camelcase or with dash-separated
// words. We conventionally map these to underscore-separated identifiers
// with the same words when presenting these as data source query attributes
// in Terraform.
//
// It is the caller's responsibility to transform any non-string values into
// the appropriate string serialization required by the AWS API when
// encoding the given filter. Any attributes given with empty string values
// are ignored, assuming that the user wishes to leave that attribute
// unconstrained while filtering.
//
// The purpose of this function is to create values to pass in
// for the "Filters" attribute on most of the "Describe..." API functions in
// the EC2 API, to aid in the implementation of Terraform data sources that
// retrieve data about EC2 objects.
func buildEC2AttributeFilterList(attrs map[string]string) []*ec2.Filter {
var filters []*ec2.Filter
// sort the filters by name to make the output deterministic
var names []string
for filterName := range attrs {
names = append(names, filterName)
}
sort.Strings(names)
for _, filterName := range names {
value := attrs[filterName]
if value == "" {
continue
}
filters = append(filters, &ec2.Filter{
Name: aws.String(filterName),
Values: []*string{aws.String(value)},
})
}
return filters
}
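// For example (illustrative), a data source read function would typically call
// it like this; the empty "state" entry is skipped and the remaining filters
// are emitted in sorted key order:
//
//   req.Filters = buildEC2AttributeFilterList(map[string]string{
//     "vpc-id": d.Get("vpc_id").(string),
//     "state":  "",
//   })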
// buildEC2TagFilterList takes a []*ec2.Tag and produces a []*ec2.Filter that
// represents exact matches for all of the tag key/value pairs given in
// the tag set.
//
// The purpose of this function is to create values to pass in for
// the "Filters" attribute on most of the "Describe..." API functions
// in the EC2 API, to implement filtering by tag values e.g. in Terraform
// data sources that retrieve data about EC2 objects.
//
// It is conventional for an EC2 data source to include an attribute called
// "tags" which conforms to the schema returned by the tagsSchema() function.
// The value of this can then be converted to a tags slice using tagsFromMap,
// and the result finally passed in to this function.
//
// In Terraform configuration this would then look like this, to constrain
// results by name:
//
// tags {
// Name = "my-awesome-subnet"
// }
func buildEC2TagFilterList(tags []*ec2.Tag) []*ec2.Filter {
filters := make([]*ec2.Filter, len(tags))
for i, tag := range tags {
filters[i] = &ec2.Filter{
Name: aws.String(fmt.Sprintf("tag:%s", *tag.Key)),
Values: []*string{tag.Value},
}
}
return filters
}
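// For example (illustrative), the data sources in this package pair this with
// tagsFromMap on their computed "tags" attribute:
//
//   req.Filters = append(req.Filters, buildEC2TagFilterList(
//     tagsFromMap(d.Get("tags").(map[string]interface{})),
//   )...)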
// ec2CustomFiltersSchema returns a *schema.Schema that represents
// a set of custom filtering criteria that a user can specify as input
// to a data source that wraps one of the many "Describe..." API calls
// in the EC2 API.
//
// It is conventional for an attribute of this type to be included
// as a top-level attribute called "filter". This is the "catch all" for
// filter combinations that are not possible to express using scalar
// attributes or tags. In Terraform configuration, the custom filter blocks
// then look like this:
//
// filter {
// name = "availabilityZone"
// values = ["us-west-2a", "us-west-2b"]
// }
func ec2CustomFiltersSchema() *schema.Schema {
return &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
},
"values": {
Type: schema.TypeSet,
Required: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
},
},
}
}
// buildEC2CustomFilterList takes the set value extracted from a schema
// attribute conforming to the schema returned by ec2CustomFiltersSchema,
// and transforms it into a []*ec2.Filter representing the same filter
// expressions which is ready to pass into the "Filters" attribute on most
// of the "Describe..." functions in the EC2 API.
//
// This function is intended only to be used in conjunction with
// ec2CustomFiltersSchema. See the docs on that function for more details
// on the configuration pattern this is intended to support.
func buildEC2CustomFilterList(filterSet *schema.Set) []*ec2.Filter {
if filterSet == nil {
return []*ec2.Filter{}
}
customFilters := filterSet.List()
filters := make([]*ec2.Filter, len(customFilters))
for filterIdx, customFilterI := range customFilters {
customFilterMapI := customFilterI.(map[string]interface{})
name := customFilterMapI["name"].(string)
valuesI := customFilterMapI["values"].(*schema.Set).List()
values := make([]*string, len(valuesI))
for valueIdx, valueI := range valuesI {
values[valueIdx] = aws.String(valueI.(string))
}
filters[filterIdx] = &ec2.Filter{
Name: &name,
Values: values,
}
}
return filters
}
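// For example (illustrative), a data source exposing ec2CustomFiltersSchema as
// its "filter" attribute appends the result to any attribute and tag filters:
//
//   req.Filters = append(req.Filters, buildEC2CustomFilterList(
//     d.Get("filter").(*schema.Set),
//   )...)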

View File

@@ -0,0 +1,28 @@
package aws
// This list is copied from
// http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints
// It currently cannot be generated from the API json.
var hostedZoneIDsMap = map[string]string{
"us-east-1": "Z3AQBSTGFYJSTF",
"us-east-2": "Z2O1EMRO9K5GLX",
"us-west-2": "Z3BJ6K6RIION7M",
"us-west-1": "Z2F56UZL2M1ACD",
"eu-west-1": "Z1BKCTXD74EZPE",
"eu-west-2": "Z3GKZC51ZF0DB4",
"eu-central-1": "Z21DNDUVLTQW6Q",
"ap-south-1": "Z11RGJOFQNVJUP",
"ap-southeast-1": "Z3O0J2DXBE1FTB",
"ap-southeast-2": "Z1WCIGYICN2BYD",
"ap-northeast-1": "Z2M4EHUR26P7ZW",
"ap-northeast-2": "Z3W03O7B5YMIYP",
"ca-central-1": "Z1QDHH18159H29",
"sa-east-1": "Z7KQH4QJS55SO",
"us-gov-west-1": "Z31GFT0UA1I2HV",
}
// Returns the hosted zone ID for an S3 website endpoint region. This can be
// used as input to the aws_route53_record resource's zone_id argument.
func HostedZoneIDForRegion(region string) string {
return hostedZoneIDsMap[region]
}
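// For example (illustrative):
//
//   zoneID := HostedZoneIDForRegion("us-east-1") // "Z3AQBSTGFYJSTF"
//
// An unknown region returns the empty string, since a lookup on a missing map
// key yields the zero value.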

View File

@@ -0,0 +1,112 @@
package aws
import (
"encoding/json"
"sort"
)
type IAMPolicyDoc struct {
Version string `json:",omitempty"`
Id string `json:",omitempty"`
Statements []*IAMPolicyStatement `json:"Statement"`
}
type IAMPolicyStatement struct {
Sid string
Effect string `json:",omitempty"`
Actions interface{} `json:"Action,omitempty"`
NotActions interface{} `json:"NotAction,omitempty"`
Resources interface{} `json:"Resource,omitempty"`
NotResources interface{} `json:"NotResource,omitempty"`
Principals IAMPolicyStatementPrincipalSet `json:"Principal,omitempty"`
NotPrincipals IAMPolicyStatementPrincipalSet `json:"NotPrincipal,omitempty"`
Conditions IAMPolicyStatementConditionSet `json:"Condition,omitempty"`
}
type IAMPolicyStatementPrincipal struct {
Type string
Identifiers interface{}
}
type IAMPolicyStatementCondition struct {
Test string
Variable string
Values interface{}
}
type IAMPolicyStatementPrincipalSet []IAMPolicyStatementPrincipal
type IAMPolicyStatementConditionSet []IAMPolicyStatementCondition
func (ps IAMPolicyStatementPrincipalSet) MarshalJSON() ([]byte, error) {
raw := map[string]interface{}{}
// As a special case, IAM considers the string value "*" to be
// equivalent to "AWS": "*", and normalizes policies as such.
// We'll follow their lead and do the same normalization here.
// IAM also considers {"*": "*"} to be equivalent to this.
if len(ps) == 1 {
p := ps[0]
if p.Type == "AWS" || p.Type == "*" {
if sv, ok := p.Identifiers.(string); ok && sv == "*" {
return []byte(`"*"`), nil
}
if av, ok := p.Identifiers.([]string); ok && len(av) == 1 && av[0] == "*" {
return []byte(`"*"`), nil
}
}
}
for _, p := range ps {
switch i := p.Identifiers.(type) {
case []string:
if _, ok := raw[p.Type]; !ok {
raw[p.Type] = make([]string, 0, len(i))
}
sort.Sort(sort.Reverse(sort.StringSlice(i)))
raw[p.Type] = append(raw[p.Type].([]string), i...)
case string:
raw[p.Type] = i
default:
panic("Unsupported data type for IAMPolicyStatementPrincipalSet")
}
}
return json.Marshal(&raw)
}
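// For example (illustrative), a single wildcard AWS principal collapses to the
// bare JSON string "*", matching IAM's normalization, while other principals
// marshal to an object keyed by type:
//
//   json.Marshal(IAMPolicyStatementPrincipalSet{{Type: "AWS", Identifiers: "*"}})
//   // => "*"
//   json.Marshal(IAMPolicyStatementPrincipalSet{{Type: "Service", Identifiers: "ec2.amazonaws.com"}})
//   // => {"Service":"ec2.amazonaws.com"}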
func (cs IAMPolicyStatementConditionSet) MarshalJSON() ([]byte, error) {
raw := map[string]map[string]interface{}{}
for _, c := range cs {
if _, ok := raw[c.Test]; !ok {
raw[c.Test] = map[string]interface{}{}
}
switch i := c.Values.(type) {
case []string:
if _, ok := raw[c.Test][c.Variable]; !ok {
raw[c.Test][c.Variable] = make([]string, 0, len(i))
}
sort.Sort(sort.Reverse(sort.StringSlice(i)))
raw[c.Test][c.Variable] = append(raw[c.Test][c.Variable].([]string), i...)
case string:
raw[c.Test][c.Variable] = i
default:
panic("Unsupported data type for IAMPolicyStatementConditionSet")
}
}
return json.Marshal(&raw)
}
func iamPolicyDecodeConfigStringList(lI []interface{}) interface{} {
if len(lI) == 1 {
return lI[0].(string)
}
ret := make([]string, len(lI))
for i, vI := range lI {
ret[i] = vI.(string)
}
sort.Sort(sort.Reverse(sort.StringSlice(ret)))
return ret
}
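// For example (illustrative), a single-element list decodes to a plain string,
// while multiple elements decode to a reverse-sorted string slice:
//
//   iamPolicyDecodeConfigStringList([]interface{}{"s3:GetObject"}) // "s3:GetObject"
//   iamPolicyDecodeConfigStringList([]interface{}{"a", "b"})       // []string{"b", "a"}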

View File

@@ -0,0 +1,32 @@
package aws
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cloudfront"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsCloudFrontDistributionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
// This is a non-API attribute. We merely set it to the same value as the
// Default in the schema.
d.Set("retain_on_delete", false)
conn := meta.(*AWSClient).cloudfrontconn
id := d.Id()
resp, err := conn.GetDistributionConfig(&cloudfront.GetDistributionConfigInput{
Id: aws.String(id),
})
if err != nil {
return nil, err
}
distConfig := resp.DistributionConfig
results := make([]*schema.ResourceData, 1)
err = flattenDistributionConfig(d, distConfig)
if err != nil {
return nil, err
}
results[0] = d
return results, nil
}

View File

@@ -0,0 +1,17 @@
package aws
import "github.com/hashicorp/terraform/helper/schema"
func resourceAwsDbEventSubscriptionImport(
d *schema.ResourceData,
meta interface{}) ([]*schema.ResourceData, error) {
// The db event subscription Read function only needs the "name" of the event subscription
// in order to populate the necessary values. This takes the "id" from the supplied StateFunc
// and sets it as the "name" attribute, as described in the import documentation. This allows
// the Read function to actually succeed and set the ID of the resource
results := make([]*schema.ResourceData, 1, 1)
d.Set("name", d.Id())
results[0] = d
return results, nil
}

View File

@@ -0,0 +1,95 @@
package aws
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
// Network ACLs import their rules and associations
func resourceAwsNetworkAclImportState(
d *schema.ResourceData,
meta interface{}) ([]*schema.ResourceData, error) {
conn := meta.(*AWSClient).ec2conn
// First query the resource itself
resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{
NetworkAclIds: []*string{aws.String(d.Id())},
})
if err != nil {
return nil, err
}
if resp == nil || len(resp.NetworkAcls) < 1 || resp.NetworkAcls[0] == nil {
return nil, fmt.Errorf("network ACL %s is not found", d.Id())
}
acl := resp.NetworkAcls[0]
// Start building our results
results := make([]*schema.ResourceData, 1,
2+len(acl.Associations)+len(acl.Entries))
results[0] = d
/*
{
// Construct the entries
subResource := resourceAwsNetworkAclRule()
for _, entry := range acl.Entries {
// Minimal data for route
d := subResource.Data(nil)
d.SetType("aws_network_acl_rule")
d.Set("network_acl_id", acl.NetworkAclId)
d.Set("rule_number", entry.RuleNumber)
d.Set("egress", entry.Egress)
d.Set("protocol", entry.Protocol)
d.SetId(networkAclIdRuleNumberEgressHash(
d.Get("network_acl_id").(string),
d.Get("rule_number").(int),
d.Get("egress").(bool),
d.Get("protocol").(string)))
results = append(results, d)
}
}
{
// Construct the associations
subResource := resourceAwsRouteTableAssociation()
for _, assoc := range table.Associations {
if *assoc.Main {
// Ignore
continue
}
// Minimal data for route
d := subResource.Data(nil)
d.SetType("aws_route_table_association")
d.Set("route_table_id", assoc.RouteTableId)
d.SetId(*assoc.RouteTableAssociationId)
results = append(results, d)
}
}
{
// Construct the main associations. We could do this above but
// I keep this as a separate section since it is a separate resource.
subResource := resourceAwsMainRouteTableAssociation()
for _, assoc := range table.Associations {
if !*assoc.Main {
// Ignore
continue
}
// Minimal data for route
d := subResource.Data(nil)
d.SetType("aws_main_route_table_association")
d.Set("route_table_id", id)
d.Set("vpc_id", table.VpcId)
d.SetId(*assoc.RouteTableAssociationId)
results = append(results, d)
}
}
*/
return results, nil
}

View File

@@ -0,0 +1,99 @@
package aws
import (
"fmt"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
// Route table import also imports all the rules
func resourceAwsRouteTableImportState(
d *schema.ResourceData,
meta interface{}) ([]*schema.ResourceData, error) {
conn := meta.(*AWSClient).ec2conn
// First query the resource itself
id := d.Id()
resp, err := conn.DescribeRouteTables(&ec2.DescribeRouteTablesInput{
RouteTableIds: []*string{&id},
})
if err != nil {
return nil, err
}
if len(resp.RouteTables) < 1 || resp.RouteTables[0] == nil {
return nil, fmt.Errorf("route table %s is not found", id)
}
table := resp.RouteTables[0]
// Start building our results
results := make([]*schema.ResourceData, 1,
2+len(table.Associations)+len(table.Routes))
results[0] = d
{
// Construct the routes
subResource := resourceAwsRoute()
for _, route := range table.Routes {
// Ignore the local/default route
if route.GatewayId != nil && *route.GatewayId == "local" {
continue
}
if route.DestinationPrefixListId != nil {
// Skipping because VPC endpoint routes are handled separately
// See aws_vpc_endpoint
continue
}
// Minimal data for route
d := subResource.Data(nil)
d.SetType("aws_route")
d.Set("route_table_id", id)
d.Set("destination_cidr_block", route.DestinationCidrBlock)
d.Set("destination_ipv6_cidr_block", route.DestinationIpv6CidrBlock)
d.SetId(routeIDHash(d, route))
results = append(results, d)
}
}
{
// Construct the associations
subResource := resourceAwsRouteTableAssociation()
for _, assoc := range table.Associations {
if *assoc.Main {
// Ignore
continue
}
// Minimal data for route
d := subResource.Data(nil)
d.SetType("aws_route_table_association")
d.Set("route_table_id", assoc.RouteTableId)
d.SetId(*assoc.RouteTableAssociationId)
results = append(results, d)
}
}
{
// Construct the main associations. We could do this above but
// I keep this as a separate section since it is a separate resource.
subResource := resourceAwsMainRouteTableAssociation()
for _, assoc := range table.Associations {
if !*assoc.Main {
// Ignore
continue
}
// Minimal data for route
d := subResource.Data(nil)
d.SetType("aws_main_route_table_association")
d.Set("route_table_id", id)
d.Set("vpc_id", table.VpcId)
d.SetId(*assoc.RouteTableAssociationId)
results = append(results, d)
}
}
return results, nil
}

View File

@@ -0,0 +1,39 @@
package aws
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsS3BucketImportState(
d *schema.ResourceData,
meta interface{}) ([]*schema.ResourceData, error) {
results := make([]*schema.ResourceData, 1, 1)
results[0] = d
conn := meta.(*AWSClient).s3conn
pol, err := conn.GetBucketPolicy(&s3.GetBucketPolicyInput{
Bucket: aws.String(d.Id()),
})
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoSuchBucketPolicy" {
// Bucket without policy
return results, nil
}
return nil, errwrap.Wrapf("Error importing AWS S3 bucket policy: {{err}}", err)
}
policy := resourceAwsS3BucketPolicy()
pData := policy.Data(nil)
pData.SetId(d.Id())
pData.SetType("aws_s3_bucket_policy")
pData.Set("bucket", d.Id())
pData.Set("policy", pol)
results = append(results, pData)
return results, nil
}

View File

@@ -0,0 +1,186 @@
package aws
import (
"fmt"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/schema"
)
// Security group import fans out to multiple resources due to the
// security group rules. Instead of creating one resource with nested
// rules, we use the best practices approach of one resource per rule.
func resourceAwsSecurityGroupImportState(
d *schema.ResourceData,
meta interface{}) ([]*schema.ResourceData, error) {
conn := meta.(*AWSClient).ec2conn
// First query the security group
sgRaw, _, err := SGStateRefreshFunc(conn, d.Id())()
if err != nil {
return nil, err
}
if sgRaw == nil {
return nil, fmt.Errorf("security group not found")
}
sg := sgRaw.(*ec2.SecurityGroup)
// Start building our results
results := make([]*schema.ResourceData, 1,
1+len(sg.IpPermissions)+len(sg.IpPermissionsEgress))
results[0] = d
// Construct the rules
permMap := map[string][]*ec2.IpPermission{
"ingress": sg.IpPermissions,
"egress": sg.IpPermissionsEgress,
}
for ruleType, perms := range permMap {
for _, perm := range perms {
ds, err := resourceAwsSecurityGroupImportStatePerm(sg, ruleType, perm)
if err != nil {
return nil, err
}
results = append(results, ds...)
}
}
return results, nil
}
func resourceAwsSecurityGroupImportStatePerm(sg *ec2.SecurityGroup, ruleType string, perm *ec2.IpPermission) ([]*schema.ResourceData, error) {
/*
Create a separate Security Group Rule for:
* The collection of IpRanges (cidr_blocks)
* The collection of Ipv6Ranges (ipv6_cidr_blocks)
* Each individual UserIdGroupPair (source_security_group_id)
If, for example, a security group has rules for:
* 2 IpRanges
* 2 Ipv6Ranges
* 2 UserIdGroupPairs
This would generate 4 security group rules:
* 1 for the collection of IpRanges
* 1 for the collection of Ipv6Ranges
* 1 for the first UserIdGroupPair
* 1 for the second UserIdGroupPair
*/
var result []*schema.ResourceData
if perm.IpRanges != nil {
p := &ec2.IpPermission{
FromPort: perm.FromPort,
IpProtocol: perm.IpProtocol,
PrefixListIds: perm.PrefixListIds,
ToPort: perm.ToPort,
IpRanges: perm.IpRanges,
}
r, err := resourceAwsSecurityGroupImportStatePermPair(sg, ruleType, p)
if err != nil {
return nil, err
}
result = append(result, r)
}
if perm.Ipv6Ranges != nil {
p := &ec2.IpPermission{
FromPort: perm.FromPort,
IpProtocol: perm.IpProtocol,
PrefixListIds: perm.PrefixListIds,
ToPort: perm.ToPort,
Ipv6Ranges: perm.Ipv6Ranges,
}
r, err := resourceAwsSecurityGroupImportStatePermPair(sg, ruleType, p)
if err != nil {
return nil, err
}
result = append(result, r)
}
if len(perm.UserIdGroupPairs) > 0 {
for _, pair := range perm.UserIdGroupPairs {
p := &ec2.IpPermission{
FromPort: perm.FromPort,
IpProtocol: perm.IpProtocol,
PrefixListIds: perm.PrefixListIds,
ToPort: perm.ToPort,
UserIdGroupPairs: []*ec2.UserIdGroupPair{pair},
}
r, err := resourceAwsSecurityGroupImportStatePermPair(sg, ruleType, p)
if err != nil {
return nil, err
}
result = append(result, r)
}
}
if len(result) == 0 && len(perm.PrefixListIds) > 0 {
p := &ec2.IpPermission{
FromPort: perm.FromPort,
IpProtocol: perm.IpProtocol,
PrefixListIds: perm.PrefixListIds,
ToPort: perm.ToPort,
}
r, err := resourceAwsSecurityGroupImportStatePermPair(sg, ruleType, p)
if err != nil {
return nil, err
}
result = append(result, r)
}
return result, nil
}
func resourceAwsSecurityGroupImportStatePermPair(sg *ec2.SecurityGroup, ruleType string, perm *ec2.IpPermission) (*schema.ResourceData, error) {
// Construct the rule. We do this by populating the absolute
// minimum necessary for Refresh on the rule to work. This
// happens to be a lot of fields since they're almost all needed
// for de-dupping.
sgId := sg.GroupId
id := ipPermissionIDHash(*sgId, ruleType, perm)
ruleResource := resourceAwsSecurityGroupRule()
d := ruleResource.Data(nil)
d.SetId(id)
d.SetType("aws_security_group_rule")
d.Set("security_group_id", sgId)
d.Set("type", ruleType)
// 'self' is false by default. Below, we range over the group ids and set true
// if the parent sg id is found
d.Set("self", false)
if len(perm.UserIdGroupPairs) > 0 {
s := perm.UserIdGroupPairs[0]
// Check for Pair that is the same as the Security Group, to denote self.
// Otherwise, mark the group id in source_security_group_id
isVPC := sg.VpcId != nil && *sg.VpcId != ""
if isVPC {
if *s.GroupId == *sg.GroupId {
d.Set("self", true)
// prune the self reference from the UserIdGroupPairs, so we don't
// have duplicate sg ids (both self and in source_security_group_id)
perm.UserIdGroupPairs = append(perm.UserIdGroupPairs[:0], perm.UserIdGroupPairs[0+1:]...)
}
} else {
if *s.GroupName == *sg.GroupName {
d.Set("self", true)
// prune the self reference from the UserIdGroupPairs, so we don't
// have duplicate sg ids (both self and in source_security_group_id)
perm.UserIdGroupPairs = append(perm.UserIdGroupPairs[:0], perm.UserIdGroupPairs[0+1:]...)
}
}
}
if err := setFromIPPerm(d, sg, perm); err != nil {
return nil, errwrap.Wrapf("Error importing AWS Security Group: {{err}}", err)
}
return d, nil
}

View File

@@ -0,0 +1,141 @@
package aws
import (
"fmt"
"net"
"strconv"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
)
func expandNetworkAclEntries(configured []interface{}, entryType string) ([]*ec2.NetworkAclEntry, error) {
entries := make([]*ec2.NetworkAclEntry, 0, len(configured))
for _, eRaw := range configured {
data := eRaw.(map[string]interface{})
protocol := data["protocol"].(string)
p, err := strconv.Atoi(protocol)
if err != nil {
var ok bool
p, ok = protocolIntegers()[protocol]
if !ok {
return nil, fmt.Errorf("Invalid Protocol %s for rule %#v", protocol, data)
}
}
e := &ec2.NetworkAclEntry{
Protocol: aws.String(strconv.Itoa(p)),
PortRange: &ec2.PortRange{
From: aws.Int64(int64(data["from_port"].(int))),
To: aws.Int64(int64(data["to_port"].(int))),
},
Egress: aws.Bool(entryType == "egress"),
RuleAction: aws.String(data["action"].(string)),
RuleNumber: aws.Int64(int64(data["rule_no"].(int))),
}
if v, ok := data["ipv6_cidr_block"]; ok {
e.Ipv6CidrBlock = aws.String(v.(string))
}
if v, ok := data["cidr_block"]; ok {
e.CidrBlock = aws.String(v.(string))
}
// Specify additional required fields for ICMP
if p == 1 {
e.IcmpTypeCode = &ec2.IcmpTypeCode{}
if v, ok := data["icmp_code"]; ok {
e.IcmpTypeCode.Code = aws.Int64(int64(v.(int)))
}
if v, ok := data["icmp_type"]; ok {
e.IcmpTypeCode.Type = aws.Int64(int64(v.(int)))
}
}
entries = append(entries, e)
}
return entries, nil
}
func flattenNetworkAclEntries(list []*ec2.NetworkAclEntry) []map[string]interface{} {
entries := make([]map[string]interface{}, 0, len(list))
for _, entry := range list {
newEntry := map[string]interface{}{
"from_port": *entry.PortRange.From,
"to_port": *entry.PortRange.To,
"action": *entry.RuleAction,
"rule_no": *entry.RuleNumber,
"protocol": *entry.Protocol,
}
if entry.CidrBlock != nil {
newEntry["cidr_block"] = *entry.CidrBlock
}
if entry.Ipv6CidrBlock != nil {
newEntry["ipv6_cidr_block"] = *entry.Ipv6CidrBlock
}
entries = append(entries, newEntry)
}
return entries
}
func protocolStrings(protocolIntegers map[string]int) map[int]string {
protocolStrings := make(map[int]string, len(protocolIntegers))
for k, v := range protocolIntegers {
protocolStrings[v] = k
}
return protocolStrings
}
func protocolIntegers() map[string]int {
// defined at https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
return map[string]int{
"ah": 51,
"esp": 50,
"udp": 17,
"tcp": 6,
"icmp": 1,
"all": -1,
"vrrp": 112,
}
}
// expectedPortPair stores a pair of ports we expect to see together.
type expectedPortPair struct {
to_port int64
from_port int64
}
// validatePorts ensures the ports and protocol match expected
// values.
func validatePorts(to int64, from int64, expected expectedPortPair) bool {
if to != expected.to_port || from != expected.from_port {
return false
}
return true
}
// validateCIDRBlock ensures the passed CIDR block represents an implied
// network, and not an overly-specified IP address.
func validateCIDRBlock(cidr string) error {
_, ipnet, err := net.ParseCIDR(cidr)
if err != nil {
return err
}
if ipnet.String() != cidr {
return fmt.Errorf("%s is not a valid mask; did you mean %s?", cidr, ipnet)
}
return nil
}
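// For example (illustrative), a host address paired with a network mask is
// rejected, while the implied network passes:
//
//   validateCIDRBlock("10.0.1.0/24") // nil
//   validateCIDRBlock("10.0.1.5/24") // error: did you mean 10.0.1.0/24?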

View File

@@ -0,0 +1,645 @@
package aws
import (
"fmt"
"log"
"strconv"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/opsworks"
)
// OpsWorks has a single concept of "layer" which represents several different
// layer types. The differences between these are in some extra properties that
// get packed into an "Attributes" map, but in the OpsWorks UI these are presented
// as first-class options, and so Terraform prefers to expose them this way and
// hide the implementation detail that they are all packed into a single type
// in the underlying API.
//
// This file contains utilities that are shared between all of the concrete
// layer resource types, which have names matching aws_opsworks_*_layer.
type opsworksLayerTypeAttribute struct {
AttrName string
Type schema.ValueType
Default interface{}
Required bool
WriteOnly bool
}
type opsworksLayerType struct {
TypeName string
DefaultLayerName string
Attributes map[string]*opsworksLayerTypeAttribute
CustomShortName bool
}
var (
opsworksTrueString = "true"
opsworksFalseString = "false"
)
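// As an illustrative sketch (the concrete layer files define the real values),
// a layer resource is wired up by constructing an opsworksLayerType and
// exposing its SchemaResource:
//
//   layerType := &opsworksLayerType{
//     TypeName:        "custom", // hypothetical layer type name
//     CustomShortName: true,
//     Attributes:      map[string]*opsworksLayerTypeAttribute{},
//   }
//   resource := layerType.SchemaResource()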
func (lt *opsworksLayerType) SchemaResource() *schema.Resource {
resourceSchema := map[string]*schema.Schema{
"id": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"auto_assign_elastic_ips": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"auto_assign_public_ips": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"custom_instance_profile_arn": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"elastic_load_balancer": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"custom_setup_recipes": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"custom_configure_recipes": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"custom_deploy_recipes": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"custom_undeploy_recipes": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"custom_shutdown_recipes": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"custom_security_group_ids": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"custom_json": &schema.Schema{
Type: schema.TypeString,
StateFunc: normalizeJson,
Optional: true,
},
"auto_healing": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Default: true,
},
"install_updates_on_boot": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Default: true,
},
"instance_shutdown_timeout": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
Default: 120,
},
"drain_elb_on_shutdown": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Default: true,
},
"system_packages": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"stack_id": &schema.Schema{
Type: schema.TypeString,
ForceNew: true,
Required: true,
},
"use_ebs_optimized_instances": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"ebs_volume": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"iops": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
Default: 0,
},
"mount_point": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"number_of_disks": &schema.Schema{
Type: schema.TypeInt,
Required: true,
},
"raid_level": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Default: "",
},
"size": &schema.Schema{
Type: schema.TypeInt,
Required: true,
},
"type": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Default: "standard",
},
},
},
Set: func(v interface{}) int {
m := v.(map[string]interface{})
return hashcode.String(m["mount_point"].(string))
},
},
}
if lt.CustomShortName {
resourceSchema["short_name"] = &schema.Schema{
Type: schema.TypeString,
Required: true,
}
}
if lt.DefaultLayerName != "" {
resourceSchema["name"] = &schema.Schema{
Type: schema.TypeString,
Optional: true,
Default: lt.DefaultLayerName,
}
} else {
resourceSchema["name"] = &schema.Schema{
Type: schema.TypeString,
Required: true,
}
}
for key, def := range lt.Attributes {
resourceSchema[key] = &schema.Schema{
Type: def.Type,
Default: def.Default,
Required: def.Required,
Optional: !def.Required,
}
}
return &schema.Resource{
Read: func(d *schema.ResourceData, meta interface{}) error {
client := meta.(*AWSClient).opsworksconn
return lt.Read(d, client)
},
Create: func(d *schema.ResourceData, meta interface{}) error {
client := meta.(*AWSClient).opsworksconn
return lt.Create(d, client)
},
Update: func(d *schema.ResourceData, meta interface{}) error {
client := meta.(*AWSClient).opsworksconn
return lt.Update(d, client)
},
Delete: func(d *schema.ResourceData, meta interface{}) error {
client := meta.(*AWSClient).opsworksconn
return lt.Delete(d, client)
},
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: resourceSchema,
}
}
func (lt *opsworksLayerType) Read(d *schema.ResourceData, client *opsworks.OpsWorks) error {
req := &opsworks.DescribeLayersInput{
LayerIds: []*string{
aws.String(d.Id()),
},
}
log.Printf("[DEBUG] Reading OpsWorks layer: %s", d.Id())
resp, err := client.DescribeLayers(req)
if err != nil {
if awserr, ok := err.(awserr.Error); ok {
if awserr.Code() == "ResourceNotFoundException" {
d.SetId("")
return nil
}
}
return err
}
layer := resp.Layers[0]
d.Set("id", layer.LayerId)
d.Set("auto_assign_elastic_ips", layer.AutoAssignElasticIps)
d.Set("auto_assign_public_ips", layer.AutoAssignPublicIps)
d.Set("custom_instance_profile_arn", layer.CustomInstanceProfileArn)
d.Set("custom_security_group_ids", flattenStringList(layer.CustomSecurityGroupIds))
d.Set("auto_healing", layer.EnableAutoHealing)
d.Set("install_updates_on_boot", layer.InstallUpdatesOnBoot)
d.Set("name", layer.Name)
d.Set("system_packages", flattenStringList(layer.Packages))
d.Set("stack_id", layer.StackId)
d.Set("use_ebs_optimized_instances", layer.UseEbsOptimizedInstances)
if lt.CustomShortName {
d.Set("short_name", layer.Shortname)
}
if v := layer.CustomJson; v == nil {
if err := d.Set("custom_json", ""); err != nil {
return err
}
} else if err := d.Set("custom_json", normalizeJson(*v)); err != nil {
return err
}
lt.SetAttributeMap(d, layer.Attributes)
lt.SetLifecycleEventConfiguration(d, layer.LifecycleEventConfiguration)
lt.SetCustomRecipes(d, layer.CustomRecipes)
lt.SetVolumeConfigurations(d, layer.VolumeConfigurations)
/* get ELB */
ebsRequest := &opsworks.DescribeElasticLoadBalancersInput{
LayerIds: []*string{
aws.String(d.Id()),
},
}
loadBalancers, err := client.DescribeElasticLoadBalancers(ebsRequest)
if err != nil {
return err
}
if loadBalancers.ElasticLoadBalancers == nil || len(loadBalancers.ElasticLoadBalancers) == 0 {
d.Set("elastic_load_balancer", "")
} else {
loadBalancer := loadBalancers.ElasticLoadBalancers[0]
if loadBalancer != nil {
d.Set("elastic_load_balancer", loadBalancer.ElasticLoadBalancerName)
}
}
return nil
}
func (lt *opsworksLayerType) Create(d *schema.ResourceData, client *opsworks.OpsWorks) error {
req := &opsworks.CreateLayerInput{
AutoAssignElasticIps: aws.Bool(d.Get("auto_assign_elastic_ips").(bool)),
AutoAssignPublicIps: aws.Bool(d.Get("auto_assign_public_ips").(bool)),
CustomInstanceProfileArn: aws.String(d.Get("custom_instance_profile_arn").(string)),
CustomRecipes: lt.CustomRecipes(d),
CustomSecurityGroupIds: expandStringSet(d.Get("custom_security_group_ids").(*schema.Set)),
EnableAutoHealing: aws.Bool(d.Get("auto_healing").(bool)),
InstallUpdatesOnBoot: aws.Bool(d.Get("install_updates_on_boot").(bool)),
LifecycleEventConfiguration: lt.LifecycleEventConfiguration(d),
Name: aws.String(d.Get("name").(string)),
Packages: expandStringSet(d.Get("system_packages").(*schema.Set)),
Type: aws.String(lt.TypeName),
StackId: aws.String(d.Get("stack_id").(string)),
UseEbsOptimizedInstances: aws.Bool(d.Get("use_ebs_optimized_instances").(bool)),
Attributes: lt.AttributeMap(d),
VolumeConfigurations: lt.VolumeConfigurations(d),
}
if lt.CustomShortName {
req.Shortname = aws.String(d.Get("short_name").(string))
} else {
req.Shortname = aws.String(lt.TypeName)
}
req.CustomJson = aws.String(d.Get("custom_json").(string))
log.Printf("[DEBUG] Creating OpsWorks layer: %s", d.Id())
resp, err := client.CreateLayer(req)
if err != nil {
return err
}
layerId := *resp.LayerId
d.SetId(layerId)
d.Set("id", layerId)
loadBalancer := aws.String(d.Get("elastic_load_balancer").(string))
if loadBalancer != nil && *loadBalancer != "" {
log.Printf("[DEBUG] Attaching load balancer: %s", *loadBalancer)
_, err := client.AttachElasticLoadBalancer(&opsworks.AttachElasticLoadBalancerInput{
ElasticLoadBalancerName: loadBalancer,
LayerId: &layerId,
})
if err != nil {
return err
}
}
return lt.Read(d, client)
}
func (lt *opsworksLayerType) Update(d *schema.ResourceData, client *opsworks.OpsWorks) error {
req := &opsworks.UpdateLayerInput{
LayerId: aws.String(d.Id()),
AutoAssignElasticIps: aws.Bool(d.Get("auto_assign_elastic_ips").(bool)),
AutoAssignPublicIps: aws.Bool(d.Get("auto_assign_public_ips").(bool)),
CustomInstanceProfileArn: aws.String(d.Get("custom_instance_profile_arn").(string)),
CustomRecipes: lt.CustomRecipes(d),
CustomSecurityGroupIds: expandStringSet(d.Get("custom_security_group_ids").(*schema.Set)),
EnableAutoHealing: aws.Bool(d.Get("auto_healing").(bool)),
InstallUpdatesOnBoot: aws.Bool(d.Get("install_updates_on_boot").(bool)),
LifecycleEventConfiguration: lt.LifecycleEventConfiguration(d),
Name: aws.String(d.Get("name").(string)),
Packages: expandStringSet(d.Get("system_packages").(*schema.Set)),
UseEbsOptimizedInstances: aws.Bool(d.Get("use_ebs_optimized_instances").(bool)),
Attributes: lt.AttributeMap(d),
VolumeConfigurations: lt.VolumeConfigurations(d),
}
if lt.CustomShortName {
req.Shortname = aws.String(d.Get("short_name").(string))
} else {
req.Shortname = aws.String(lt.TypeName)
}
req.CustomJson = aws.String(d.Get("custom_json").(string))
log.Printf("[DEBUG] Updating OpsWorks layer: %s", d.Id())
if d.HasChange("elastic_load_balancer") {
lbo, lbn := d.GetChange("elastic_load_balancer")
loadBalancerOld := aws.String(lbo.(string))
loadBalancerNew := aws.String(lbn.(string))
if loadBalancerOld != nil && *loadBalancerOld != "" {
log.Printf("[DEBUG] Dettaching load balancer: %s", *loadBalancerOld)
_, err := client.DetachElasticLoadBalancer(&opsworks.DetachElasticLoadBalancerInput{
ElasticLoadBalancerName: loadBalancerOld,
LayerId: aws.String(d.Id()),
})
if err != nil {
return err
}
}
if loadBalancerNew != nil && *loadBalancerNew != "" {
log.Printf("[DEBUG] Attaching load balancer: %s", *loadBalancerNew)
_, err := client.AttachElasticLoadBalancer(&opsworks.AttachElasticLoadBalancerInput{
ElasticLoadBalancerName: loadBalancerNew,
LayerId: aws.String(d.Id()),
})
if err != nil {
return err
}
}
}
_, err := client.UpdateLayer(req)
if err != nil {
return err
}
return lt.Read(d, client)
}
func (lt *opsworksLayerType) Delete(d *schema.ResourceData, client *opsworks.OpsWorks) error {
req := &opsworks.DeleteLayerInput{
LayerId: aws.String(d.Id()),
}
log.Printf("[DEBUG] Deleting OpsWorks layer: %s", d.Id())
_, err := client.DeleteLayer(req)
return err
}
func (lt *opsworksLayerType) AttributeMap(d *schema.ResourceData) map[string]*string {
attrs := map[string]*string{}
for key, def := range lt.Attributes {
value := d.Get(key)
switch def.Type {
case schema.TypeString:
strValue := value.(string)
attrs[def.AttrName] = &strValue
case schema.TypeInt:
intValue := value.(int)
strValue := strconv.Itoa(intValue)
attrs[def.AttrName] = &strValue
case schema.TypeBool:
boolValue := value.(bool)
if boolValue {
attrs[def.AttrName] = &opsworksTrueString
} else {
attrs[def.AttrName] = &opsworksFalseString
}
default:
// should never happen
panic(fmt.Errorf("Unsupported OpsWorks layer attribute type"))
}
}
return attrs
}
func (lt *opsworksLayerType) SetAttributeMap(d *schema.ResourceData, attrs map[string]*string) {
for key, def := range lt.Attributes {
// Ignore write-only attributes; we'll just keep what we already have stored.
// (The AWS API returns garbage placeholder values for these.)
if def.WriteOnly {
continue
}
if strPtr, ok := attrs[def.AttrName]; ok && strPtr != nil {
strValue := *strPtr
switch def.Type {
case schema.TypeString:
d.Set(key, strValue)
case schema.TypeInt:
intValue, err := strconv.Atoi(strValue)
if err == nil {
d.Set(key, intValue)
} else {
// Got garbage from the AWS API
d.Set(key, nil)
}
case schema.TypeBool:
boolValue := true
if strValue == opsworksFalseString {
boolValue = false
}
d.Set(key, boolValue)
default:
// should never happen
panic(fmt.Errorf("Unsupported OpsWorks layer attribute type"))
}
} else {
d.Set(key, nil)
}
}
}
func (lt *opsworksLayerType) LifecycleEventConfiguration(d *schema.ResourceData) *opsworks.LifecycleEventConfiguration {
return &opsworks.LifecycleEventConfiguration{
Shutdown: &opsworks.ShutdownEventConfiguration{
DelayUntilElbConnectionsDrained: aws.Bool(d.Get("drain_elb_on_shutdown").(bool)),
ExecutionTimeout: aws.Int64(int64(d.Get("instance_shutdown_timeout").(int))),
},
}
}
func (lt *opsworksLayerType) SetLifecycleEventConfiguration(d *schema.ResourceData, v *opsworks.LifecycleEventConfiguration) {
if v == nil || v.Shutdown == nil {
d.Set("drain_elb_on_shutdown", nil)
d.Set("instance_shutdown_timeout", nil)
} else {
d.Set("drain_elb_on_shutdown", v.Shutdown.DelayUntilElbConnectionsDrained)
d.Set("instance_shutdown_timeout", v.Shutdown.ExecutionTimeout)
}
}
func (lt *opsworksLayerType) CustomRecipes(d *schema.ResourceData) *opsworks.Recipes {
return &opsworks.Recipes{
Configure: expandStringList(d.Get("custom_configure_recipes").([]interface{})),
Deploy: expandStringList(d.Get("custom_deploy_recipes").([]interface{})),
Setup: expandStringList(d.Get("custom_setup_recipes").([]interface{})),
Shutdown: expandStringList(d.Get("custom_shutdown_recipes").([]interface{})),
Undeploy: expandStringList(d.Get("custom_undeploy_recipes").([]interface{})),
}
}
func (lt *opsworksLayerType) SetCustomRecipes(d *schema.ResourceData, v *opsworks.Recipes) {
// Null out everything first, and then we'll consider what to put back.
d.Set("custom_configure_recipes", nil)
d.Set("custom_deploy_recipes", nil)
d.Set("custom_setup_recipes", nil)
d.Set("custom_shutdown_recipes", nil)
d.Set("custom_undeploy_recipes", nil)
if v == nil {
return
}
d.Set("custom_configure_recipes", flattenStringList(v.Configure))
d.Set("custom_deploy_recipes", flattenStringList(v.Deploy))
d.Set("custom_setup_recipes", flattenStringList(v.Setup))
d.Set("custom_shutdown_recipes", flattenStringList(v.Shutdown))
d.Set("custom_undeploy_recipes", flattenStringList(v.Undeploy))
}
func (lt *opsworksLayerType) VolumeConfigurations(d *schema.ResourceData) []*opsworks.VolumeConfiguration {
configuredVolumes := d.Get("ebs_volume").(*schema.Set).List()
result := make([]*opsworks.VolumeConfiguration, len(configuredVolumes))
for i := 0; i < len(configuredVolumes); i++ {
volumeData := configuredVolumes[i].(map[string]interface{})
result[i] = &opsworks.VolumeConfiguration{
MountPoint: aws.String(volumeData["mount_point"].(string)),
NumberOfDisks: aws.Int64(int64(volumeData["number_of_disks"].(int))),
Size: aws.Int64(int64(volumeData["size"].(int))),
VolumeType: aws.String(volumeData["type"].(string)),
}
iops := int64(volumeData["iops"].(int))
if iops != 0 {
result[i].Iops = aws.Int64(iops)
}
raidLevelStr := volumeData["raid_level"].(string)
if raidLevelStr != "" {
raidLevel, err := strconv.Atoi(raidLevelStr)
if err == nil {
result[i].RaidLevel = aws.Int64(int64(raidLevel))
}
}
}
return result
}
func (lt *opsworksLayerType) SetVolumeConfigurations(d *schema.ResourceData, v []*opsworks.VolumeConfiguration) {
newValue := make([]*map[string]interface{}, len(v))
for i := 0; i < len(v); i++ {
config := v[i]
data := make(map[string]interface{})
newValue[i] = &data
if config.Iops != nil {
data["iops"] = int(*config.Iops)
} else {
data["iops"] = 0
}
if config.MountPoint != nil {
data["mount_point"] = *config.MountPoint
}
if config.NumberOfDisks != nil {
data["number_of_disks"] = int(*config.NumberOfDisks)
}
if config.RaidLevel != nil {
data["raid_level"] = strconv.Itoa(int(*config.RaidLevel))
}
if config.Size != nil {
data["size"] = int(*config.Size)
}
if config.VolumeType != nil {
data["type"] = *config.VolumeType
}
}
d.Set("ebs_volume", newValue)
}
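The AttributeMap/SetAttributeMap pair above round-trips typed schema values through the string-only attribute map that the OpsWorks API expects. The following is a minimal, self-contained sketch of that encode/decode idea; the attribute name and value are illustrative only and not part of the vendored file.

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Encode: every layer attribute travels to the API as a *string.
	attrs := map[string]*string{}
	port := strconv.Itoa(8649) // hypothetical int-typed attribute value
	attrs["ExamplePort"] = &port

	// Decode: parse the string back, tolerating garbage the same way
	// SetAttributeMap does for malformed values.
	if raw, ok := attrs["ExamplePort"]; ok && raw != nil {
		if n, err := strconv.Atoi(*raw); err == nil {
			fmt.Println("decoded:", n)
		} else {
			fmt.Println("got garbage from the API; leaving the value unset")
		}
	}
}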

View File

@ -0,0 +1,815 @@
package aws
import (
"bytes"
"fmt"
"log"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/mutexkv"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
)
// Provider returns a terraform.ResourceProvider.
func Provider() terraform.ResourceProvider {
// TODO: Move the validation to this, requires conditional schemas
// TODO: Move the configuration to this, requires validation
// The actual provider
return &schema.Provider{
Schema: map[string]*schema.Schema{
"access_key": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: descriptions["access_key"],
},
"secret_key": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: descriptions["secret_key"],
},
"profile": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: descriptions["profile"],
},
"assume_role": assumeRoleSchema(),
"shared_credentials_file": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: descriptions["shared_credentials_file"],
},
"token": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: descriptions["token"],
},
"region": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.MultiEnvDefaultFunc([]string{
"AWS_REGION",
"AWS_DEFAULT_REGION",
}, nil),
Description: descriptions["region"],
InputDefault: "us-east-1",
},
"max_retries": {
Type: schema.TypeInt,
Optional: true,
Default: 25,
Description: descriptions["max_retries"],
},
"allowed_account_ids": {
Type: schema.TypeSet,
Elem: &schema.Schema{Type: schema.TypeString},
Optional: true,
ConflictsWith: []string{"forbidden_account_ids"},
Set: schema.HashString,
},
"forbidden_account_ids": {
Type: schema.TypeSet,
Elem: &schema.Schema{Type: schema.TypeString},
Optional: true,
ConflictsWith: []string{"allowed_account_ids"},
Set: schema.HashString,
},
"dynamodb_endpoint": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: descriptions["dynamodb_endpoint"],
Removed: "Use `dynamodb` inside `endpoints` block instead",
},
"kinesis_endpoint": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: descriptions["kinesis_endpoint"],
Removed: "Use `kinesis` inside `endpoints` block instead",
},
"endpoints": endpointsSchema(),
"insecure": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: descriptions["insecure"],
},
"skip_credentials_validation": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: descriptions["skip_credentials_validation"],
},
"skip_get_ec2_platforms": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: descriptions["skip_get_ec2_platforms"],
},
"skip_region_validation": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: descriptions["skip_region_validation"],
},
"skip_requesting_account_id": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: descriptions["skip_requesting_account_id"],
},
"skip_metadata_api_check": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: descriptions["skip_metadata_api_check"],
},
"s3_force_path_style": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: descriptions["s3_force_path_style"],
},
},
DataSourcesMap: map[string]*schema.Resource{
"aws_acm_certificate": dataSourceAwsAcmCertificate(),
"aws_alb": dataSourceAwsAlb(),
"aws_alb_listener": dataSourceAwsAlbListener(),
"aws_ami": dataSourceAwsAmi(),
"aws_ami_ids": dataSourceAwsAmiIds(),
"aws_autoscaling_groups": dataSourceAwsAutoscalingGroups(),
"aws_availability_zone": dataSourceAwsAvailabilityZone(),
"aws_availability_zones": dataSourceAwsAvailabilityZones(),
"aws_billing_service_account": dataSourceAwsBillingServiceAccount(),
"aws_caller_identity": dataSourceAwsCallerIdentity(),
"aws_canonical_user_id": dataSourceAwsCanonicalUserId(),
"aws_cloudformation_stack": dataSourceAwsCloudFormationStack(),
"aws_db_instance": dataSourceAwsDbInstance(),
"aws_db_snapshot": dataSourceAwsDbSnapshot(),
"aws_ebs_snapshot": dataSourceAwsEbsSnapshot(),
"aws_ebs_snapshot_ids": dataSourceAwsEbsSnapshotIds(),
"aws_ebs_volume": dataSourceAwsEbsVolume(),
"aws_ecs_cluster": dataSourceAwsEcsCluster(),
"aws_ecs_container_definition": dataSourceAwsEcsContainerDefinition(),
"aws_ecs_task_definition": dataSourceAwsEcsTaskDefinition(),
"aws_efs_file_system": dataSourceAwsEfsFileSystem(),
"aws_eip": dataSourceAwsEip(),
"aws_elastic_beanstalk_solution_stack": dataSourceAwsElasticBeanstalkSolutionStack(),
"aws_elasticache_cluster": dataSourceAwsElastiCacheCluster(),
"aws_elb_hosted_zone_id": dataSourceAwsElbHostedZoneId(),
"aws_elb_service_account": dataSourceAwsElbServiceAccount(),
"aws_iam_account_alias": dataSourceAwsIamAccountAlias(),
"aws_iam_policy_document": dataSourceAwsIamPolicyDocument(),
"aws_iam_role": dataSourceAwsIAMRole(),
"aws_iam_server_certificate": dataSourceAwsIAMServerCertificate(),
"aws_instance": dataSourceAwsInstance(),
"aws_ip_ranges": dataSourceAwsIPRanges(),
"aws_kinesis_stream": dataSourceAwsKinesisStream(),
"aws_kms_alias": dataSourceAwsKmsAlias(),
"aws_kms_ciphertext": dataSourceAwsKmsCiphetext(),
"aws_kms_secret": dataSourceAwsKmsSecret(),
"aws_partition": dataSourceAwsPartition(),
"aws_prefix_list": dataSourceAwsPrefixList(),
"aws_redshift_service_account": dataSourceAwsRedshiftServiceAccount(),
"aws_region": dataSourceAwsRegion(),
"aws_route_table": dataSourceAwsRouteTable(),
"aws_route53_zone": dataSourceAwsRoute53Zone(),
"aws_s3_bucket_object": dataSourceAwsS3BucketObject(),
"aws_sns_topic": dataSourceAwsSnsTopic(),
"aws_ssm_parameter": dataSourceAwsSsmParameter(),
"aws_subnet": dataSourceAwsSubnet(),
"aws_subnet_ids": dataSourceAwsSubnetIDs(),
"aws_security_group": dataSourceAwsSecurityGroup(),
"aws_vpc": dataSourceAwsVpc(),
"aws_vpc_endpoint": dataSourceAwsVpcEndpoint(),
"aws_vpc_endpoint_service": dataSourceAwsVpcEndpointService(),
"aws_vpc_peering_connection": dataSourceAwsVpcPeeringConnection(),
"aws_vpn_gateway": dataSourceAwsVpnGateway(),
},
ResourcesMap: map[string]*schema.Resource{
"aws_alb": resourceAwsAlb(),
"aws_alb_listener": resourceAwsAlbListener(),
"aws_alb_listener_rule": resourceAwsAlbListenerRule(),
"aws_alb_target_group": resourceAwsAlbTargetGroup(),
"aws_alb_target_group_attachment": resourceAwsAlbTargetGroupAttachment(),
"aws_ami": resourceAwsAmi(),
"aws_ami_copy": resourceAwsAmiCopy(),
"aws_ami_from_instance": resourceAwsAmiFromInstance(),
"aws_ami_launch_permission": resourceAwsAmiLaunchPermission(),
"aws_api_gateway_account": resourceAwsApiGatewayAccount(),
"aws_api_gateway_api_key": resourceAwsApiGatewayApiKey(),
"aws_api_gateway_authorizer": resourceAwsApiGatewayAuthorizer(),
"aws_api_gateway_base_path_mapping": resourceAwsApiGatewayBasePathMapping(),
"aws_api_gateway_client_certificate": resourceAwsApiGatewayClientCertificate(),
"aws_api_gateway_deployment": resourceAwsApiGatewayDeployment(),
"aws_api_gateway_domain_name": resourceAwsApiGatewayDomainName(),
"aws_api_gateway_integration": resourceAwsApiGatewayIntegration(),
"aws_api_gateway_integration_response": resourceAwsApiGatewayIntegrationResponse(),
"aws_api_gateway_method": resourceAwsApiGatewayMethod(),
"aws_api_gateway_method_response": resourceAwsApiGatewayMethodResponse(),
"aws_api_gateway_method_settings": resourceAwsApiGatewayMethodSettings(),
"aws_api_gateway_model": resourceAwsApiGatewayModel(),
"aws_api_gateway_resource": resourceAwsApiGatewayResource(),
"aws_api_gateway_rest_api": resourceAwsApiGatewayRestApi(),
"aws_api_gateway_stage": resourceAwsApiGatewayStage(),
"aws_api_gateway_usage_plan": resourceAwsApiGatewayUsagePlan(),
"aws_api_gateway_usage_plan_key": resourceAwsApiGatewayUsagePlanKey(),
"aws_app_cookie_stickiness_policy": resourceAwsAppCookieStickinessPolicy(),
"aws_appautoscaling_target": resourceAwsAppautoscalingTarget(),
"aws_appautoscaling_policy": resourceAwsAppautoscalingPolicy(),
"aws_autoscaling_attachment": resourceAwsAutoscalingAttachment(),
"aws_autoscaling_group": resourceAwsAutoscalingGroup(),
"aws_autoscaling_notification": resourceAwsAutoscalingNotification(),
"aws_autoscaling_policy": resourceAwsAutoscalingPolicy(),
"aws_autoscaling_schedule": resourceAwsAutoscalingSchedule(),
"aws_cloudformation_stack": resourceAwsCloudFormationStack(),
"aws_cloudfront_distribution": resourceAwsCloudFrontDistribution(),
"aws_cloudfront_origin_access_identity": resourceAwsCloudFrontOriginAccessIdentity(),
"aws_cloudtrail": resourceAwsCloudTrail(),
"aws_cloudwatch_event_rule": resourceAwsCloudWatchEventRule(),
"aws_cloudwatch_event_target": resourceAwsCloudWatchEventTarget(),
"aws_cloudwatch_log_destination": resourceAwsCloudWatchLogDestination(),
"aws_cloudwatch_log_destination_policy": resourceAwsCloudWatchLogDestinationPolicy(),
"aws_cloudwatch_log_group": resourceAwsCloudWatchLogGroup(),
"aws_cloudwatch_log_metric_filter": resourceAwsCloudWatchLogMetricFilter(),
"aws_cloudwatch_log_stream": resourceAwsCloudWatchLogStream(),
"aws_cloudwatch_log_subscription_filter": resourceAwsCloudwatchLogSubscriptionFilter(),
"aws_config_config_rule": resourceAwsConfigConfigRule(),
"aws_config_configuration_recorder": resourceAwsConfigConfigurationRecorder(),
"aws_config_configuration_recorder_status": resourceAwsConfigConfigurationRecorderStatus(),
"aws_config_delivery_channel": resourceAwsConfigDeliveryChannel(),
"aws_cognito_identity_pool": resourceAwsCognitoIdentityPool(),
"aws_autoscaling_lifecycle_hook": resourceAwsAutoscalingLifecycleHook(),
"aws_cloudwatch_metric_alarm": resourceAwsCloudWatchMetricAlarm(),
"aws_codedeploy_app": resourceAwsCodeDeployApp(),
"aws_codedeploy_deployment_config": resourceAwsCodeDeployDeploymentConfig(),
"aws_codedeploy_deployment_group": resourceAwsCodeDeployDeploymentGroup(),
"aws_codecommit_repository": resourceAwsCodeCommitRepository(),
"aws_codecommit_trigger": resourceAwsCodeCommitTrigger(),
"aws_codebuild_project": resourceAwsCodeBuildProject(),
"aws_codepipeline": resourceAwsCodePipeline(),
"aws_customer_gateway": resourceAwsCustomerGateway(),
"aws_db_event_subscription": resourceAwsDbEventSubscription(),
"aws_db_instance": resourceAwsDbInstance(),
"aws_db_option_group": resourceAwsDbOptionGroup(),
"aws_db_parameter_group": resourceAwsDbParameterGroup(),
"aws_db_security_group": resourceAwsDbSecurityGroup(),
"aws_db_snapshot": resourceAwsDbSnapshot(),
"aws_db_subnet_group": resourceAwsDbSubnetGroup(),
"aws_devicefarm_project": resourceAwsDevicefarmProject(),
"aws_directory_service_directory": resourceAwsDirectoryServiceDirectory(),
"aws_dms_certificate": resourceAwsDmsCertificate(),
"aws_dms_endpoint": resourceAwsDmsEndpoint(),
"aws_dms_replication_instance": resourceAwsDmsReplicationInstance(),
"aws_dms_replication_subnet_group": resourceAwsDmsReplicationSubnetGroup(),
"aws_dms_replication_task": resourceAwsDmsReplicationTask(),
"aws_dynamodb_table": resourceAwsDynamoDbTable(),
"aws_ebs_snapshot": resourceAwsEbsSnapshot(),
"aws_ebs_volume": resourceAwsEbsVolume(),
"aws_ecr_repository": resourceAwsEcrRepository(),
"aws_ecr_repository_policy": resourceAwsEcrRepositoryPolicy(),
"aws_ecs_cluster": resourceAwsEcsCluster(),
"aws_ecs_service": resourceAwsEcsService(),
"aws_ecs_task_definition": resourceAwsEcsTaskDefinition(),
"aws_efs_file_system": resourceAwsEfsFileSystem(),
"aws_efs_mount_target": resourceAwsEfsMountTarget(),
"aws_egress_only_internet_gateway": resourceAwsEgressOnlyInternetGateway(),
"aws_eip": resourceAwsEip(),
"aws_eip_association": resourceAwsEipAssociation(),
"aws_elasticache_cluster": resourceAwsElasticacheCluster(),
"aws_elasticache_parameter_group": resourceAwsElasticacheParameterGroup(),
"aws_elasticache_replication_group": resourceAwsElasticacheReplicationGroup(),
"aws_elasticache_security_group": resourceAwsElasticacheSecurityGroup(),
"aws_elasticache_subnet_group": resourceAwsElasticacheSubnetGroup(),
"aws_elastic_beanstalk_application": resourceAwsElasticBeanstalkApplication(),
"aws_elastic_beanstalk_application_version": resourceAwsElasticBeanstalkApplicationVersion(),
"aws_elastic_beanstalk_configuration_template": resourceAwsElasticBeanstalkConfigurationTemplate(),
"aws_elastic_beanstalk_environment": resourceAwsElasticBeanstalkEnvironment(),
"aws_elasticsearch_domain": resourceAwsElasticSearchDomain(),
"aws_elasticsearch_domain_policy": resourceAwsElasticSearchDomainPolicy(),
"aws_elastictranscoder_pipeline": resourceAwsElasticTranscoderPipeline(),
"aws_elastictranscoder_preset": resourceAwsElasticTranscoderPreset(),
"aws_elb": resourceAwsElb(),
"aws_elb_attachment": resourceAwsElbAttachment(),
"aws_emr_cluster": resourceAwsEMRCluster(),
"aws_emr_instance_group": resourceAwsEMRInstanceGroup(),
"aws_emr_security_configuration": resourceAwsEMRSecurityConfiguration(),
"aws_flow_log": resourceAwsFlowLog(),
"aws_glacier_vault": resourceAwsGlacierVault(),
"aws_iam_access_key": resourceAwsIamAccessKey(),
"aws_iam_account_alias": resourceAwsIamAccountAlias(),
"aws_iam_account_password_policy": resourceAwsIamAccountPasswordPolicy(),
"aws_iam_group_policy": resourceAwsIamGroupPolicy(),
"aws_iam_group": resourceAwsIamGroup(),
"aws_iam_group_membership": resourceAwsIamGroupMembership(),
"aws_iam_group_policy_attachment": resourceAwsIamGroupPolicyAttachment(),
"aws_iam_instance_profile": resourceAwsIamInstanceProfile(),
"aws_iam_openid_connect_provider": resourceAwsIamOpenIDConnectProvider(),
"aws_iam_policy": resourceAwsIamPolicy(),
"aws_iam_policy_attachment": resourceAwsIamPolicyAttachment(),
"aws_iam_role_policy_attachment": resourceAwsIamRolePolicyAttachment(),
"aws_iam_role_policy": resourceAwsIamRolePolicy(),
"aws_iam_role": resourceAwsIamRole(),
"aws_iam_saml_provider": resourceAwsIamSamlProvider(),
"aws_iam_server_certificate": resourceAwsIAMServerCertificate(),
"aws_iam_user_policy_attachment": resourceAwsIamUserPolicyAttachment(),
"aws_iam_user_policy": resourceAwsIamUserPolicy(),
"aws_iam_user_ssh_key": resourceAwsIamUserSshKey(),
"aws_iam_user": resourceAwsIamUser(),
"aws_iam_user_login_profile": resourceAwsIamUserLoginProfile(),
"aws_inspector_assessment_target": resourceAWSInspectorAssessmentTarget(),
"aws_inspector_assessment_template": resourceAWSInspectorAssessmentTemplate(),
"aws_inspector_resource_group": resourceAWSInspectorResourceGroup(),
"aws_instance": resourceAwsInstance(),
"aws_internet_gateway": resourceAwsInternetGateway(),
"aws_key_pair": resourceAwsKeyPair(),
"aws_kinesis_firehose_delivery_stream": resourceAwsKinesisFirehoseDeliveryStream(),
"aws_kinesis_stream": resourceAwsKinesisStream(),
"aws_kms_alias": resourceAwsKmsAlias(),
"aws_kms_key": resourceAwsKmsKey(),
"aws_lambda_function": resourceAwsLambdaFunction(),
"aws_lambda_event_source_mapping": resourceAwsLambdaEventSourceMapping(),
"aws_lambda_alias": resourceAwsLambdaAlias(),
"aws_lambda_permission": resourceAwsLambdaPermission(),
"aws_launch_configuration": resourceAwsLaunchConfiguration(),
"aws_lightsail_domain": resourceAwsLightsailDomain(),
"aws_lightsail_instance": resourceAwsLightsailInstance(),
"aws_lightsail_key_pair": resourceAwsLightsailKeyPair(),
"aws_lightsail_static_ip": resourceAwsLightsailStaticIp(),
"aws_lightsail_static_ip_attachment": resourceAwsLightsailStaticIpAttachment(),
"aws_lb_cookie_stickiness_policy": resourceAwsLBCookieStickinessPolicy(),
"aws_load_balancer_policy": resourceAwsLoadBalancerPolicy(),
"aws_load_balancer_backend_server_policy": resourceAwsLoadBalancerBackendServerPolicies(),
"aws_load_balancer_listener_policy": resourceAwsLoadBalancerListenerPolicies(),
"aws_lb_ssl_negotiation_policy": resourceAwsLBSSLNegotiationPolicy(),
"aws_main_route_table_association": resourceAwsMainRouteTableAssociation(),
"aws_nat_gateway": resourceAwsNatGateway(),
"aws_network_acl": resourceAwsNetworkAcl(),
"aws_default_network_acl": resourceAwsDefaultNetworkAcl(),
"aws_network_acl_rule": resourceAwsNetworkAclRule(),
"aws_network_interface": resourceAwsNetworkInterface(),
"aws_network_interface_attachment": resourceAwsNetworkInterfaceAttachment(),
"aws_opsworks_application": resourceAwsOpsworksApplication(),
"aws_opsworks_stack": resourceAwsOpsworksStack(),
"aws_opsworks_java_app_layer": resourceAwsOpsworksJavaAppLayer(),
"aws_opsworks_haproxy_layer": resourceAwsOpsworksHaproxyLayer(),
"aws_opsworks_static_web_layer": resourceAwsOpsworksStaticWebLayer(),
"aws_opsworks_php_app_layer": resourceAwsOpsworksPhpAppLayer(),
"aws_opsworks_rails_app_layer": resourceAwsOpsworksRailsAppLayer(),
"aws_opsworks_nodejs_app_layer": resourceAwsOpsworksNodejsAppLayer(),
"aws_opsworks_memcached_layer": resourceAwsOpsworksMemcachedLayer(),
"aws_opsworks_mysql_layer": resourceAwsOpsworksMysqlLayer(),
"aws_opsworks_ganglia_layer": resourceAwsOpsworksGangliaLayer(),
"aws_opsworks_custom_layer": resourceAwsOpsworksCustomLayer(),
"aws_opsworks_instance": resourceAwsOpsworksInstance(),
"aws_opsworks_user_profile": resourceAwsOpsworksUserProfile(),
"aws_opsworks_permission": resourceAwsOpsworksPermission(),
"aws_opsworks_rds_db_instance": resourceAwsOpsworksRdsDbInstance(),
"aws_placement_group": resourceAwsPlacementGroup(),
"aws_proxy_protocol_policy": resourceAwsProxyProtocolPolicy(),
"aws_rds_cluster": resourceAwsRDSCluster(),
"aws_rds_cluster_instance": resourceAwsRDSClusterInstance(),
"aws_rds_cluster_parameter_group": resourceAwsRDSClusterParameterGroup(),
"aws_redshift_cluster": resourceAwsRedshiftCluster(),
"aws_redshift_security_group": resourceAwsRedshiftSecurityGroup(),
"aws_redshift_parameter_group": resourceAwsRedshiftParameterGroup(),
"aws_redshift_subnet_group": resourceAwsRedshiftSubnetGroup(),
"aws_route53_delegation_set": resourceAwsRoute53DelegationSet(),
"aws_route53_record": resourceAwsRoute53Record(),
"aws_route53_zone_association": resourceAwsRoute53ZoneAssociation(),
"aws_route53_zone": resourceAwsRoute53Zone(),
"aws_route53_health_check": resourceAwsRoute53HealthCheck(),
"aws_route": resourceAwsRoute(),
"aws_route_table": resourceAwsRouteTable(),
"aws_default_route_table": resourceAwsDefaultRouteTable(),
"aws_route_table_association": resourceAwsRouteTableAssociation(),
"aws_ses_active_receipt_rule_set": resourceAwsSesActiveReceiptRuleSet(),
"aws_ses_domain_identity": resourceAwsSesDomainIdentity(),
"aws_ses_receipt_filter": resourceAwsSesReceiptFilter(),
"aws_ses_receipt_rule": resourceAwsSesReceiptRule(),
"aws_ses_receipt_rule_set": resourceAwsSesReceiptRuleSet(),
"aws_ses_configuration_set": resourceAwsSesConfigurationSet(),
"aws_ses_event_destination": resourceAwsSesEventDestination(),
"aws_s3_bucket": resourceAwsS3Bucket(),
"aws_s3_bucket_policy": resourceAwsS3BucketPolicy(),
"aws_s3_bucket_object": resourceAwsS3BucketObject(),
"aws_s3_bucket_notification": resourceAwsS3BucketNotification(),
"aws_security_group": resourceAwsSecurityGroup(),
"aws_default_security_group": resourceAwsDefaultSecurityGroup(),
"aws_security_group_rule": resourceAwsSecurityGroupRule(),
"aws_simpledb_domain": resourceAwsSimpleDBDomain(),
"aws_ssm_activation": resourceAwsSsmActivation(),
"aws_ssm_association": resourceAwsSsmAssociation(),
"aws_ssm_document": resourceAwsSsmDocument(),
"aws_ssm_maintenance_window": resourceAwsSsmMaintenanceWindow(),
"aws_ssm_maintenance_window_target": resourceAwsSsmMaintenanceWindowTarget(),
"aws_ssm_maintenance_window_task": resourceAwsSsmMaintenanceWindowTask(),
"aws_ssm_patch_baseline": resourceAwsSsmPatchBaseline(),
"aws_ssm_patch_group": resourceAwsSsmPatchGroup(),
"aws_ssm_parameter": resourceAwsSsmParameter(),
"aws_spot_datafeed_subscription": resourceAwsSpotDataFeedSubscription(),
"aws_spot_instance_request": resourceAwsSpotInstanceRequest(),
"aws_spot_fleet_request": resourceAwsSpotFleetRequest(),
"aws_sqs_queue": resourceAwsSqsQueue(),
"aws_sqs_queue_policy": resourceAwsSqsQueuePolicy(),
"aws_snapshot_create_volume_permission": resourceAwsSnapshotCreateVolumePermission(),
"aws_sns_topic": resourceAwsSnsTopic(),
"aws_sns_topic_policy": resourceAwsSnsTopicPolicy(),
"aws_sns_topic_subscription": resourceAwsSnsTopicSubscription(),
"aws_sfn_activity": resourceAwsSfnActivity(),
"aws_sfn_state_machine": resourceAwsSfnStateMachine(),
"aws_default_subnet": resourceAwsDefaultSubnet(),
"aws_subnet": resourceAwsSubnet(),
"aws_volume_attachment": resourceAwsVolumeAttachment(),
"aws_vpc_dhcp_options_association": resourceAwsVpcDhcpOptionsAssociation(),
"aws_default_vpc_dhcp_options": resourceAwsDefaultVpcDhcpOptions(),
"aws_vpc_dhcp_options": resourceAwsVpcDhcpOptions(),
"aws_vpc_peering_connection": resourceAwsVpcPeeringConnection(),
"aws_vpc_peering_connection_accepter": resourceAwsVpcPeeringConnectionAccepter(),
"aws_default_vpc": resourceAwsDefaultVpc(),
"aws_vpc": resourceAwsVpc(),
"aws_vpc_endpoint": resourceAwsVpcEndpoint(),
"aws_vpc_endpoint_route_table_association": resourceAwsVpcEndpointRouteTableAssociation(),
"aws_vpn_connection": resourceAwsVpnConnection(),
"aws_vpn_connection_route": resourceAwsVpnConnectionRoute(),
"aws_vpn_gateway": resourceAwsVpnGateway(),
"aws_vpn_gateway_attachment": resourceAwsVpnGatewayAttachment(),
"aws_vpn_gateway_route_propagation": resourceAwsVpnGatewayRoutePropagation(),
"aws_waf_byte_match_set": resourceAwsWafByteMatchSet(),
"aws_waf_ipset": resourceAwsWafIPSet(),
"aws_waf_rule": resourceAwsWafRule(),
"aws_waf_size_constraint_set": resourceAwsWafSizeConstraintSet(),
"aws_waf_web_acl": resourceAwsWafWebAcl(),
"aws_waf_xss_match_set": resourceAwsWafXssMatchSet(),
"aws_waf_sql_injection_match_set": resourceAwsWafSqlInjectionMatchSet(),
"aws_wafregional_byte_match_set": resourceAwsWafRegionalByteMatchSet(),
"aws_wafregional_ipset": resourceAwsWafRegionalIPSet(),
},
ConfigureFunc: providerConfigure,
}
}
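// descriptions maps each provider argument name to the help text referenced by
// the schema above; it is populated once by init below.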
var descriptions map[string]string
func init() {
descriptions = map[string]string{
"region": "The region where AWS operations will take place. Examples\n" +
"are us-east-1, us-west-2, etc.",
"access_key": "The access key for API operations. You can retrieve this\n" +
"from the 'Security & Credentials' section of the AWS console.",
"secret_key": "The secret key for API operations. You can retrieve this\n" +
"from the 'Security & Credentials' section of the AWS console.",
"profile": "The profile for API operations. If not set, the default profile\n" +
"created with `aws configure` will be used.",
"shared_credentials_file": "The path to the shared credentials file. If not set\n" +
"this defaults to ~/.aws/credentials.",
"token": "session token. A session token is only required if you are\n" +
"using temporary security credentials.",
"max_retries": "The maximum number of times an AWS API request is\n" +
"being executed. If the API request still fails, an error is\n" +
"thrown.",
"cloudformation_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
"cloudwatch_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
"cloudwatchevents_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
"cloudwatchlogs_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
"devicefarm_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
"dynamodb_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n" +
"It's typically used to connect to dynamodb-local.",
"kinesis_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n" +
"It's typically used to connect to kinesalite.",
"kms_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
"iam_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
"ec2_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
"elb_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
"rds_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
"s3_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
"sns_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
"sqs_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
"insecure": "Explicitly allow the provider to perform \"insecure\" SSL requests. If omitted," +
"default value is `false`",
"skip_credentials_validation": "Skip the credentials validation via STS API. " +
"Used for AWS API implementations that do not have STS available/implemented.",
"skip_get_ec2_platforms": "Skip getting the supported EC2 platforms. " +
"Used by users that don't have ec2:DescribeAccountAttributes permissions.",
"skip_region_validation": "Skip static validation of region name. " +
"Used by users of alternative AWS-like APIs or users w/ access to regions that are not public (yet).",
"skip_requesting_account_id": "Skip requesting the account ID. " +
"Used for AWS API implementations that do not have IAM/STS API and/or metadata API.",
"skip_medatadata_api_check": "Skip the AWS Metadata API check. " +
"Used for AWS API implementations that do not have a metadata api endpoint.",
"s3_force_path_style": "Set this to true to force the request to use path-style addressing,\n" +
"i.e., http://s3.amazonaws.com/BUCKET/KEY. By default, the S3 client will\n" +
"use virtual hosted bucket addressing when possible\n" +
"(http://BUCKET.s3.amazonaws.com/KEY). Specific to the Amazon S3 service.",
"assume_role_role_arn": "The ARN of an IAM role to assume prior to making API calls.",
"assume_role_session_name": "The session name to use when assuming the role. If omitted," +
" no session name is passed to the AssumeRole call.",
"assume_role_external_id": "The external ID to use when assuming the role. If omitted," +
" no external ID is passed to the AssumeRole call.",
"assume_role_policy": "The permissions applied when assuming a role. You cannot use," +
" this policy to grant further permissions that are in excess to those of the, " +
" role that is being assumed.",
}
}
func providerConfigure(d *schema.ResourceData) (interface{}, error) {
config := Config{
AccessKey: d.Get("access_key").(string),
SecretKey: d.Get("secret_key").(string),
Profile: d.Get("profile").(string),
CredsFilename: d.Get("shared_credentials_file").(string),
Token: d.Get("token").(string),
Region: d.Get("region").(string),
MaxRetries: d.Get("max_retries").(int),
Insecure: d.Get("insecure").(bool),
SkipCredsValidation: d.Get("skip_credentials_validation").(bool),
SkipGetEC2Platforms: d.Get("skip_get_ec2_platforms").(bool),
SkipRegionValidation: d.Get("skip_region_validation").(bool),
SkipRequestingAccountId: d.Get("skip_requesting_account_id").(bool),
SkipMetadataApiCheck: d.Get("skip_metadata_api_check").(bool),
S3ForcePathStyle: d.Get("s3_force_path_style").(bool),
}
assumeRoleList := d.Get("assume_role").(*schema.Set).List()
if len(assumeRoleList) == 1 {
assumeRole := assumeRoleList[0].(map[string]interface{})
config.AssumeRoleARN = assumeRole["role_arn"].(string)
config.AssumeRoleSessionName = assumeRole["session_name"].(string)
config.AssumeRoleExternalID = assumeRole["external_id"].(string)
if v := assumeRole["policy"].(string); v != "" {
config.AssumeRolePolicy = v
}
log.Printf("[INFO] assume_role configuration set: (ARN: %q, SessionID: %q, ExternalID: %q, Policy: %q)",
config.AssumeRoleARN, config.AssumeRoleSessionName, config.AssumeRoleExternalID, config.AssumeRolePolicy)
} else {
log.Printf("[INFO] No assume_role block read from configuration")
}
endpointsSet := d.Get("endpoints").(*schema.Set)
for _, endpointsSetI := range endpointsSet.List() {
endpoints := endpointsSetI.(map[string]interface{})
config.CloudFormationEndpoint = endpoints["cloudformation"].(string)
config.CloudWatchEndpoint = endpoints["cloudwatch"].(string)
config.CloudWatchEventsEndpoint = endpoints["cloudwatchevents"].(string)
config.CloudWatchLogsEndpoint = endpoints["cloudwatchlogs"].(string)
config.DeviceFarmEndpoint = endpoints["devicefarm"].(string)
config.DynamoDBEndpoint = endpoints["dynamodb"].(string)
config.Ec2Endpoint = endpoints["ec2"].(string)
config.ElbEndpoint = endpoints["elb"].(string)
config.IamEndpoint = endpoints["iam"].(string)
config.KinesisEndpoint = endpoints["kinesis"].(string)
config.KmsEndpoint = endpoints["kms"].(string)
config.RdsEndpoint = endpoints["rds"].(string)
config.S3Endpoint = endpoints["s3"].(string)
config.SnsEndpoint = endpoints["sns"].(string)
config.SqsEndpoint = endpoints["sqs"].(string)
}
if v, ok := d.GetOk("allowed_account_ids"); ok {
config.AllowedAccountIds = v.(*schema.Set).List()
}
if v, ok := d.GetOk("forbidden_account_ids"); ok {
config.ForbiddenAccountIds = v.(*schema.Set).List()
}
return config.Client()
}
// This is a global MutexKV for use within this plugin.
var awsMutexKV = mutexkv.NewMutexKV()
func assumeRoleSchema() *schema.Schema {
return &schema.Schema{
Type: schema.TypeSet,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"role_arn": {
Type: schema.TypeString,
Optional: true,
Description: descriptions["assume_role_role_arn"],
},
"session_name": {
Type: schema.TypeString,
Optional: true,
Description: descriptions["assume_role_session_name"],
},
"external_id": {
Type: schema.TypeString,
Optional: true,
Description: descriptions["assume_role_external_id"],
},
"policy": {
Type: schema.TypeString,
Optional: true,
Description: descriptions["assume_role_policy"],
},
},
},
Set: assumeRoleToHash,
}
}
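// assumeRoleToHash derives a stable hash for the single-item assume_role set by
// concatenating its string fields, so helper/schema can detect changes to the block.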
func assumeRoleToHash(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})
buf.WriteString(fmt.Sprintf("%s-", m["role_arn"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["session_name"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["external_id"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["policy"].(string)))
return hashcode.String(buf.String())
}
func endpointsSchema() *schema.Schema {
return &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cloudwatch": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: descriptions["cloudwatch_endpoint"],
},
"cloudwatchevents": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: descriptions["cloudwatchevents_endpoint"],
},
"cloudwatchlogs": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: descriptions["cloudwatchlogs_endpoint"],
},
"cloudformation": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: descriptions["cloudformation_endpoint"],
},
"devicefarm": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: descriptions["devicefarm_endpoint"],
},
"dynamodb": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: descriptions["dynamodb_endpoint"],
},
"iam": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: descriptions["iam_endpoint"],
},
"ec2": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: descriptions["ec2_endpoint"],
},
"elb": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: descriptions["elb_endpoint"],
},
"kinesis": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: descriptions["kinesis_endpoint"],
},
"kms": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: descriptions["kms_endpoint"],
},
"rds": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: descriptions["rds_endpoint"],
},
"s3": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: descriptions["s3_endpoint"],
},
"sns": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: descriptions["sns_endpoint"],
},
"sqs": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: descriptions["sqs_endpoint"],
},
},
},
Set: endpointsToHash,
}
}
func endpointsToHash(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})
buf.WriteString(fmt.Sprintf("%s-", m["cloudwatch"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["cloudwatchevents"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["cloudwatchlogs"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["cloudformation"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["devicefarm"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["dynamodb"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["iam"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["ec2"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["elb"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["kinesis"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["kms"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["rds"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["s3"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["sns"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["sqs"].(string)))
return hashcode.String(buf.String())
}
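For context, a provider package like this one is normally exposed to Terraform core through the plugin helper. The following is a minimal sketch, assuming the vendored package lives at the import path shown and using the plugin/terraform helpers of this Terraform vintage; it is not part of the vendored file.

package main

import (
	"github.com/hashicorp/terraform/builtin/providers/aws" // assumed import path for the package above
	"github.com/hashicorp/terraform/plugin"
	"github.com/hashicorp/terraform/terraform"
)

func main() {
	// Hand the Provider constructor to Terraform's plugin server.
	plugin.Serve(&plugin.ServeOpts{
		ProviderFunc: func() terraform.ResourceProvider {
			return aws.Provider()
		},
	})
}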

View File

@ -0,0 +1,497 @@
package aws
import (
"fmt"
"log"
"regexp"
"strconv"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/elbv2"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsAlb() *schema.Resource {
return &schema.Resource{
Create: resourceAwsAlbCreate,
Read: resourceAwsAlbRead,
Update: resourceAwsAlbUpdate,
Delete: resourceAwsAlbDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Timeouts: &schema.ResourceTimeout{
Create: schema.DefaultTimeout(10 * time.Minute),
Update: schema.DefaultTimeout(10 * time.Minute),
Delete: schema.DefaultTimeout(10 * time.Minute),
},
Schema: map[string]*schema.Schema{
"arn": {
Type: schema.TypeString,
Computed: true,
},
"arn_suffix": {
Type: schema.TypeString,
Computed: true,
},
"name": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
ConflictsWith: []string{"name_prefix"},
ValidateFunc: validateElbName,
},
"name_prefix": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validateElbNamePrefix,
},
"internal": {
Type: schema.TypeBool,
Optional: true,
ForceNew: true,
Computed: true,
},
"security_groups": {
Type: schema.TypeSet,
Elem: &schema.Schema{Type: schema.TypeString},
Computed: true,
Optional: true,
Set: schema.HashString,
},
"subnets": {
Type: schema.TypeSet,
Elem: &schema.Schema{Type: schema.TypeString},
Required: true,
Set: schema.HashString,
},
"access_logs": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"bucket": {
Type: schema.TypeString,
Required: true,
},
"prefix": {
Type: schema.TypeString,
Optional: true,
},
"enabled": {
Type: schema.TypeBool,
Optional: true,
Default: true,
},
},
},
},
"enable_deletion_protection": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"idle_timeout": {
Type: schema.TypeInt,
Optional: true,
Default: 60,
},
"ip_address_type": {
Type: schema.TypeString,
Computed: true,
Optional: true,
},
"vpc_id": {
Type: schema.TypeString,
Computed: true,
},
"zone_id": {
Type: schema.TypeString,
Computed: true,
},
"dns_name": {
Type: schema.TypeString,
Computed: true,
},
"tags": tagsSchema(),
},
}
}
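// resourceAwsAlbCreate creates the load balancer, waits for it to reach the
// "active" state, and then hands off to resourceAwsAlbUpdate to apply the
// attribute settings.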
func resourceAwsAlbCreate(d *schema.ResourceData, meta interface{}) error {
elbconn := meta.(*AWSClient).elbv2conn
var name string
if v, ok := d.GetOk("name"); ok {
name = v.(string)
} else if v, ok := d.GetOk("name_prefix"); ok {
name = resource.PrefixedUniqueId(v.(string))
} else {
name = resource.PrefixedUniqueId("tf-lb-")
}
d.Set("name", name)
elbOpts := &elbv2.CreateLoadBalancerInput{
Name: aws.String(name),
Tags: tagsFromMapELBv2(d.Get("tags").(map[string]interface{})),
}
if scheme, ok := d.GetOk("internal"); ok && scheme.(bool) {
elbOpts.Scheme = aws.String("internal")
}
if v, ok := d.GetOk("security_groups"); ok {
elbOpts.SecurityGroups = expandStringList(v.(*schema.Set).List())
}
if v, ok := d.GetOk("subnets"); ok {
elbOpts.Subnets = expandStringList(v.(*schema.Set).List())
}
if v, ok := d.GetOk("ip_address_type"); ok {
elbOpts.IpAddressType = aws.String(v.(string))
}
log.Printf("[DEBUG] ALB create configuration: %#v", elbOpts)
resp, err := elbconn.CreateLoadBalancer(elbOpts)
if err != nil {
return errwrap.Wrapf("Error creating Application Load Balancer: {{err}}", err)
}
if len(resp.LoadBalancers) != 1 {
return fmt.Errorf("No load balancers returned following creation of %s", d.Get("name").(string))
}
lb := resp.LoadBalancers[0]
d.SetId(*lb.LoadBalancerArn)
log.Printf("[INFO] ALB ID: %s", d.Id())
stateConf := &resource.StateChangeConf{
Pending: []string{"provisioning", "failed"},
Target: []string{"active"},
Refresh: func() (interface{}, string, error) {
describeResp, err := elbconn.DescribeLoadBalancers(&elbv2.DescribeLoadBalancersInput{
LoadBalancerArns: []*string{lb.LoadBalancerArn},
})
if err != nil {
return nil, "", err
}
if len(describeResp.LoadBalancers) != 1 {
return nil, "", fmt.Errorf("No load balancers returned for %s", *lb.LoadBalancerArn)
}
dLb := describeResp.LoadBalancers[0]
log.Printf("[INFO] ALB state: %s", *dLb.State.Code)
return describeResp, *dLb.State.Code, nil
},
Timeout: d.Timeout(schema.TimeoutCreate),
MinTimeout: 10 * time.Second,
Delay: 30 * time.Second, // Wait 30 secs before starting
}
_, err = stateConf.WaitForState()
if err != nil {
return err
}
return resourceAwsAlbUpdate(d, meta)
}
func resourceAwsAlbRead(d *schema.ResourceData, meta interface{}) error {
elbconn := meta.(*AWSClient).elbv2conn
albArn := d.Id()
describeAlbOpts := &elbv2.DescribeLoadBalancersInput{
LoadBalancerArns: []*string{aws.String(albArn)},
}
describeResp, err := elbconn.DescribeLoadBalancers(describeAlbOpts)
if err != nil {
if isLoadBalancerNotFound(err) {
// The ALB is gone now, so just remove it from the state
log.Printf("[WARN] ALB %s not found in AWS, removing from state", d.Id())
d.SetId("")
return nil
}
return errwrap.Wrapf("Error retrieving ALB: {{err}}", err)
}
if len(describeResp.LoadBalancers) != 1 {
return fmt.Errorf("Unable to find ALB: %#v", describeResp.LoadBalancers)
}
return flattenAwsAlbResource(d, meta, describeResp.LoadBalancers[0])
}
func resourceAwsAlbUpdate(d *schema.ResourceData, meta interface{}) error {
elbconn := meta.(*AWSClient).elbv2conn
if !d.IsNewResource() {
if err := setElbV2Tags(elbconn, d); err != nil {
return errwrap.Wrapf("Error Modifying Tags on ALB: {{err}}", err)
}
}
attributes := make([]*elbv2.LoadBalancerAttribute, 0)
if d.HasChange("access_logs") {
logs := d.Get("access_logs").([]interface{})
if len(logs) == 1 {
log := logs[0].(map[string]interface{})
attributes = append(attributes,
&elbv2.LoadBalancerAttribute{
Key: aws.String("access_logs.s3.enabled"),
Value: aws.String(strconv.FormatBool(log["enabled"].(bool))),
},
&elbv2.LoadBalancerAttribute{
Key: aws.String("access_logs.s3.bucket"),
Value: aws.String(log["bucket"].(string)),
})
if prefix, ok := log["prefix"]; ok {
attributes = append(attributes, &elbv2.LoadBalancerAttribute{
Key: aws.String("access_logs.s3.prefix"),
Value: aws.String(prefix.(string)),
})
}
} else if len(logs) == 0 {
attributes = append(attributes, &elbv2.LoadBalancerAttribute{
Key: aws.String("access_logs.s3.enabled"),
Value: aws.String("false"),
})
}
}
if d.HasChange("enable_deletion_protection") {
attributes = append(attributes, &elbv2.LoadBalancerAttribute{
Key: aws.String("deletion_protection.enabled"),
Value: aws.String(fmt.Sprintf("%t", d.Get("enable_deletion_protection").(bool))),
})
}
if d.HasChange("idle_timeout") {
attributes = append(attributes, &elbv2.LoadBalancerAttribute{
Key: aws.String("idle_timeout.timeout_seconds"),
Value: aws.String(fmt.Sprintf("%d", d.Get("idle_timeout").(int))),
})
}
if len(attributes) != 0 {
input := &elbv2.ModifyLoadBalancerAttributesInput{
LoadBalancerArn: aws.String(d.Id()),
Attributes: attributes,
}
log.Printf("[DEBUG] ALB Modify Load Balancer Attributes Request: %#v", input)
_, err := elbconn.ModifyLoadBalancerAttributes(input)
if err != nil {
return fmt.Errorf("Failure configuring ALB attributes: %s", err)
}
}
if d.HasChange("security_groups") {
sgs := expandStringList(d.Get("security_groups").(*schema.Set).List())
params := &elbv2.SetSecurityGroupsInput{
LoadBalancerArn: aws.String(d.Id()),
SecurityGroups: sgs,
}
_, err := elbconn.SetSecurityGroups(params)
if err != nil {
return fmt.Errorf("Failure Setting ALB Security Groups: %s", err)
}
}
if d.HasChange("subnets") {
subnets := expandStringList(d.Get("subnets").(*schema.Set).List())
params := &elbv2.SetSubnetsInput{
LoadBalancerArn: aws.String(d.Id()),
Subnets: subnets,
}
_, err := elbconn.SetSubnets(params)
if err != nil {
return fmt.Errorf("Failure Setting ALB Subnets: %s", err)
}
}
if d.HasChange("ip_address_type") {
params := &elbv2.SetIpAddressTypeInput{
LoadBalancerArn: aws.String(d.Id()),
IpAddressType: aws.String(d.Get("ip_address_type").(string)),
}
_, err := elbconn.SetIpAddressType(params)
if err != nil {
return fmt.Errorf("Failure Setting ALB IP Address Type: %s", err)
}
}
stateConf := &resource.StateChangeConf{
Pending: []string{"active", "provisioning", "failed"},
Target: []string{"active"},
Refresh: func() (interface{}, string, error) {
describeResp, err := elbconn.DescribeLoadBalancers(&elbv2.DescribeLoadBalancersInput{
LoadBalancerArns: []*string{aws.String(d.Id())},
})
if err != nil {
return nil, "", err
}
if len(describeResp.LoadBalancers) != 1 {
return nil, "", fmt.Errorf("No load balancers returned for %s", d.Id())
}
dLb := describeResp.LoadBalancers[0]
log.Printf("[INFO] ALB state: %s", *dLb.State.Code)
return describeResp, *dLb.State.Code, nil
},
Timeout: d.Timeout(schema.TimeoutUpdate),
MinTimeout: 10 * time.Second,
Delay: 30 * time.Second, // Wait 30 secs before starting
}
_, err := stateConf.WaitForState()
if err != nil {
return err
}
return resourceAwsAlbRead(d, meta)
}
func resourceAwsAlbDelete(d *schema.ResourceData, meta interface{}) error {
albconn := meta.(*AWSClient).elbv2conn
log.Printf("[INFO] Deleting ALB: %s", d.Id())
// Destroy the load balancer
deleteElbOpts := elbv2.DeleteLoadBalancerInput{
LoadBalancerArn: aws.String(d.Id()),
}
if _, err := albconn.DeleteLoadBalancer(&deleteElbOpts); err != nil {
return fmt.Errorf("Error deleting ALB: %s", err)
}
return nil
}
// flattenSubnetsFromAvailabilityZones creates a slice of strings containing the subnet IDs
// for the ALB based on the AvailabilityZones structure returned by the API.
func flattenSubnetsFromAvailabilityZones(availabilityZones []*elbv2.AvailabilityZone) []string {
var result []string
for _, az := range availabilityZones {
result = append(result, *az.SubnetId)
}
return result
}
func albSuffixFromARN(arn *string) string {
if arn == nil {
return ""
}
if arnComponents := regexp.MustCompile(`arn:.*:loadbalancer/(.*)`).FindAllStringSubmatch(*arn, -1); len(arnComponents) == 1 {
if len(arnComponents[0]) == 2 {
return arnComponents[0][1]
}
}
return ""
}
// flattenAwsAlbResource takes a *elbv2.LoadBalancer and populates all respective resource fields.
func flattenAwsAlbResource(d *schema.ResourceData, meta interface{}, alb *elbv2.LoadBalancer) error {
elbconn := meta.(*AWSClient).elbv2conn
d.Set("arn", alb.LoadBalancerArn)
d.Set("arn_suffix", albSuffixFromARN(alb.LoadBalancerArn))
d.Set("name", alb.LoadBalancerName)
d.Set("internal", (alb.Scheme != nil && *alb.Scheme == "internal"))
d.Set("security_groups", flattenStringList(alb.SecurityGroups))
d.Set("subnets", flattenSubnetsFromAvailabilityZones(alb.AvailabilityZones))
d.Set("vpc_id", alb.VpcId)
d.Set("zone_id", alb.CanonicalHostedZoneId)
d.Set("dns_name", alb.DNSName)
d.Set("ip_address_type", alb.IpAddressType)
respTags, err := elbconn.DescribeTags(&elbv2.DescribeTagsInput{
ResourceArns: []*string{alb.LoadBalancerArn},
})
if err != nil {
return errwrap.Wrapf("Error retrieving ALB Tags: {{err}}", err)
}
var et []*elbv2.Tag
if len(respTags.TagDescriptions) > 0 {
et = respTags.TagDescriptions[0].Tags
}
d.Set("tags", tagsToMapELBv2(et))
attributesResp, err := elbconn.DescribeLoadBalancerAttributes(&elbv2.DescribeLoadBalancerAttributesInput{
LoadBalancerArn: aws.String(d.Id()),
})
if err != nil {
return errwrap.Wrapf("Error retrieving ALB Attributes: {{err}}", err)
}
accessLogMap := map[string]interface{}{}
for _, attr := range attributesResp.Attributes {
switch *attr.Key {
case "access_logs.s3.enabled":
accessLogMap["enabled"] = *attr.Value
case "access_logs.s3.bucket":
accessLogMap["bucket"] = *attr.Value
case "access_logs.s3.prefix":
accessLogMap["prefix"] = *attr.Value
case "idle_timeout.timeout_seconds":
timeout, err := strconv.Atoi(*attr.Value)
if err != nil {
return errwrap.Wrapf("Error parsing ALB timeout: {{err}}", err)
}
log.Printf("[DEBUG] Setting ALB Timeout Seconds: %d", timeout)
d.Set("idle_timeout", timeout)
case "deletion_protection.enabled":
protectionEnabled := (*attr.Value) == "true"
log.Printf("[DEBUG] Setting ALB Deletion Protection Enabled: %t", protectionEnabled)
d.Set("enable_deletion_protection", protectionEnabled)
}
}
log.Printf("[DEBUG] Setting ALB Access Logs: %#v", accessLogMap)
if accessLogMap["bucket"] != "" || accessLogMap["prefix"] != "" {
d.Set("access_logs", []interface{}{accessLogMap})
} else {
d.Set("access_logs", []interface{}{})
}
return nil
}
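albSuffixFromARN relies purely on the regular expression above. A small standalone sketch of what it extracts, using a made-up ALB ARN:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hypothetical ARN; only the portion after "loadbalancer/" is kept.
	arn := "arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/app/my-alb/50dc6c495c0c9188"
	re := regexp.MustCompile(`arn:.*:loadbalancer/(.*)`)
	if m := re.FindAllStringSubmatch(arn, -1); len(m) == 1 && len(m[0]) == 2 {
		fmt.Println(m[0][1]) // prints "app/my-alb/50dc6c495c0c9188"
	}
}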

View File

@ -0,0 +1,284 @@
package aws
import (
"errors"
"fmt"
"log"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/elbv2"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsAlbListener() *schema.Resource {
return &schema.Resource{
Create: resourceAwsAlbListenerCreate,
Read: resourceAwsAlbListenerRead,
Update: resourceAwsAlbListenerUpdate,
Delete: resourceAwsAlbListenerDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: map[string]*schema.Schema{
"arn": {
Type: schema.TypeString,
Computed: true,
},
"load_balancer_arn": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"port": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validateAwsAlbListenerPort,
},
"protocol": {
Type: schema.TypeString,
Optional: true,
Default: "HTTP",
StateFunc: func(v interface{}) string {
return strings.ToUpper(v.(string))
},
ValidateFunc: validateAwsAlbListenerProtocol,
},
"ssl_policy": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"certificate_arn": {
Type: schema.TypeString,
Optional: true,
},
"default_action": {
Type: schema.TypeList,
Required: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"target_group_arn": {
Type: schema.TypeString,
Required: true,
},
"type": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validateAwsAlbListenerActionType,
},
},
},
},
},
}
}
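// resourceAwsAlbListenerCreate creates the listener, retrying while the ELBv2
// API reports CertificateNotFound, since a freshly issued certificate may still
// be propagating.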
func resourceAwsAlbListenerCreate(d *schema.ResourceData, meta interface{}) error {
elbconn := meta.(*AWSClient).elbv2conn
albArn := d.Get("load_balancer_arn").(string)
params := &elbv2.CreateListenerInput{
LoadBalancerArn: aws.String(albArn),
Port: aws.Int64(int64(d.Get("port").(int))),
Protocol: aws.String(d.Get("protocol").(string)),
}
if sslPolicy, ok := d.GetOk("ssl_policy"); ok {
params.SslPolicy = aws.String(sslPolicy.(string))
}
if certificateArn, ok := d.GetOk("certificate_arn"); ok {
params.Certificates = make([]*elbv2.Certificate, 1)
params.Certificates[0] = &elbv2.Certificate{
CertificateArn: aws.String(certificateArn.(string)),
}
}
if defaultActions := d.Get("default_action").([]interface{}); len(defaultActions) == 1 {
params.DefaultActions = make([]*elbv2.Action, len(defaultActions))
for i, defaultAction := range defaultActions {
defaultActionMap := defaultAction.(map[string]interface{})
params.DefaultActions[i] = &elbv2.Action{
TargetGroupArn: aws.String(defaultActionMap["target_group_arn"].(string)),
Type: aws.String(defaultActionMap["type"].(string)),
}
}
}
var resp *elbv2.CreateListenerOutput
err := resource.Retry(5*time.Minute, func() *resource.RetryError {
var err error
log.Printf("[DEBUG] Creating ALB listener for ARN: %s", d.Get("load_balancer_arn").(string))
resp, err = elbconn.CreateListener(params)
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == "CertificateNotFound" {
log.Printf("[WARN] Got an error while trying to create ALB listener for ARN: %s: %s", albArn, err)
return resource.RetryableError(err)
}
}
if err != nil {
return resource.NonRetryableError(err)
}
return nil
})
if err != nil {
return errwrap.Wrapf("Error creating ALB Listener: {{err}}", err)
}
if len(resp.Listeners) == 0 {
return errors.New("Error creating ALB Listener: no listeners returned in response")
}
d.SetId(*resp.Listeners[0].ListenerArn)
return resourceAwsAlbListenerRead(d, meta)
}
func resourceAwsAlbListenerRead(d *schema.ResourceData, meta interface{}) error {
elbconn := meta.(*AWSClient).elbv2conn
resp, err := elbconn.DescribeListeners(&elbv2.DescribeListenersInput{
ListenerArns: []*string{aws.String(d.Id())},
})
if err != nil {
if isListenerNotFound(err) {
log.Printf("[WARN] DescribeListeners - removing %s from state", d.Id())
d.SetId("")
return nil
}
return errwrap.Wrapf("Error retrieving Listener: {{err}}", err)
}
if len(resp.Listeners) != 1 {
return fmt.Errorf("Error retrieving Listener %q", d.Id())
}
listener := resp.Listeners[0]
d.Set("arn", listener.ListenerArn)
d.Set("load_balancer_arn", listener.LoadBalancerArn)
d.Set("port", listener.Port)
d.Set("protocol", listener.Protocol)
d.Set("ssl_policy", listener.SslPolicy)
if listener.Certificates != nil && len(listener.Certificates) == 1 {
d.Set("certificate_arn", listener.Certificates[0].CertificateArn)
}
defaultActions := make([]map[string]interface{}, 0)
if listener.DefaultActions != nil && len(listener.DefaultActions) > 0 {
for _, defaultAction := range listener.DefaultActions {
action := map[string]interface{}{
"target_group_arn": *defaultAction.TargetGroupArn,
"type": *defaultAction.Type,
}
defaultActions = append(defaultActions, action)
}
}
d.Set("default_action", defaultActions)
return nil
}
func resourceAwsAlbListenerUpdate(d *schema.ResourceData, meta interface{}) error {
elbconn := meta.(*AWSClient).elbv2conn
params := &elbv2.ModifyListenerInput{
ListenerArn: aws.String(d.Id()),
Port: aws.Int64(int64(d.Get("port").(int))),
Protocol: aws.String(d.Get("protocol").(string)),
}
if sslPolicy, ok := d.GetOk("ssl_policy"); ok {
params.SslPolicy = aws.String(sslPolicy.(string))
}
if certificateArn, ok := d.GetOk("certificate_arn"); ok {
params.Certificates = make([]*elbv2.Certificate, 1)
params.Certificates[0] = &elbv2.Certificate{
CertificateArn: aws.String(certificateArn.(string)),
}
}
if defaultActions := d.Get("default_action").([]interface{}); len(defaultActions) == 1 {
params.DefaultActions = make([]*elbv2.Action, len(defaultActions))
for i, defaultAction := range defaultActions {
defaultActionMap := defaultAction.(map[string]interface{})
params.DefaultActions[i] = &elbv2.Action{
TargetGroupArn: aws.String(defaultActionMap["target_group_arn"].(string)),
Type: aws.String(defaultActionMap["type"].(string)),
}
}
}
_, err := elbconn.ModifyListener(params)
if err != nil {
return errwrap.Wrapf("Error modifying ALB Listener: {{err}}", err)
}
return resourceAwsAlbListenerRead(d, meta)
}
func resourceAwsAlbListenerDelete(d *schema.ResourceData, meta interface{}) error {
elbconn := meta.(*AWSClient).elbv2conn
_, err := elbconn.DeleteListener(&elbv2.DeleteListenerInput{
ListenerArn: aws.String(d.Id()),
})
if err != nil {
return errwrap.Wrapf("Error deleting Listener: {{err}}", err)
}
return nil
}
func validateAwsAlbListenerPort(v interface{}, k string) (ws []string, errors []error) {
port := v.(int)
if port < 1 || port > 65535 {
errors = append(errors, fmt.Errorf("%q must be a valid port number (1-65535)", k))
}
return
}
func validateAwsAlbListenerProtocol(v interface{}, k string) (ws []string, errors []error) {
value := strings.ToLower(v.(string))
if value == "http" || value == "https" {
return
}
errors = append(errors, fmt.Errorf("%q must be either %q or %q", k, "HTTP", "HTTPS"))
return
}
func validateAwsAlbListenerActionType(v interface{}, k string) (ws []string, errors []error) {
value := strings.ToLower(v.(string))
if value != "forward" {
errors = append(errors, fmt.Errorf("%q must have the value %q", k, "forward"))
}
return
}
func isListenerNotFound(err error) bool {
elberr, ok := err.(awserr.Error)
return ok && elberr.Code() == "ListenerNotFound"
}
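The create path above leans on the helper/resource retry loop to ride out CertificateNotFound. A self-contained sketch of that retry contract, with the transient failure simulated locally instead of coming from the ELBv2 API:

package main

import (
	"errors"
	"log"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
)

func main() {
	attempts := 0
	err := resource.Retry(30*time.Second, func() *resource.RetryError {
		attempts++
		if attempts < 3 {
			// RetryableError tells the helper to keep polling until the timeout.
			return resource.RetryableError(errors.New("simulated transient failure"))
		}
		// Returning nil (or a NonRetryableError) ends the loop.
		return nil
	})
	log.Printf("finished after %d attempts, err=%v", attempts, err)
}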

View File

@ -0,0 +1,293 @@
package aws
import (
"errors"
"fmt"
"log"
"strconv"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/elbv2"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsAlbListenerRule() *schema.Resource {
return &schema.Resource{
Create: resourceAwsAlbListenerRuleCreate,
Read: resourceAwsAlbListenerRuleRead,
Update: resourceAwsAlbListenerRuleUpdate,
Delete: resourceAwsAlbListenerRuleDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: map[string]*schema.Schema{
"arn": {
Type: schema.TypeString,
Computed: true,
},
"listener_arn": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"priority": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
ValidateFunc: validateAwsAlbListenerRulePriority,
},
"action": {
Type: schema.TypeList,
Required: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"target_group_arn": {
Type: schema.TypeString,
Required: true,
},
"type": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validateAwsAlbListenerActionType,
},
},
},
},
"condition": {
Type: schema.TypeList,
Required: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"field": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validateAwsListenerRuleField,
},
"values": {
Type: schema.TypeList,
MaxItems: 1,
Elem: &schema.Schema{Type: schema.TypeString},
Optional: true,
},
},
},
},
},
}
}
func resourceAwsAlbListenerRuleCreate(d *schema.ResourceData, meta interface{}) error {
elbconn := meta.(*AWSClient).elbv2conn
params := &elbv2.CreateRuleInput{
ListenerArn: aws.String(d.Get("listener_arn").(string)),
Priority: aws.Int64(int64(d.Get("priority").(int))),
}
actions := d.Get("action").([]interface{})
params.Actions = make([]*elbv2.Action, len(actions))
for i, action := range actions {
actionMap := action.(map[string]interface{})
params.Actions[i] = &elbv2.Action{
TargetGroupArn: aws.String(actionMap["target_group_arn"].(string)),
Type: aws.String(actionMap["type"].(string)),
}
}
conditions := d.Get("condition").([]interface{})
params.Conditions = make([]*elbv2.RuleCondition, len(conditions))
for i, condition := range conditions {
conditionMap := condition.(map[string]interface{})
values := conditionMap["values"].([]interface{})
params.Conditions[i] = &elbv2.RuleCondition{
Field: aws.String(conditionMap["field"].(string)),
Values: make([]*string, len(values)),
}
for j, value := range values {
params.Conditions[i].Values[j] = aws.String(value.(string))
}
}
resp, err := elbconn.CreateRule(params)
if err != nil {
return errwrap.Wrapf("Error creating ALB Listener Rule: {{err}}", err)
}
if len(resp.Rules) == 0 {
return errors.New("Error creating ALB Listener Rule: no rules returned in response")
}
d.SetId(*resp.Rules[0].RuleArn)
return resourceAwsAlbListenerRuleRead(d, meta)
}
func resourceAwsAlbListenerRuleRead(d *schema.ResourceData, meta interface{}) error {
elbconn := meta.(*AWSClient).elbv2conn
resp, err := elbconn.DescribeRules(&elbv2.DescribeRulesInput{
RuleArns: []*string{aws.String(d.Id())},
})
if err != nil {
if isRuleNotFound(err) {
log.Printf("[WARN] DescribeRules - removing %s from state", d.Id())
d.SetId("")
return nil
}
return errwrap.Wrapf(fmt.Sprintf("Error retrieving Rules for listener %s: {{err}}", d.Id()), err)
}
if len(resp.Rules) != 1 {
return fmt.Errorf("Error retrieving Rule %q", d.Id())
}
rule := resp.Rules[0]
d.Set("arn", rule.RuleArn)
// Rules are evaluated in priority order, from the lowest value to the highest; the default rule is evaluated last, so it is recorded here with the sentinel priority 99999.
if *rule.Priority == "default" {
d.Set("priority", 99999)
} else {
if priority, err := strconv.Atoi(*rule.Priority); err != nil {
return errwrap.Wrapf(fmt.Sprintf("Cannot convert rule priority %q to int: {{err}}", *rule.Priority), err)
} else {
d.Set("priority", priority)
}
}
actions := make([]interface{}, len(rule.Actions))
for i, action := range rule.Actions {
actionMap := make(map[string]interface{})
actionMap["target_group_arn"] = *action.TargetGroupArn
actionMap["type"] = *action.Type
actions[i] = actionMap
}
d.Set("action", actions)
conditions := make([]interface{}, len(rule.Conditions))
for i, condition := range rule.Conditions {
conditionMap := make(map[string]interface{})
conditionMap["field"] = *condition.Field
conditionValues := make([]string, len(condition.Values))
for k, value := range condition.Values {
conditionValues[k] = *value
}
conditionMap["values"] = conditionValues
conditions[i] = conditionMap
}
d.Set("condition", conditions)
return nil
}
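// A minimal sketch of the priority handling above, pulled out as a hypothetical
// helper (it is not referenced elsewhere in this file): the API reports rule
// priorities as strings, with "default" standing in for the listener's default
// rule, which this resource records as the sentinel value 99999.
func albListenerRulePriorityToInt(p string) (int, error) {
if p == "default" {
return 99999, nil
}
return strconv.Atoi(p)
}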
func resourceAwsAlbListenerRuleUpdate(d *schema.ResourceData, meta interface{}) error {
elbconn := meta.(*AWSClient).elbv2conn
d.Partial(true)
if d.HasChange("priority") {
params := &elbv2.SetRulePrioritiesInput{
RulePriorities: []*elbv2.RulePriorityPair{
{
RuleArn: aws.String(d.Id()),
Priority: aws.Int64(int64(d.Get("priority").(int))),
},
},
}
_, err := elbconn.SetRulePriorities(params)
if err != nil {
return err
}
d.SetPartial("priority")
}
requestUpdate := false
params := &elbv2.ModifyRuleInput{
RuleArn: aws.String(d.Id()),
}
if d.HasChange("action") {
actions := d.Get("action").([]interface{})
params.Actions = make([]*elbv2.Action, len(actions))
for i, action := range actions {
actionMap := action.(map[string]interface{})
params.Actions[i] = &elbv2.Action{
TargetGroupArn: aws.String(actionMap["target_group_arn"].(string)),
Type: aws.String(actionMap["type"].(string)),
}
}
requestUpdate = true
d.SetPartial("action")
}
if d.HasChange("condition") {
conditions := d.Get("condition").([]interface{})
params.Conditions = make([]*elbv2.RuleCondition, len(conditions))
for i, condition := range conditions {
conditionMap := condition.(map[string]interface{})
values := conditionMap["values"].([]interface{})
params.Conditions[i] = &elbv2.RuleCondition{
Field: aws.String(conditionMap["field"].(string)),
Values: make([]*string, len(values)),
}
for j, value := range values {
params.Conditions[i].Values[j] = aws.String(value.(string))
}
}
requestUpdate = true
d.SetPartial("condition")
}
if requestUpdate {
resp, err := elbconn.ModifyRule(params)
if err != nil {
return errwrap.Wrapf("Error modifying ALB Listener Rule: {{err}}", err)
}
if len(resp.Rules) == 0 {
return errors.New("Error modifying creating ALB Listener Rule: no rules returned in response")
}
}
d.Partial(false)
return resourceAwsAlbListenerRuleRead(d, meta)
}
func resourceAwsAlbListenerRuleDelete(d *schema.ResourceData, meta interface{}) error {
elbconn := meta.(*AWSClient).elbv2conn
_, err := elbconn.DeleteRule(&elbv2.DeleteRuleInput{
RuleArn: aws.String(d.Id()),
})
if err != nil && !isRuleNotFound(err) {
return errwrap.Wrapf("Error deleting ALB Listener Rule: {{err}}", err)
}
return nil
}
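// validateAwsAlbListenerRulePriority accepts values in the 1-99999 range; 99999
// doubles as the sentinel the read function uses for the listener's default rule.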
func validateAwsAlbListenerRulePriority(v interface{}, k string) (ws []string, errors []error) {
value := v.(int)
if value < 1 || value > 99999 {
errors = append(errors, fmt.Errorf("%q must be in the range 1-99999", k))
}
return
}
func validateAwsListenerRuleField(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if len(value) > 64 {
errors = append(errors, fmt.Errorf("%q must be a maximum of 64 characters", k))
}
return
}
func isRuleNotFound(err error) bool {
elberr, ok := err.(awserr.Error)
return ok && elberr.Code() == "RuleNotFound"
}

View File

@ -0,0 +1,538 @@
package aws
import (
"errors"
"fmt"
"log"
"regexp"
"strconv"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/elbv2"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsAlbTargetGroup() *schema.Resource {
return &schema.Resource{
Create: resourceAwsAlbTargetGroupCreate,
Read: resourceAwsAlbTargetGroupRead,
Update: resourceAwsAlbTargetGroupUpdate,
Delete: resourceAwsAlbTargetGroupDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: map[string]*schema.Schema{
"arn": {
Type: schema.TypeString,
Computed: true,
},
"arn_suffix": {
Type: schema.TypeString,
Computed: true,
},
"name": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
ConflictsWith: []string{"name_prefix"},
ValidateFunc: validateAwsAlbTargetGroupName,
},
"name_prefix": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validateAwsAlbTargetGroupNamePrefix,
},
"port": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
ValidateFunc: validateAwsAlbTargetGroupPort,
},
"protocol": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateAwsAlbTargetGroupProtocol,
},
"vpc_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"deregistration_delay": {
Type: schema.TypeInt,
Optional: true,
Default: 300,
ValidateFunc: validateAwsAlbTargetGroupDeregistrationDelay,
},
"stickiness": {
Type: schema.TypeList,
Optional: true,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"enabled": {
Type: schema.TypeBool,
Optional: true,
Default: true,
},
"type": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validateAwsAlbTargetGroupStickinessType,
},
"cookie_duration": {
Type: schema.TypeInt,
Optional: true,
Default: 86400,
ValidateFunc: validateAwsAlbTargetGroupStickinessCookieDuration,
},
},
},
},
"health_check": {
Type: schema.TypeList,
Optional: true,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"interval": {
Type: schema.TypeInt,
Optional: true,
Default: 30,
},
"path": {
Type: schema.TypeString,
Optional: true,
Default: "/",
ValidateFunc: validateAwsAlbTargetGroupHealthCheckPath,
},
"port": {
Type: schema.TypeString,
Optional: true,
Default: "traffic-port",
ValidateFunc: validateAwsAlbTargetGroupHealthCheckPort,
},
"protocol": {
Type: schema.TypeString,
Optional: true,
Default: "HTTP",
StateFunc: func(v interface{}) string {
return strings.ToUpper(v.(string))
},
ValidateFunc: validateAwsAlbTargetGroupHealthCheckProtocol,
},
"timeout": {
Type: schema.TypeInt,
Optional: true,
Default: 5,
ValidateFunc: validateAwsAlbTargetGroupHealthCheckTimeout,
},
"healthy_threshold": {
Type: schema.TypeInt,
Optional: true,
Default: 5,
ValidateFunc: validateAwsAlbTargetGroupHealthCheckHealthyThreshold,
},
"matcher": {
Type: schema.TypeString,
Optional: true,
Default: "200",
},
"unhealthy_threshold": {
Type: schema.TypeInt,
Optional: true,
Default: 2,
ValidateFunc: validateAwsAlbTargetGroupHealthCheckHealthyThreshold,
},
},
},
},
"tags": tagsSchema(),
},
}
}
func resourceAwsAlbTargetGroupCreate(d *schema.ResourceData, meta interface{}) error {
elbconn := meta.(*AWSClient).elbv2conn
var groupName string
if v, ok := d.GetOk("name"); ok {
groupName = v.(string)
} else if v, ok := d.GetOk("name_prefix"); ok {
groupName = resource.PrefixedUniqueId(v.(string))
} else {
groupName = resource.PrefixedUniqueId("tf-")
}
params := &elbv2.CreateTargetGroupInput{
Name: aws.String(groupName),
Port: aws.Int64(int64(d.Get("port").(int))),
Protocol: aws.String(d.Get("protocol").(string)),
VpcId: aws.String(d.Get("vpc_id").(string)),
}
if healthChecks := d.Get("health_check").([]interface{}); len(healthChecks) == 1 {
healthCheck := healthChecks[0].(map[string]interface{})
params.HealthCheckIntervalSeconds = aws.Int64(int64(healthCheck["interval"].(int)))
params.HealthCheckPath = aws.String(healthCheck["path"].(string))
params.HealthCheckPort = aws.String(healthCheck["port"].(string))
params.HealthCheckProtocol = aws.String(healthCheck["protocol"].(string))
params.HealthCheckTimeoutSeconds = aws.Int64(int64(healthCheck["timeout"].(int)))
params.HealthyThresholdCount = aws.Int64(int64(healthCheck["healthy_threshold"].(int)))
params.UnhealthyThresholdCount = aws.Int64(int64(healthCheck["unhealthy_threshold"].(int)))
params.Matcher = &elbv2.Matcher{
HttpCode: aws.String(healthCheck["matcher"].(string)),
}
}
resp, err := elbconn.CreateTargetGroup(params)
if err != nil {
return errwrap.Wrapf("Error creating ALB Target Group: {{err}}", err)
}
if len(resp.TargetGroups) == 0 {
return errors.New("Error creating ALB Target Group: no groups returned in response")
}
targetGroupArn := resp.TargetGroups[0].TargetGroupArn
d.SetId(*targetGroupArn)
return resourceAwsAlbTargetGroupUpdate(d, meta)
}
func resourceAwsAlbTargetGroupRead(d *schema.ResourceData, meta interface{}) error {
elbconn := meta.(*AWSClient).elbv2conn
resp, err := elbconn.DescribeTargetGroups(&elbv2.DescribeTargetGroupsInput{
TargetGroupArns: []*string{aws.String(d.Id())},
})
if err != nil {
if isTargetGroupNotFound(err) {
log.Printf("[DEBUG] DescribeTargetGroups - removing %s from state", d.Id())
d.SetId("")
return nil
}
return errwrap.Wrapf("Error retrieving Target Group: {{err}}", err)
}
if len(resp.TargetGroups) != 1 {
return fmt.Errorf("Error retrieving Target Group %q", d.Id())
}
targetGroup := resp.TargetGroups[0]
d.Set("arn", targetGroup.TargetGroupArn)
d.Set("arn_suffix", albTargetGroupSuffixFromARN(targetGroup.TargetGroupArn))
d.Set("name", targetGroup.TargetGroupName)
d.Set("port", targetGroup.Port)
d.Set("protocol", targetGroup.Protocol)
d.Set("vpc_id", targetGroup.VpcId)
healthCheck := make(map[string]interface{})
healthCheck["interval"] = *targetGroup.HealthCheckIntervalSeconds
healthCheck["path"] = *targetGroup.HealthCheckPath
healthCheck["port"] = *targetGroup.HealthCheckPort
healthCheck["protocol"] = *targetGroup.HealthCheckProtocol
healthCheck["timeout"] = *targetGroup.HealthCheckTimeoutSeconds
healthCheck["healthy_threshold"] = *targetGroup.HealthyThresholdCount
healthCheck["unhealthy_threshold"] = *targetGroup.UnhealthyThresholdCount
healthCheck["matcher"] = *targetGroup.Matcher.HttpCode
d.Set("health_check", []interface{}{healthCheck})
attrResp, err := elbconn.DescribeTargetGroupAttributes(&elbv2.DescribeTargetGroupAttributesInput{
TargetGroupArn: aws.String(d.Id()),
})
if err != nil {
return errwrap.Wrapf("Error retrieving Target Group Attributes: {{err}}", err)
}
stickinessMap := map[string]interface{}{}
for _, attr := range attrResp.Attributes {
switch *attr.Key {
case "stickiness.enabled":
enabled, err := strconv.ParseBool(*attr.Value)
if err != nil {
return fmt.Errorf("Error converting stickiness.enabled to bool: %s", *attr.Value)
}
stickinessMap["enabled"] = enabled
case "stickiness.type":
stickinessMap["type"] = *attr.Value
case "stickiness.lb_cookie.duration_seconds":
duration, err := strconv.Atoi(*attr.Value)
if err != nil {
return fmt.Errorf("Error converting stickiness.lb_cookie.duration_seconds to int: %s", *attr.Value)
}
stickinessMap["cookie_duration"] = duration
case "deregistration_delay.timeout_seconds":
timeout, err := strconv.Atoi(*attr.Value)
if err != nil {
return fmt.Errorf("Error converting deregistration_delay.timeout_seconds to int: %s", *attr.Value)
}
d.Set("deregistration_delay", timeout)
}
}
if err := d.Set("stickiness", []interface{}{stickinessMap}); err != nil {
return err
}
tagsResp, err := elbconn.DescribeTags(&elbv2.DescribeTagsInput{
ResourceArns: []*string{aws.String(d.Id())},
})
if err != nil {
return errwrap.Wrapf("Error retrieving Target Group Tags: {{err}}", err)
}
for _, t := range tagsResp.TagDescriptions {
if *t.ResourceArn == d.Id() {
if err := d.Set("tags", tagsToMapELBv2(t.Tags)); err != nil {
return err
}
}
}
return nil
}
func resourceAwsAlbTargetGroupUpdate(d *schema.ResourceData, meta interface{}) error {
elbconn := meta.(*AWSClient).elbv2conn
if err := setElbV2Tags(elbconn, d); err != nil {
return errwrap.Wrapf("Error Modifying Tags on ALB Target Group: {{err}}", err)
}
if d.HasChange("health_check") {
healthChecks := d.Get("health_check").([]interface{})
var params *elbv2.ModifyTargetGroupInput
if len(healthChecks) == 1 {
healthCheck := healthChecks[0].(map[string]interface{})
params = &elbv2.ModifyTargetGroupInput{
TargetGroupArn: aws.String(d.Id()),
HealthCheckIntervalSeconds: aws.Int64(int64(healthCheck["interval"].(int))),
HealthCheckPath: aws.String(healthCheck["path"].(string)),
HealthCheckPort: aws.String(healthCheck["port"].(string)),
HealthCheckProtocol: aws.String(healthCheck["protocol"].(string)),
HealthCheckTimeoutSeconds: aws.Int64(int64(healthCheck["timeout"].(int))),
HealthyThresholdCount: aws.Int64(int64(healthCheck["healthy_threshold"].(int))),
UnhealthyThresholdCount: aws.Int64(int64(healthCheck["unhealthy_threshold"].(int))),
Matcher: &elbv2.Matcher{
HttpCode: aws.String(healthCheck["matcher"].(string)),
},
}
} else {
params = &elbv2.ModifyTargetGroupInput{
TargetGroupArn: aws.String(d.Id()),
}
}
_, err := elbconn.ModifyTargetGroup(params)
if err != nil {
return errwrap.Wrapf("Error modifying Target Group: {{err}}", err)
}
}
var attrs []*elbv2.TargetGroupAttribute
if d.HasChange("deregistration_delay") {
attrs = append(attrs, &elbv2.TargetGroupAttribute{
Key: aws.String("deregistration_delay.timeout_seconds"),
Value: aws.String(fmt.Sprintf("%d", d.Get("deregistration_delay").(int))),
})
}
if d.HasChange("stickiness") {
stickinessBlocks := d.Get("stickiness").([]interface{})
if len(stickinessBlocks) == 1 {
stickiness := stickinessBlocks[0].(map[string]interface{})
attrs = append(attrs,
&elbv2.TargetGroupAttribute{
Key: aws.String("stickiness.enabled"),
Value: aws.String(strconv.FormatBool(stickiness["enabled"].(bool))),
},
&elbv2.TargetGroupAttribute{
Key: aws.String("stickiness.type"),
Value: aws.String(stickiness["type"].(string)),
},
&elbv2.TargetGroupAttribute{
Key: aws.String("stickiness.lb_cookie.duration_seconds"),
Value: aws.String(fmt.Sprintf("%d", stickiness["cookie_duration"].(int))),
})
} else if len(stickinessBlocks) == 0 {
attrs = append(attrs, &elbv2.TargetGroupAttribute{
Key: aws.String("stickiness.enabled"),
Value: aws.String("false"),
})
}
}
if len(attrs) > 0 {
params := &elbv2.ModifyTargetGroupAttributesInput{
TargetGroupArn: aws.String(d.Id()),
Attributes: attrs,
}
_, err := elbconn.ModifyTargetGroupAttributes(params)
if err != nil {
return errwrap.Wrapf("Error modifying Target Group Attributes: {{err}}", err)
}
}
return resourceAwsAlbTargetGroupRead(d, meta)
}
func resourceAwsAlbTargetGroupDelete(d *schema.ResourceData, meta interface{}) error {
elbconn := meta.(*AWSClient).elbv2conn
_, err := elbconn.DeleteTargetGroup(&elbv2.DeleteTargetGroupInput{
TargetGroupArn: aws.String(d.Id()),
})
if err != nil {
return errwrap.Wrapf("Error deleting Target Group: {{err}}", err)
}
return nil
}
func isTargetGroupNotFound(err error) bool {
elberr, ok := err.(awserr.Error)
return ok && elberr.Code() == "TargetGroupNotFound"
}
func validateAwsAlbTargetGroupHealthCheckPath(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if len(value) > 1024 {
errors = append(errors, fmt.Errorf(
"%q cannot be longer than 1024 characters: %q", k, value))
}
return
}
func validateAwsAlbTargetGroupHealthCheckPort(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if value == "traffic-port" {
return
}
port, err := strconv.Atoi(value)
if err != nil {
errors = append(errors, fmt.Errorf("%q must be a valid port number (1-65536) or %q", k, "traffic-port"))
}
if port < 1 || port > 65536 {
errors = append(errors, fmt.Errorf("%q must be a valid port number (1-65536) or %q", k, "traffic-port"))
}
return
}
func validateAwsAlbTargetGroupHealthCheckHealthyThreshold(v interface{}, k string) (ws []string, errors []error) {
value := v.(int)
if value < 2 || value > 10 {
errors = append(errors, fmt.Errorf("%q must be an integer between 2 and 10", k))
}
return
}
func validateAwsAlbTargetGroupHealthCheckTimeout(v interface{}, k string) (ws []string, errors []error) {
value := v.(int)
if value < 2 || value > 60 {
errors = append(errors, fmt.Errorf("%q must be an integer between 2 and 60", k))
}
return
}
func validateAwsAlbTargetGroupHealthCheckProtocol(v interface{}, k string) (ws []string, errors []error) {
value := strings.ToLower(v.(string))
if value == "http" || value == "https" {
return
}
errors = append(errors, fmt.Errorf("%q must be either %q or %q", k, "HTTP", "HTTPS"))
return
}
func validateAwsAlbTargetGroupPort(v interface{}, k string) (ws []string, errors []error) {
port := v.(int)
if port < 1 || port > 65535 {
errors = append(errors, fmt.Errorf("%q must be a valid port number (1-65535)", k))
}
return
}
func validateAwsAlbTargetGroupProtocol(v interface{}, k string) (ws []string, errors []error) {
protocol := strings.ToLower(v.(string))
if protocol == "http" || protocol == "https" {
return
}
errors = append(errors, fmt.Errorf("%q must be either %q or %q", k, "HTTP", "HTTPS"))
return
}
func validateAwsAlbTargetGroupDeregistrationDelay(v interface{}, k string) (ws []string, errors []error) {
delay := v.(int)
if delay < 0 || delay > 3600 {
errors = append(errors, fmt.Errorf("%q must be in the range 0-3600 seconds", k))
}
return
}
func validateAwsAlbTargetGroupStickinessType(v interface{}, k string) (ws []string, errors []error) {
stickinessType := v.(string)
if stickinessType != "lb_cookie" {
errors = append(errors, fmt.Errorf("%q must have the value %q", k, "lb_cookie"))
}
return
}
func validateAwsAlbTargetGroupStickinessCookieDuration(v interface{}, k string) (ws []string, errors []error) {
duration := v.(int)
if duration < 1 || duration > 604800 {
errors = append(errors, fmt.Errorf("%q must be a between 1 second and 1 week (1-604800 seconds))", k))
}
return
}
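// albTargetGroupSuffixFromARN extracts the trailing "targetgroup/<name>/<id>"
// portion of a target group ARN for the arn_suffix attribute, returning an
// empty string when the ARN is nil or does not match that layout.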
func albTargetGroupSuffixFromARN(arn *string) string {
if arn == nil {
return ""
}
if arnComponents := regexp.MustCompile(`arn:.*:targetgroup/(.*)`).FindAllStringSubmatch(*arn, -1); len(arnComponents) == 1 {
if len(arnComponents[0]) == 2 {
return fmt.Sprintf("targetgroup/%s", arnComponents[0][1])
}
}
return ""
}

View File

@ -0,0 +1,141 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/elbv2"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsAlbTargetGroupAttachment() *schema.Resource {
return &schema.Resource{
Create: resourceAwsAlbAttachmentCreate,
Read: resourceAwsAlbAttachmentRead,
Delete: resourceAwsAlbAttachmentDelete,
Schema: map[string]*schema.Schema{
"target_group_arn": {
Type: schema.TypeString,
ForceNew: true,
Required: true,
},
"target_id": {
Type: schema.TypeString,
ForceNew: true,
Required: true,
},
"port": {
Type: schema.TypeInt,
ForceNew: true,
Optional: true,
},
},
}
}
func resourceAwsAlbAttachmentCreate(d *schema.ResourceData, meta interface{}) error {
elbconn := meta.(*AWSClient).elbv2conn
target := &elbv2.TargetDescription{
Id: aws.String(d.Get("target_id").(string)),
}
if v, ok := d.GetOk("port"); ok {
target.Port = aws.Int64(int64(v.(int)))
}
params := &elbv2.RegisterTargetsInput{
TargetGroupArn: aws.String(d.Get("target_group_arn").(string)),
Targets: []*elbv2.TargetDescription{target},
}
log.Printf("[INFO] Registering Target %s with Target Group %s", d.Get("target_id").(string),
d.Get("target_group_arn").(string))
_, err := elbconn.RegisterTargets(params)
if err != nil {
return errwrap.Wrapf("Error registering targets with target group: {{err}}", err)
}
d.SetId(resource.PrefixedUniqueId(fmt.Sprintf("%s-", d.Get("target_group_arn"))))
return nil
}
func resourceAwsAlbAttachmentDelete(d *schema.ResourceData, meta interface{}) error {
elbconn := meta.(*AWSClient).elbv2conn
target := &elbv2.TargetDescription{
Id: aws.String(d.Get("target_id").(string)),
}
if v, ok := d.GetOk("port"); ok {
target.Port = aws.Int64(int64(v.(int)))
}
params := &elbv2.DeregisterTargetsInput{
TargetGroupArn: aws.String(d.Get("target_group_arn").(string)),
Targets: []*elbv2.TargetDescription{target},
}
_, err := elbconn.DeregisterTargets(params)
if err != nil && !isTargetGroupNotFound(err) {
return errwrap.Wrapf("Error deregistering Targets: {{err}}", err)
}
d.SetId("")
return nil
}
// resourceAwsAlbAttachmentRead requires all of the fields in order to describe the correct
// target, so there is no work to do beyond ensuring that the target and group still exist.
func resourceAwsAlbAttachmentRead(d *schema.ResourceData, meta interface{}) error {
elbconn := meta.(*AWSClient).elbv2conn
target := &elbv2.TargetDescription{
Id: aws.String(d.Get("target_id").(string)),
}
if v, ok := d.GetOk("port"); ok {
target.Port = aws.Int64(int64(v.(int)))
}
resp, err := elbconn.DescribeTargetHealth(&elbv2.DescribeTargetHealthInput{
TargetGroupArn: aws.String(d.Get("target_group_arn").(string)),
Targets: []*elbv2.TargetDescription{target},
})
if err != nil {
if isTargetGroupNotFound(err) {
log.Printf("[WARN] Target group does not exist, removing target attachment %s", d.Id())
d.SetId("")
return nil
}
if isInvalidTarget(err) {
log.Printf("[WARN] Target does not exist, removing target attachment %s", d.Id())
d.SetId("")
return nil
}
return errwrap.Wrapf("Error reading Target Health: {{err}}", err)
}
if len(resp.TargetHealthDescriptions) != 1 {
log.Printf("[WARN] Target does not exist, removing target attachment %s", d.Id())
d.SetId("")
return nil
}
return nil
}
func isInvalidTarget(err error) bool {
elberr, ok := err.(awserr.Error)
return ok && elberr.Code() == "InvalidTarget"
}

View File

@ -0,0 +1,562 @@
package aws
import (
"bytes"
"errors"
"fmt"
"log"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
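// Polling parameters used below while waiting for a newly registered AMI to
// become available and for a deregistered AMI to disappear.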
const (
AWSAMIRetryTimeout = 40 * time.Minute
AWSAMIDeleteRetryTimeout = 90 * time.Minute
AWSAMIRetryDelay = 5 * time.Second
AWSAMIRetryMinTimeout = 3 * time.Second
)
func resourceAwsAmi() *schema.Resource {
// Our schema is also shared with aws_ami_copy and aws_ami_from_instance.
resourceSchema := resourceAwsAmiCommonSchema(false)
return &schema.Resource{
Create: resourceAwsAmiCreate,
Schema: resourceSchema,
// The Read, Update and Delete operations are shared with aws_ami_copy
// and aws_ami_from_instance, since they differ only in how the image
// is created.
Read: resourceAwsAmiRead,
Update: resourceAwsAmiUpdate,
Delete: resourceAwsAmiDelete,
}
}
func resourceAwsAmiCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*AWSClient).ec2conn
req := &ec2.RegisterImageInput{
Name: aws.String(d.Get("name").(string)),
Description: aws.String(d.Get("description").(string)),
Architecture: aws.String(d.Get("architecture").(string)),
ImageLocation: aws.String(d.Get("image_location").(string)),
RootDeviceName: aws.String(d.Get("root_device_name").(string)),
SriovNetSupport: aws.String(d.Get("sriov_net_support").(string)),
VirtualizationType: aws.String(d.Get("virtualization_type").(string)),
}
if kernelId := d.Get("kernel_id").(string); kernelId != "" {
req.KernelId = aws.String(kernelId)
}
if ramdiskId := d.Get("ramdisk_id").(string); ramdiskId != "" {
req.RamdiskId = aws.String(ramdiskId)
}
ebsBlockDevsSet := d.Get("ebs_block_device").(*schema.Set)
ephemeralBlockDevsSet := d.Get("ephemeral_block_device").(*schema.Set)
for _, ebsBlockDevI := range ebsBlockDevsSet.List() {
ebsBlockDev := ebsBlockDevI.(map[string]interface{})
blockDev := &ec2.BlockDeviceMapping{
DeviceName: aws.String(ebsBlockDev["device_name"].(string)),
Ebs: &ec2.EbsBlockDevice{
DeleteOnTermination: aws.Bool(ebsBlockDev["delete_on_termination"].(bool)),
VolumeType: aws.String(ebsBlockDev["volume_type"].(string)),
},
}
if iops, ok := ebsBlockDev["iops"]; ok {
if iop := iops.(int); iop != 0 {
blockDev.Ebs.Iops = aws.Int64(int64(iop))
}
}
if size, ok := ebsBlockDev["volume_size"]; ok {
if s := size.(int); s != 0 {
blockDev.Ebs.VolumeSize = aws.Int64(int64(s))
}
}
encrypted := ebsBlockDev["encrypted"].(bool)
if snapshotId := ebsBlockDev["snapshot_id"].(string); snapshotId != "" {
blockDev.Ebs.SnapshotId = aws.String(snapshotId)
if encrypted {
return errors.New("can't set both 'snapshot_id' and 'encrypted'")
}
} else if encrypted {
blockDev.Ebs.Encrypted = aws.Bool(true)
}
req.BlockDeviceMappings = append(req.BlockDeviceMappings, blockDev)
}
for _, ephemeralBlockDevI := range ephemeralBlockDevsSet.List() {
ephemeralBlockDev := ephemeralBlockDevI.(map[string]interface{})
blockDev := &ec2.BlockDeviceMapping{
DeviceName: aws.String(ephemeralBlockDev["device_name"].(string)),
VirtualName: aws.String(ephemeralBlockDev["virtual_name"].(string)),
}
req.BlockDeviceMappings = append(req.BlockDeviceMappings, blockDev)
}
res, err := client.RegisterImage(req)
if err != nil {
return err
}
id := *res.ImageId
d.SetId(id)
d.Partial(true) // make sure we record the id even if the rest of this gets interrupted
d.Set("id", id)
d.Set("manage_ebs_block_devices", false)
d.SetPartial("id")
d.SetPartial("manage_ebs_block_devices")
d.Partial(false)
_, err = resourceAwsAmiWaitForAvailable(id, client)
if err != nil {
return err
}
return resourceAwsAmiUpdate(d, meta)
}
func resourceAwsAmiRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*AWSClient).ec2conn
id := d.Id()
req := &ec2.DescribeImagesInput{
ImageIds: []*string{aws.String(id)},
}
res, err := client.DescribeImages(req)
if err != nil {
if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidAMIID.NotFound" {
log.Printf("[DEBUG] %s no longer exists, so we'll drop it from the state", id)
d.SetId("")
return nil
}
return err
}
if len(res.Images) != 1 {
d.SetId("")
return nil
}
image := res.Images[0]
state := *image.State
if state == "pending" {
// This could happen if a user manually adds an image we didn't create
// to the state. We'll wait for the image to become available
// before we continue. We should never take this branch in normal
// circumstances since we would've waited for availability during
// the "Create" step.
image, err = resourceAwsAmiWaitForAvailable(id, client)
if err != nil {
return err
}
state = *image.State
}
if state == "deregistered" {
d.SetId("")
return nil
}
if state != "available" {
return fmt.Errorf("AMI has become %s", state)
}
d.Set("name", image.Name)
d.Set("description", image.Description)
d.Set("image_location", image.ImageLocation)
d.Set("architecture", image.Architecture)
d.Set("kernel_id", image.KernelId)
d.Set("ramdisk_id", image.RamdiskId)
d.Set("root_device_name", image.RootDeviceName)
d.Set("sriov_net_support", image.SriovNetSupport)
d.Set("virtualization_type", image.VirtualizationType)
var ebsBlockDevs []map[string]interface{}
var ephemeralBlockDevs []map[string]interface{}
for _, blockDev := range image.BlockDeviceMappings {
if blockDev.Ebs != nil {
ebsBlockDev := map[string]interface{}{
"device_name": *blockDev.DeviceName,
"delete_on_termination": *blockDev.Ebs.DeleteOnTermination,
"encrypted": *blockDev.Ebs.Encrypted,
"iops": 0,
"volume_size": int(*blockDev.Ebs.VolumeSize),
"volume_type": *blockDev.Ebs.VolumeType,
}
if blockDev.Ebs.Iops != nil {
ebsBlockDev["iops"] = int(*blockDev.Ebs.Iops)
}
// The snapshot ID might not be set.
if blockDev.Ebs.SnapshotId != nil {
ebsBlockDev["snapshot_id"] = *blockDev.Ebs.SnapshotId
}
ebsBlockDevs = append(ebsBlockDevs, ebsBlockDev)
} else {
ephemeralBlockDevs = append(ephemeralBlockDevs, map[string]interface{}{
"device_name": *blockDev.DeviceName,
"virtual_name": *blockDev.VirtualName,
})
}
}
d.Set("ebs_block_device", ebsBlockDevs)
d.Set("ephemeral_block_device", ephemeralBlockDevs)
d.Set("tags", tagsToMap(image.Tags))
return nil
}
func resourceAwsAmiUpdate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*AWSClient).ec2conn
d.Partial(true)
if err := setTags(client, d); err != nil {
return err
} else {
d.SetPartial("tags")
}
if d.Get("description").(string) != "" {
_, err := client.ModifyImageAttribute(&ec2.ModifyImageAttributeInput{
ImageId: aws.String(d.Id()),
Description: &ec2.AttributeValue{
Value: aws.String(d.Get("description").(string)),
},
})
if err != nil {
return err
}
d.SetPartial("description")
}
d.Partial(false)
return resourceAwsAmiRead(d, meta)
}
func resourceAwsAmiDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*AWSClient).ec2conn
req := &ec2.DeregisterImageInput{
ImageId: aws.String(d.Id()),
}
_, err := client.DeregisterImage(req)
if err != nil {
return err
}
// If we're managing the EBS snapshots then we need to delete those too.
if d.Get("manage_ebs_snapshots").(bool) {
errs := map[string]error{}
ebsBlockDevsSet := d.Get("ebs_block_device").(*schema.Set)
req := &ec2.DeleteSnapshotInput{}
for _, ebsBlockDevI := range ebsBlockDevsSet.List() {
ebsBlockDev := ebsBlockDevI.(map[string]interface{})
snapshotId := ebsBlockDev["snapshot_id"].(string)
if snapshotId != "" {
req.SnapshotId = aws.String(snapshotId)
_, err := client.DeleteSnapshot(req)
if err != nil {
errs[snapshotId] = err
}
}
}
if len(errs) > 0 {
errParts := []string{"Errors while deleting associated EBS snapshots:"}
for snapshotId, err := range errs {
errParts = append(errParts, fmt.Sprintf("%s: %s", snapshotId, err))
}
errParts = append(errParts, "These are no longer managed by Terraform and must be deleted manually.")
return errors.New(strings.Join(errParts, "\n"))
}
}
// Verify that the image is actually removed, if not we need to wait for it to be removed
if err := resourceAwsAmiWaitForDestroy(d.Id(), client); err != nil {
return err
}
// No error, ami was deleted successfully
d.SetId("")
return nil
}
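// AMIStateRefreshFunc returns a resource.StateRefreshFunc that reports the
// current state of the given AMI, mapping an image that can no longer be
// described to the synthetic "destroyed" state used by the waiters below.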
func AMIStateRefreshFunc(client *ec2.EC2, id string) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
emptyResp := &ec2.DescribeImagesOutput{}
resp, err := client.DescribeImages(&ec2.DescribeImagesInput{ImageIds: []*string{aws.String(id)}})
if err != nil {
if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidAMIID.NotFound" {
return emptyResp, "destroyed", nil
} else if resp != nil && len(resp.Images) == 0 {
return emptyResp, "destroyed", nil
} else {
return emptyResp, "", fmt.Errorf("Error on refresh: %+v", err)
}
}
if resp == nil || resp.Images == nil || len(resp.Images) == 0 {
return emptyResp, "destroyed", nil
}
// AMI is valid, so return its state
return resp.Images[0], *resp.Images[0].State, nil
}
}
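// resourceAwsAmiWaitForDestroy blocks until DescribeImages no longer returns
// the AMI, or until the delete timeout elapses.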
func resourceAwsAmiWaitForDestroy(id string, client *ec2.EC2) error {
log.Printf("Waiting for AMI %s to be deleted...", id)
stateConf := &resource.StateChangeConf{
Pending: []string{"available", "pending", "failed"},
Target: []string{"destroyed"},
Refresh: AMIStateRefreshFunc(client, id),
Timeout: AWSAMIDeleteRetryTimeout,
Delay: AWSAMIRetryDelay,
MinTimeout: AWSAMIRetryMinTimeout,
}
_, err := stateConf.WaitForState()
if err != nil {
return fmt.Errorf("Error waiting for AMI (%s) to be deleted: %v", id, err)
}
return nil
}
func resourceAwsAmiWaitForAvailable(id string, client *ec2.EC2) (*ec2.Image, error) {
log.Printf("Waiting for AMI %s to become available...", id)
stateConf := &resource.StateChangeConf{
Pending: []string{"pending"},
Target: []string{"available"},
Refresh: AMIStateRefreshFunc(client, id),
Timeout: AWSAMIRetryTimeout,
Delay: AWSAMIRetryDelay,
MinTimeout: AWSAMIRetryMinTimeout,
}
info, err := stateConf.WaitForState()
if err != nil {
return nil, fmt.Errorf("Error waiting for AMI (%s) to be ready: %v", id, err)
}
return info.(*ec2.Image), nil
}
func resourceAwsAmiCommonSchema(computed bool) map[string]*schema.Schema {
// The "computed" parameter controls whether we're making
// a schema for an AMI that's been implicitly registered (aws_ami_copy, aws_ami_from_instance)
// or whether we're making a schema for an explicit registration (aws_ami).
// When set, almost every attribute is marked as "computed".
// When not set, most attributes are user-settable, though a few (such as
// "id", "image_location", and the block device sets) remain computed.
// "name" and "description" are never computed, since they must always
// be provided by the user.
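// As a minimal illustration of the two modes (the call sites are resourceAwsAmi,
// which passes false, and resourceAwsAmiCopy / resourceAwsAmiFromInstance, which
// pass true):
// explicitSchema := resourceAwsAmiCommonSchema(false) // "architecture" is user-settable, defaulting to "x86_64"
// implicitSchema := resourceAwsAmiCommonSchema(true)  // "architecture" is Computed and read back from the image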
var virtualizationTypeDefault interface{}
var deleteEbsOnTerminationDefault interface{}
var sriovNetSupportDefault interface{}
var architectureDefault interface{}
var volumeTypeDefault interface{}
if !computed {
virtualizationTypeDefault = "paravirtual"
deleteEbsOnTerminationDefault = true
sriovNetSupportDefault = "simple"
architectureDefault = "x86_64"
volumeTypeDefault = "standard"
}
return map[string]*schema.Schema{
"id": {
Type: schema.TypeString,
Computed: true,
},
"image_location": {
Type: schema.TypeString,
Optional: !computed,
Computed: true,
ForceNew: !computed,
},
"architecture": {
Type: schema.TypeString,
Optional: !computed,
Computed: computed,
ForceNew: !computed,
Default: architectureDefault,
},
"description": {
Type: schema.TypeString,
Optional: true,
},
"kernel_id": {
Type: schema.TypeString,
Optional: !computed,
Computed: computed,
ForceNew: !computed,
},
"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"ramdisk_id": {
Type: schema.TypeString,
Optional: !computed,
Computed: computed,
ForceNew: !computed,
},
"root_device_name": {
Type: schema.TypeString,
Optional: !computed,
Computed: computed,
ForceNew: !computed,
},
"sriov_net_support": {
Type: schema.TypeString,
Optional: !computed,
Computed: computed,
ForceNew: !computed,
Default: sriovNetSupportDefault,
},
"virtualization_type": {
Type: schema.TypeString,
Optional: !computed,
Computed: computed,
ForceNew: !computed,
Default: virtualizationTypeDefault,
},
// The following block device attributes intentionally mimic the
// corresponding attributes on aws_instance, since they have the
// same meaning.
// However, we don't use root_block_device here because the constraints
// on which root device attributes can be overridden for an instance do
// not apply when registering an AMI.
"ebs_block_device": {
Type: schema.TypeSet,
Optional: true,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"delete_on_termination": {
Type: schema.TypeBool,
Optional: !computed,
Default: deleteEbsOnTerminationDefault,
ForceNew: !computed,
Computed: computed,
},
"device_name": {
Type: schema.TypeString,
Required: !computed,
ForceNew: !computed,
Computed: computed,
},
"encrypted": {
Type: schema.TypeBool,
Optional: !computed,
Computed: computed,
ForceNew: !computed,
},
"iops": {
Type: schema.TypeInt,
Optional: !computed,
Computed: computed,
ForceNew: !computed,
},
"snapshot_id": {
Type: schema.TypeString,
Optional: !computed,
Computed: computed,
ForceNew: !computed,
},
"volume_size": {
Type: schema.TypeInt,
Optional: !computed,
Computed: true,
ForceNew: !computed,
},
"volume_type": {
Type: schema.TypeString,
Optional: !computed,
Computed: computed,
ForceNew: !computed,
Default: volumeTypeDefault,
},
},
},
Set: func(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})
buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["snapshot_id"].(string)))
return hashcode.String(buf.String())
},
},
"ephemeral_block_device": {
Type: schema.TypeSet,
Optional: true,
Computed: true,
ForceNew: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"device_name": {
Type: schema.TypeString,
Required: !computed,
Computed: computed,
},
"virtual_name": {
Type: schema.TypeString,
Required: !computed,
Computed: computed,
},
},
},
Set: func(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})
buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["virtual_name"].(string)))
return hashcode.String(buf.String())
},
},
"tags": tagsSchema(),
// Not a public attribute; used to let the aws_ami_copy and aws_ami_from_instance
// resources record that they implicitly created new EBS snapshots that we should
// now manage. Not set by aws_ami, since the snapshots used there are presumed to
// be independently managed.
"manage_ebs_snapshots": {
Type: schema.TypeBool,
Computed: true,
ForceNew: true,
},
}
}

View File

@ -0,0 +1,90 @@
package aws
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsAmiCopy() *schema.Resource {
// Inherit all of the common AMI attributes from aws_ami, since we're
// implicitly creating an aws_ami resource.
resourceSchema := resourceAwsAmiCommonSchema(true)
// Additional attributes unique to the copy operation.
resourceSchema["source_ami_id"] = &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
}
resourceSchema["source_ami_region"] = &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
}
resourceSchema["encrypted"] = &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Default: false,
ForceNew: true,
}
resourceSchema["kms_key_id"] = &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
ValidateFunc: validateArn,
}
return &schema.Resource{
Create: resourceAwsAmiCopyCreate,
Schema: resourceSchema,
// The remaining operations are shared with the generic aws_ami resource,
// since the aws_ami_copy resource only differs in how it's created.
Read: resourceAwsAmiRead,
Update: resourceAwsAmiUpdate,
Delete: resourceAwsAmiDelete,
}
}
func resourceAwsAmiCopyCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*AWSClient).ec2conn
req := &ec2.CopyImageInput{
Name: aws.String(d.Get("name").(string)),
Description: aws.String(d.Get("description").(string)),
SourceImageId: aws.String(d.Get("source_ami_id").(string)),
SourceRegion: aws.String(d.Get("source_ami_region").(string)),
Encrypted: aws.Bool(d.Get("encrypted").(bool)),
}
if v, ok := d.GetOk("kms_key_id"); ok {
req.KmsKeyId = aws.String(v.(string))
}
res, err := client.CopyImage(req)
if err != nil {
return err
}
id := *res.ImageId
d.SetId(id)
d.Partial(true) // make sure we record the id even if the rest of this gets interrupted
d.Set("id", id)
d.Set("manage_ebs_snapshots", true)
d.SetPartial("id")
d.SetPartial("manage_ebs_snapshots")
d.Partial(false)
_, err = resourceAwsAmiWaitForAvailable(id, client)
if err != nil {
return err
}
return resourceAwsAmiUpdate(d, meta)
}

View File

@ -0,0 +1,70 @@
package aws
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsAmiFromInstance() *schema.Resource {
// Inherit all of the common AMI attributes from aws_ami, since we're
// implicitly creating an aws_ami resource.
resourceSchema := resourceAwsAmiCommonSchema(true)
// Additional attributes unique to the from-instance operation.
resourceSchema["source_instance_id"] = &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
}
resourceSchema["snapshot_without_reboot"] = &schema.Schema{
Type: schema.TypeBool,
Optional: true,
ForceNew: true,
}
return &schema.Resource{
Create: resourceAwsAmiFromInstanceCreate,
Schema: resourceSchema,
// The remaining operations are shared with the generic aws_ami resource,
// since the aws_ami_from_instance resource only differs in how it's created.
Read: resourceAwsAmiRead,
Update: resourceAwsAmiUpdate,
Delete: resourceAwsAmiDelete,
}
}
func resourceAwsAmiFromInstanceCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*AWSClient).ec2conn
req := &ec2.CreateImageInput{
Name: aws.String(d.Get("name").(string)),
Description: aws.String(d.Get("description").(string)),
InstanceId: aws.String(d.Get("source_instance_id").(string)),
NoReboot: aws.Bool(d.Get("snapshot_without_reboot").(bool)),
}
res, err := client.CreateImage(req)
if err != nil {
return err
}
id := *res.ImageId
d.SetId(id)
d.Partial(true) // make sure we record the id even if the rest of this gets interrupted
d.Set("id", id)
d.Set("manage_ebs_snapshots", true)
d.SetPartial("id")
d.SetPartial("manage_ebs_snapshots")
d.Partial(false)
_, err = resourceAwsAmiWaitForAvailable(id, client)
if err != nil {
return err
}
return resourceAwsAmiUpdate(d, meta)
}

View File

@ -0,0 +1,114 @@
package aws
import (
"fmt"
"log"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsAmiLaunchPermission() *schema.Resource {
return &schema.Resource{
Exists: resourceAwsAmiLaunchPermissionExists,
Create: resourceAwsAmiLaunchPermissionCreate,
Read: resourceAwsAmiLaunchPermissionRead,
Delete: resourceAwsAmiLaunchPermissionDelete,
Schema: map[string]*schema.Schema{
"image_id": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"account_id": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
},
}
}
func resourceAwsAmiLaunchPermissionExists(d *schema.ResourceData, meta interface{}) (bool, error) {
conn := meta.(*AWSClient).ec2conn
image_id := d.Get("image_id").(string)
account_id := d.Get("account_id").(string)
return hasLaunchPermission(conn, image_id, account_id)
}
func resourceAwsAmiLaunchPermissionCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
image_id := d.Get("image_id").(string)
account_id := d.Get("account_id").(string)
_, err := conn.ModifyImageAttribute(&ec2.ModifyImageAttributeInput{
ImageId: aws.String(image_id),
Attribute: aws.String("launchPermission"),
LaunchPermission: &ec2.LaunchPermissionModifications{
Add: []*ec2.LaunchPermission{
&ec2.LaunchPermission{UserId: aws.String(account_id)},
},
},
})
if err != nil {
return fmt.Errorf("error creating ami launch permission: %s", err)
}
d.SetId(fmt.Sprintf("%s-%s", image_id, account_id))
return nil
}
func resourceAwsAmiLaunchPermissionRead(d *schema.ResourceData, meta interface{}) error {
return nil
}
func resourceAwsAmiLaunchPermissionDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
image_id := d.Get("image_id").(string)
account_id := d.Get("account_id").(string)
_, err := conn.ModifyImageAttribute(&ec2.ModifyImageAttributeInput{
ImageId: aws.String(image_id),
Attribute: aws.String("launchPermission"),
LaunchPermission: &ec2.LaunchPermissionModifications{
Remove: []*ec2.LaunchPermission{
&ec2.LaunchPermission{UserId: aws.String(account_id)},
},
},
})
if err != nil {
return fmt.Errorf("error removing ami launch permission: %s", err)
}
return nil
}
func hasLaunchPermission(conn *ec2.EC2, image_id string, account_id string) (bool, error) {
attrs, err := conn.DescribeImageAttribute(&ec2.DescribeImageAttributeInput{
ImageId: aws.String(image_id),
Attribute: aws.String("launchPermission"),
})
if err != nil {
// When an AMI disappears out from under a launch permission resource, we will
// see either InvalidAMIID.NotFound or InvalidAMIID.Unavailable.
if ec2err, ok := err.(awserr.Error); ok && strings.HasPrefix(ec2err.Code(), "InvalidAMIID") {
log.Printf("[DEBUG] %s no longer exists, so we'll drop launch permission for %s from the state", image_id, account_id)
return false, nil
}
return false, err
}
for _, lp := range attrs.LaunchPermissions {
if *lp.UserId == account_id {
return true, nil
}
}
return false, nil
}

View File

@ -0,0 +1,127 @@
package aws
import (
"fmt"
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/apigateway"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsApiGatewayAccount() *schema.Resource {
return &schema.Resource{
Create: resourceAwsApiGatewayAccountUpdate,
Read: resourceAwsApiGatewayAccountRead,
Update: resourceAwsApiGatewayAccountUpdate,
Delete: resourceAwsApiGatewayAccountDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: map[string]*schema.Schema{
"cloudwatch_role_arn": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"throttle_settings": &schema.Schema{
Type: schema.TypeList,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"burst_limit": &schema.Schema{
Type: schema.TypeInt,
Computed: true,
},
"rate_limit": &schema.Schema{
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
}
}
func resourceAwsApiGatewayAccountRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[INFO] Reading API Gateway Account %s", d.Id())
account, err := conn.GetAccount(&apigateway.GetAccountInput{})
if err != nil {
return err
}
log.Printf("[DEBUG] Received API Gateway Account: %s", account)
if _, ok := d.GetOk("cloudwatch_role_arn"); ok {
// CloudwatchRoleArn cannot be empty nor made empty via API
// This resource can however be useful w/out defining cloudwatch_role_arn
// (e.g. for referencing throttle_settings)
d.Set("cloudwatch_role_arn", account.CloudwatchRoleArn)
}
d.Set("throttle_settings", flattenApiGatewayThrottleSettings(account.ThrottleSettings))
return nil
}
func resourceAwsApiGatewayAccountUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
input := apigateway.UpdateAccountInput{}
operations := make([]*apigateway.PatchOperation, 0)
if d.HasChange("cloudwatch_role_arn") {
arn := d.Get("cloudwatch_role_arn").(string)
if len(arn) > 0 {
// Unfortunately AWS API doesn't allow empty ARNs,
// even though that's default settings for new AWS accounts
// BadRequestException: The role ARN is not well formed
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/cloudwatchRoleArn"),
Value: aws.String(arn),
})
}
}
input.PatchOperations = operations
log.Printf("[INFO] Updating API Gateway Account: %s", input)
// Retry due to eventual consistency of IAM
expectedErrMsg := "The role ARN does not have required permissions set to API Gateway"
otherErrMsg := "API Gateway could not successfully write to CloudWatch Logs using the ARN specified"
var out *apigateway.Account
var err error
err = resource.Retry(2*time.Minute, func() *resource.RetryError {
out, err = conn.UpdateAccount(&input)
if err != nil {
if isAWSErr(err, "BadRequestException", expectedErrMsg) ||
isAWSErr(err, "BadRequestException", otherErrMsg) {
log.Printf("[DEBUG] Retrying API Gateway Account update: %s", err)
return resource.RetryableError(err)
}
return resource.NonRetryableError(err)
}
return nil
})
if err != nil {
return fmt.Errorf("Updating API Gateway Account failed: %s", err)
}
log.Printf("[DEBUG] API Gateway Account updated: %s", out)
d.SetId("api-gateway-account")
return resourceAwsApiGatewayAccountRead(d, meta)
}
func resourceAwsApiGatewayAccountDelete(d *schema.ResourceData, meta interface{}) error {
// There is no API for "deleting" account or resetting it to "default" settings
d.SetId("")
return nil
}

View File

@ -0,0 +1,202 @@
package aws
import (
"fmt"
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/apigateway"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsApiGatewayApiKey() *schema.Resource {
return &schema.Resource{
Create: resourceAwsApiGatewayApiKeyCreate,
Read: resourceAwsApiGatewayApiKeyRead,
Update: resourceAwsApiGatewayApiKeyUpdate,
Delete: resourceAwsApiGatewayApiKeyDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"description": {
Type: schema.TypeString,
Optional: true,
Default: "Managed by Terraform",
},
"enabled": {
Type: schema.TypeBool,
Optional: true,
Default: true,
},
"stage_key": {
Type: schema.TypeSet,
Optional: true,
Deprecated: "Since the API Gateway usage plans feature was launched on August 11, 2016, usage plans are now required to associate an API key with an API stage",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"rest_api_id": {
Type: schema.TypeString,
Required: true,
},
"stage_name": {
Type: schema.TypeString,
Required: true,
},
},
},
},
"created_date": {
Type: schema.TypeString,
Computed: true,
},
"last_updated_date": {
Type: schema.TypeString,
Computed: true,
},
"value": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
Sensitive: true,
ValidateFunc: validateApiGatewayApiKeyValue,
},
},
}
}
func resourceAwsApiGatewayApiKeyCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Creating API Gateway API Key")
apiKey, err := conn.CreateApiKey(&apigateway.CreateApiKeyInput{
Name: aws.String(d.Get("name").(string)),
Description: aws.String(d.Get("description").(string)),
Enabled: aws.Bool(d.Get("enabled").(bool)),
Value: aws.String(d.Get("value").(string)),
StageKeys: expandApiGatewayStageKeys(d),
})
if err != nil {
return fmt.Errorf("Error creating API Gateway: %s", err)
}
d.SetId(*apiKey.Id)
return resourceAwsApiGatewayApiKeyRead(d, meta)
}
func resourceAwsApiGatewayApiKeyRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Reading API Gateway API Key: %s", d.Id())
apiKey, err := conn.GetApiKey(&apigateway.GetApiKeyInput{
ApiKey: aws.String(d.Id()),
IncludeValue: aws.Bool(true),
})
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
d.SetId("")
return nil
}
return err
}
d.Set("name", apiKey.Name)
d.Set("description", apiKey.Description)
d.Set("enabled", apiKey.Enabled)
d.Set("stage_key", flattenApiGatewayStageKeys(apiKey.StageKeys))
d.Set("value", apiKey.Value)
if err := d.Set("created_date", apiKey.CreatedDate.Format(time.RFC3339)); err != nil {
log.Printf("[DEBUG] Error setting created_date: %s", err)
}
if err := d.Set("last_updated_date", apiKey.LastUpdatedDate.Format(time.RFC3339)); err != nil {
log.Printf("[DEBUG] Error setting last_updated_date: %s", err)
}
return nil
}
func resourceAwsApiGatewayApiKeyUpdateOperations(d *schema.ResourceData) []*apigateway.PatchOperation {
operations := make([]*apigateway.PatchOperation, 0)
if d.HasChange("enabled") {
isEnabled := "false"
if d.Get("enabled").(bool) {
isEnabled = "true"
}
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/enabled"),
Value: aws.String(isEnabled),
})
}
if d.HasChange("description") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/description"),
Value: aws.String(d.Get("description").(string)),
})
}
if d.HasChange("stage_key") {
operations = append(operations, expandApiGatewayStageKeyOperations(d)...)
}
return operations
}
func resourceAwsApiGatewayApiKeyUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Updating API Gateway API Key: %s", d.Id())
_, err := conn.UpdateApiKey(&apigateway.UpdateApiKeyInput{
ApiKey: aws.String(d.Id()),
PatchOperations: resourceAwsApiGatewayApiKeyUpdateOperations(d),
})
if err != nil {
return err
}
return resourceAwsApiGatewayApiKeyRead(d, meta)
}
func resourceAwsApiGatewayApiKeyDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Deleting API Gateway API Key: %s", d.Id())
return resource.Retry(5*time.Minute, func() *resource.RetryError {
_, err := conn.DeleteApiKey(&apigateway.DeleteApiKeyInput{
ApiKey: aws.String(d.Id()),
})
if err == nil {
return nil
}
if apigatewayErr, ok := err.(awserr.Error); ok && apigatewayErr.Code() == "NotFoundException" {
return nil
}
return resource.NonRetryableError(err)
})
}

View File

@ -0,0 +1,212 @@
package aws
import (
"fmt"
"log"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/apigateway"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsApiGatewayAuthorizer() *schema.Resource {
return &schema.Resource{
Create: resourceAwsApiGatewayAuthorizerCreate,
Read: resourceAwsApiGatewayAuthorizerRead,
Update: resourceAwsApiGatewayAuthorizerUpdate,
Delete: resourceAwsApiGatewayAuthorizerDelete,
Schema: map[string]*schema.Schema{
"authorizer_uri": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"identity_source": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Default: "method.request.header.Authorization",
},
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"rest_api_id": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"type": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Default: "TOKEN",
},
"authorizer_credentials": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"authorizer_result_ttl_in_seconds": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
ValidateFunc: validateIntegerInRange(0, 3600),
},
"identity_validation_expression": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
},
}
}
func resourceAwsApiGatewayAuthorizerCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
input := apigateway.CreateAuthorizerInput{
AuthorizerUri: aws.String(d.Get("authorizer_uri").(string)),
IdentitySource: aws.String(d.Get("identity_source").(string)),
Name: aws.String(d.Get("name").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
Type: aws.String(d.Get("type").(string)),
}
if v, ok := d.GetOk("authorizer_credentials"); ok {
input.AuthorizerCredentials = aws.String(v.(string))
}
if v, ok := d.GetOk("authorizer_result_ttl_in_seconds"); ok {
input.AuthorizerResultTtlInSeconds = aws.Int64(int64(v.(int)))
}
if v, ok := d.GetOk("identity_validation_expression"); ok {
input.IdentityValidationExpression = aws.String(v.(string))
}
log.Printf("[INFO] Creating API Gateway Authorizer: %s", input)
out, err := conn.CreateAuthorizer(&input)
if err != nil {
return fmt.Errorf("Error creating API Gateway Authorizer: %s", err)
}
d.SetId(*out.Id)
return resourceAwsApiGatewayAuthorizerRead(d, meta)
}
func resourceAwsApiGatewayAuthorizerRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[INFO] Reading API Gateway Authorizer %s", d.Id())
input := apigateway.GetAuthorizerInput{
AuthorizerId: aws.String(d.Id()),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
}
authorizer, err := conn.GetAuthorizer(&input)
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
log.Printf("[WARN] No API Gateway Authorizer found: %s", input)
d.SetId("")
return nil
}
return err
}
log.Printf("[DEBUG] Received API Gateway Authorizer: %s", authorizer)
d.Set("authorizer_credentials", authorizer.AuthorizerCredentials)
d.Set("authorizer_result_ttl_in_seconds", authorizer.AuthorizerResultTtlInSeconds)
d.Set("authorizer_uri", authorizer.AuthorizerUri)
d.Set("identity_source", authorizer.IdentitySource)
d.Set("identity_validation_expression", authorizer.IdentityValidationExpression)
d.Set("name", authorizer.Name)
d.Set("type", authorizer.Type)
return nil
}
func resourceAwsApiGatewayAuthorizerUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
input := apigateway.UpdateAuthorizerInput{
AuthorizerId: aws.String(d.Id()),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
}
operations := make([]*apigateway.PatchOperation, 0)
if d.HasChange("authorizer_uri") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/authorizerUri"),
Value: aws.String(d.Get("authorizer_uri").(string)),
})
}
if d.HasChange("identity_source") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/identitySource"),
Value: aws.String(d.Get("identity_source").(string)),
})
}
if d.HasChange("name") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/name"),
Value: aws.String(d.Get("name").(string)),
})
}
if d.HasChange("type") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/type"),
Value: aws.String(d.Get("type").(string)),
})
}
if d.HasChange("authorizer_credentials") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/authorizerCredentials"),
Value: aws.String(d.Get("authorizer_credentials").(string)),
})
}
if d.HasChange("authorizer_result_ttl_in_seconds") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/authorizerResultTtlInSeconds"),
Value: aws.String(fmt.Sprintf("%d", d.Get("authorizer_result_ttl_in_seconds").(int))),
})
}
if d.HasChange("identity_validation_expression") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/identityValidationExpression"),
Value: aws.String(d.Get("identity_validation_expression").(string)),
})
}
input.PatchOperations = operations
log.Printf("[INFO] Updating API Gateway Authorizer: %s", input)
_, err := conn.UpdateAuthorizer(&input)
if err != nil {
return fmt.Errorf("Updating API Gateway Authorizer failed: %s", err)
}
return resourceAwsApiGatewayAuthorizerRead(d, meta)
}
func resourceAwsApiGatewayAuthorizerDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
input := apigateway.DeleteAuthorizerInput{
AuthorizerId: aws.String(d.Id()),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
}
log.Printf("[INFO] Deleting API Gateway Authorizer: %s", input)
_, err := conn.DeleteAuthorizer(&input)
if err != nil {
// XXX: Figure out a way to delete the method that depends on the authorizer first
// otherwise the authorizer will be dangling until the API is deleted
if !strings.Contains(err.Error(), "ConflictException") {
return fmt.Errorf("Deleting API Gateway Authorizer failed: %s", err)
}
}
return nil
}
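
Every Update function in these resources follows the same pattern: compare the old and new attribute values and emit one "replace" PatchOperation per changed field, keyed by the attribute's JSON path in the API. A stand-alone sketch of that mapping follows; patchOp is a hypothetical stand-in for apigateway.PatchOperation.

package main

import "fmt"

// patchOp mirrors the Op/Path/Value shape of apigateway.PatchOperation.
type patchOp struct{ Op, Path, Value string }

// replaceOps emits one "replace" operation per attribute whose value changed,
// which is how the Update function above maps a Terraform diff onto the
// API Gateway PATCH call.
func replaceOps(prev, next map[string]string) []patchOp {
    ops := []patchOp{}
    for path, v := range next {
        if prev[path] != v {
            ops = append(ops, patchOp{Op: "replace", Path: path, Value: v})
        }
    }
    return ops
}

func main() {
    prev := map[string]string{"/name": "main", "/type": "TOKEN"}
    next := map[string]string{"/name": "main-v2", "/type": "TOKEN"}
    fmt.Printf("%+v\n", replaceOps(prev, next)) // [{Op:replace Path:/name Value:main-v2}]
}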

View File

@ -0,0 +1,146 @@
package aws
import (
"fmt"
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/apigateway"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
const emptyBasePathMappingValue = "(none)"
func resourceAwsApiGatewayBasePathMapping() *schema.Resource {
return &schema.Resource{
Create: resourceAwsApiGatewayBasePathMappingCreate,
Read: resourceAwsApiGatewayBasePathMappingRead,
Delete: resourceAwsApiGatewayBasePathMappingDelete,
Schema: map[string]*schema.Schema{
"api_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"base_path": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"stage_name": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"domain_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
},
}
}
func resourceAwsApiGatewayBasePathMappingCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
err := resource.Retry(30*time.Second, func() *resource.RetryError {
_, err := conn.CreateBasePathMapping(&apigateway.CreateBasePathMappingInput{
RestApiId: aws.String(d.Get("api_id").(string)),
DomainName: aws.String(d.Get("domain_name").(string)),
BasePath: aws.String(d.Get("base_path").(string)),
Stage: aws.String(d.Get("stage_name").(string)),
})
if err != nil {
if err, ok := err.(awserr.Error); ok && err.Code() != "BadRequestException" {
return resource.NonRetryableError(err)
}
return resource.RetryableError(
fmt.Errorf("Error creating Gateway base path mapping: %s", err),
)
}
return nil
})
if err != nil {
return fmt.Errorf("Error creating Gateway base path mapping: %s", err)
}
id := fmt.Sprintf("%s/%s", d.Get("domain_name").(string), d.Get("base_path").(string))
d.SetId(id)
return resourceAwsApiGatewayBasePathMappingRead(d, meta)
}
func resourceAwsApiGatewayBasePathMappingRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
domainName := d.Get("domain_name").(string)
basePath := d.Get("base_path").(string)
if domainName == "" {
return nil
}
if basePath == "" {
basePath = emptyBasePathMappingValue
}
mapping, err := conn.GetBasePathMapping(&apigateway.GetBasePathMappingInput{
DomainName: aws.String(domainName),
BasePath: aws.String(basePath),
})
if err != nil {
if err, ok := err.(awserr.Error); ok && err.Code() == "NotFoundException" {
log.Printf("[WARN] API gateway base path mapping %s has vanished\n", d.Id())
d.SetId("")
return nil
}
return fmt.Errorf("Error reading Gateway base path mapping: %s", err)
}
mappingBasePath := *mapping.BasePath
if mappingBasePath == emptyBasePathMappingValue {
mappingBasePath = ""
}
d.Set("base_path", mappingBasePath)
d.Set("api_id", mapping.RestApiId)
d.Set("stage_name", mapping.Stage)
return nil
}
func resourceAwsApiGatewayBasePathMappingDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
basePath := d.Get("base_path").(string)
if basePath == "" {
basePath = emptyBasePathMappingValue
}
_, err := conn.DeleteBasePathMapping(&apigateway.DeleteBasePathMappingInput{
DomainName: aws.String(d.Get("domain_name").(string)),
BasePath: aws.String(basePath),
})
if err != nil {
if err, ok := err.(awserr.Error); ok && err.Code() == "NotFoundException" {
return nil
}
return err
}
return nil
}
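
A base path mapping has no single server-side identifier, so Create synthesizes one from the domain name and base path, and both Read and Delete substitute the literal "(none)" when the base path is empty. A small sketch of those two conventions; the ID splitting at the end is illustrative only, since Read above relies on the stored attributes rather than parsing the ID.

package main

import (
    "fmt"
    "strings"
)

const emptyBasePath = "(none)" // sentinel the API uses for an empty base path

// mappingID builds the "<domain_name>/<base_path>" composite ID set by Create.
func mappingID(domain, basePath string) string {
    return fmt.Sprintf("%s/%s", domain, basePath)
}

// apiBasePath converts the Terraform value into the form the API expects:
// an empty base path is addressed as "(none)".
func apiBasePath(basePath string) string {
    if basePath == "" {
        return emptyBasePath
    }
    return basePath
}

func main() {
    fmt.Println(mappingID("example.com", "v1")) // example.com/v1
    fmt.Println(apiBasePath(""))                // (none)
    parts := strings.SplitN("example.com/v1", "/", 2)
    fmt.Println(parts[0], parts[1]) // example.com v1
}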

View File

@ -0,0 +1,125 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/apigateway"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsApiGatewayClientCertificate() *schema.Resource {
return &schema.Resource{
Create: resourceAwsApiGatewayClientCertificateCreate,
Read: resourceAwsApiGatewayClientCertificateRead,
Update: resourceAwsApiGatewayClientCertificateUpdate,
Delete: resourceAwsApiGatewayClientCertificateDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: map[string]*schema.Schema{
"description": {
Type: schema.TypeString,
Optional: true,
},
"created_date": {
Type: schema.TypeString,
Computed: true,
},
"expiration_date": {
Type: schema.TypeString,
Computed: true,
},
"pem_encoded_certificate": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func resourceAwsApiGatewayClientCertificateCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
input := apigateway.GenerateClientCertificateInput{}
if v, ok := d.GetOk("description"); ok {
input.Description = aws.String(v.(string))
}
log.Printf("[DEBUG] Generating API Gateway Client Certificate: %s", input)
out, err := conn.GenerateClientCertificate(&input)
if err != nil {
return fmt.Errorf("Failed to generate client certificate: %s", err)
}
d.SetId(*out.ClientCertificateId)
return resourceAwsApiGatewayClientCertificateRead(d, meta)
}
func resourceAwsApiGatewayClientCertificateRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
input := apigateway.GetClientCertificateInput{
ClientCertificateId: aws.String(d.Id()),
}
out, err := conn.GetClientCertificate(&input)
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
log.Printf("[WARN] API Gateway Client Certificate %s not found, removing", d.Id())
d.SetId("")
return nil
}
return err
}
log.Printf("[DEBUG] Received API Gateway Client Certificate: %s", out)
d.Set("description", out.Description)
d.Set("created_date", out.CreatedDate)
d.Set("expiration_date", out.ExpirationDate)
d.Set("pem_encoded_certificate", out.PemEncodedCertificate)
return nil
}
func resourceAwsApiGatewayClientCertificateUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
operations := make([]*apigateway.PatchOperation, 0)
if d.HasChange("description") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/description"),
Value: aws.String(d.Get("description").(string)),
})
}
input := apigateway.UpdateClientCertificateInput{
ClientCertificateId: aws.String(d.Id()),
PatchOperations: operations,
}
log.Printf("[DEBUG] Updating API Gateway Client Certificate: %s", input)
_, err := conn.UpdateClientCertificate(&input)
if err != nil {
return fmt.Errorf("Updating API Gateway Client Certificate failed: %s", err)
}
return resourceAwsApiGatewayClientCertificateRead(d, meta)
}
func resourceAwsApiGatewayClientCertificateDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Deleting API Gateway Client Certificate: %s", d.Id())
input := apigateway.DeleteClientCertificateInput{
ClientCertificateId: aws.String(d.Id()),
}
_, err := conn.DeleteClientCertificate(&input)
if err != nil {
return fmt.Errorf("Deleting API Gateway Client Certificate failed: %s", err)
}
return nil
}

View File

@ -0,0 +1,200 @@
package aws
import (
"fmt"
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/apigateway"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsApiGatewayDeployment() *schema.Resource {
return &schema.Resource{
Create: resourceAwsApiGatewayDeploymentCreate,
Read: resourceAwsApiGatewayDeploymentRead,
Update: resourceAwsApiGatewayDeploymentUpdate,
Delete: resourceAwsApiGatewayDeploymentDelete,
Schema: map[string]*schema.Schema{
"rest_api_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"stage_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"description": {
Type: schema.TypeString,
Optional: true,
},
"stage_description": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"variables": {
Type: schema.TypeMap,
Optional: true,
ForceNew: true,
Elem: schema.TypeString,
},
"created_date": {
Type: schema.TypeString,
Computed: true,
},
"invoke_url": {
Type: schema.TypeString,
Computed: true,
},
"execution_arn": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func resourceAwsApiGatewayDeploymentCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
// Create the deployment
log.Printf("[DEBUG] Creating API Gateway Deployment")
variables := make(map[string]string)
for k, v := range d.Get("variables").(map[string]interface{}) {
variables[k] = v.(string)
}
var err error
deployment, err := conn.CreateDeployment(&apigateway.CreateDeploymentInput{
RestApiId: aws.String(d.Get("rest_api_id").(string)),
StageName: aws.String(d.Get("stage_name").(string)),
Description: aws.String(d.Get("description").(string)),
StageDescription: aws.String(d.Get("stage_description").(string)),
Variables: aws.StringMap(variables),
})
if err != nil {
return fmt.Errorf("Error creating API Gateway Deployment: %s", err)
}
d.SetId(*deployment.Id)
log.Printf("[DEBUG] API Gateway Deployment ID: %s", d.Id())
return resourceAwsApiGatewayDeploymentRead(d, meta)
}
func resourceAwsApiGatewayDeploymentRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Reading API Gateway Deployment %s", d.Id())
restApiId := d.Get("rest_api_id").(string)
out, err := conn.GetDeployment(&apigateway.GetDeploymentInput{
RestApiId: aws.String(restApiId),
DeploymentId: aws.String(d.Id()),
})
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
d.SetId("")
return nil
}
return err
}
log.Printf("[DEBUG] Received API Gateway Deployment: %s", out)
d.Set("description", out.Description)
region := meta.(*AWSClient).region
stageName := d.Get("stage_name").(string)
d.Set("invoke_url", buildApiGatewayInvokeURL(restApiId, region, stageName))
accountId := meta.(*AWSClient).accountid
arn, err := buildApiGatewayExecutionARN(restApiId, region, accountId)
if err != nil {
return err
}
d.Set("execution_arn", arn+"/"+stageName)
if err := d.Set("created_date", out.CreatedDate.Format(time.RFC3339)); err != nil {
log.Printf("[DEBUG] Error setting created_date: %s", err)
}
return nil
}
func resourceAwsApiGatewayDeploymentUpdateOperations(d *schema.ResourceData) []*apigateway.PatchOperation {
operations := make([]*apigateway.PatchOperation, 0)
if d.HasChange("description") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/description"),
Value: aws.String(d.Get("description").(string)),
})
}
return operations
}
func resourceAwsApiGatewayDeploymentUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Updating API Gateway API Key: %s", d.Id())
_, err := conn.UpdateDeployment(&apigateway.UpdateDeploymentInput{
DeploymentId: aws.String(d.Id()),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
PatchOperations: resourceAwsApiGatewayDeploymentUpdateOperations(d),
})
if err != nil {
return err
}
return resourceAwsApiGatewayDeploymentRead(d, meta)
}
func resourceAwsApiGatewayDeploymentDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Deleting API Gateway Deployment: %s", d.Id())
return resource.Retry(5*time.Minute, func() *resource.RetryError {
log.Printf("[DEBUG] schema is %#v", d)
if _, err := conn.DeleteStage(&apigateway.DeleteStageInput{
StageName: aws.String(d.Get("stage_name").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
}); err == nil {
return nil
}
_, err := conn.DeleteDeployment(&apigateway.DeleteDeploymentInput{
DeploymentId: aws.String(d.Id()),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
})
if err == nil {
return nil
}
// Check the ok flag before using the assertion result, so a non-awserr
// error cannot cause a nil interface method call.
if apigatewayErr, ok := err.(awserr.Error); ok && apigatewayErr.Code() == "NotFoundException" {
return nil
}
return resource.NonRetryableError(err)
})
}
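
Read derives two computed attributes from helpers defined elsewhere in the provider (buildApiGatewayInvokeURL and buildApiGatewayExecutionARN), which are not part of this diff. The sketch below shows the conventional formats those values take; treat the format strings as an assumption, and note that the "aws" partition is hardcoded for simplicity.

package main

import "fmt"

// invokeURL sketches the standard API Gateway invoke URL format (assumed here;
// the provider delegates to buildApiGatewayInvokeURL).
func invokeURL(restAPIID, region, stage string) string {
    return fmt.Sprintf("https://%s.execute-api.%s.amazonaws.com/%s", restAPIID, region, stage)
}

// executionARN sketches the execution ARN that IAM policies and Lambda
// permissions reference; the "aws" partition is hardcoded in this sketch.
func executionARN(restAPIID, region, accountID, stage string) string {
    return fmt.Sprintf("arn:aws:execute-api:%s:%s:%s/%s", region, accountID, restAPIID, stage)
}

func main() {
    fmt.Println(invokeURL("abc123", "us-east-1", "prod"))
    fmt.Println(executionARN("abc123", "us-east-1", "123456789012", "prod"))
}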

View File

@ -0,0 +1,210 @@
package aws
import (
"fmt"
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/apigateway"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsApiGatewayDomainName() *schema.Resource {
return &schema.Resource{
Create: resourceAwsApiGatewayDomainNameCreate,
Read: resourceAwsApiGatewayDomainNameRead,
Update: resourceAwsApiGatewayDomainNameUpdate,
Delete: resourceAwsApiGatewayDomainNameDelete,
Schema: map[string]*schema.Schema{
// According to the AWS documentation, ACM will eventually be the only way to
// add certificates to API Gateway domain names. When that happens, all of the
// certificate arguments except certificate_arn will be deprecated; the timeline
// is not yet known.
"certificate_body": {
Type: schema.TypeString,
ForceNew: true,
Optional: true,
ConflictsWith: []string{"certificate_arn"},
},
"certificate_chain": {
Type: schema.TypeString,
ForceNew: true,
Optional: true,
ConflictsWith: []string{"certificate_arn"},
},
"certificate_name": {
Type: schema.TypeString,
Optional: true,
ConflictsWith: []string{"certificate_arn"},
},
"certificate_private_key": {
Type: schema.TypeString,
ForceNew: true,
Optional: true,
Sensitive: true,
ConflictsWith: []string{"certificate_arn"},
},
"domain_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"certificate_arn": {
Type: schema.TypeString,
Optional: true,
ConflictsWith: []string{"certificate_body", "certificate_chain", "certificate_name", "certificate_private_key"},
},
"cloudfront_domain_name": {
Type: schema.TypeString,
Computed: true,
},
"certificate_upload_date": {
Type: schema.TypeString,
Computed: true,
},
"cloudfront_zone_id": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func resourceAwsApiGatewayDomainNameCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Creating API Gateway Domain Name")
params := &apigateway.CreateDomainNameInput{
DomainName: aws.String(d.Get("domain_name").(string)),
}
if v, ok := d.GetOk("certificate_arn"); ok {
params.CertificateArn = aws.String(v.(string))
}
if v, ok := d.GetOk("certificate_name"); ok {
params.CertificateName = aws.String(v.(string))
}
if v, ok := d.GetOk("certificate_body"); ok {
params.CertificateBody = aws.String(v.(string))
}
if v, ok := d.GetOk("certificate_chain"); ok {
params.CertificateChain = aws.String(v.(string))
}
if v, ok := d.GetOk("certificate_private_key"); ok {
params.CertificatePrivateKey = aws.String(v.(string))
}
domainName, err := conn.CreateDomainName(params)
if err != nil {
return fmt.Errorf("Error creating API Gateway Domain Name: %s", err)
}
d.SetId(*domainName.DomainName)
d.Set("cloudfront_domain_name", domainName.DistributionDomainName)
d.Set("cloudfront_zone_id", cloudFrontRoute53ZoneID)
return resourceAwsApiGatewayDomainNameRead(d, meta)
}
func resourceAwsApiGatewayDomainNameRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Reading API Gateway Domain Name %s", d.Id())
domainName, err := conn.GetDomainName(&apigateway.GetDomainNameInput{
DomainName: aws.String(d.Id()),
})
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
log.Printf("[WARN] API gateway domain name %s has vanished\n", d.Id())
d.SetId("")
return nil
}
return err
}
d.Set("certificate_name", domainName.CertificateName)
if err := d.Set("certificate_upload_date", domainName.CertificateUploadDate.Format(time.RFC3339)); err != nil {
log.Printf("[DEBUG] Error setting certificate_upload_date: %s", err)
}
d.Set("cloudfront_domain_name", domainName.DistributionDomainName)
d.Set("domain_name", domainName.DomainName)
d.Set("certificate_arn", domainName.CertificateArn)
return nil
}
func resourceAwsApiGatewayDomainNameUpdateOperations(d *schema.ResourceData) []*apigateway.PatchOperation {
operations := make([]*apigateway.PatchOperation, 0)
if d.HasChange("certificate_name") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/certificateName"),
Value: aws.String(d.Get("certificate_name").(string)),
})
}
if d.HasChange("certificate_arn") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/certificateArn"),
Value: aws.String(d.Get("certificate_arn").(string)),
})
}
return operations
}
func resourceAwsApiGatewayDomainNameUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Updating API Gateway Domain Name %s", d.Id())
_, err := conn.UpdateDomainName(&apigateway.UpdateDomainNameInput{
DomainName: aws.String(d.Id()),
PatchOperations: resourceAwsApiGatewayDomainNameUpdateOperations(d),
})
if err != nil {
return err
}
return resourceAwsApiGatewayDomainNameRead(d, meta)
}
func resourceAwsApiGatewayDomainNameDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Deleting API Gateway Domain Name: %s", d.Id())
return resource.Retry(5*time.Minute, func() *resource.RetryError {
_, err := conn.DeleteDomainName(&apigateway.DeleteDomainNameInput{
DomainName: aws.String(d.Id()),
})
if err == nil {
return nil
}
if apigatewayErr, ok := err.(awserr.Error); ok && apigatewayErr.Code() == "NotFoundException" {
return nil
}
return resource.NonRetryableError(err)
})
}

View File

@ -0,0 +1,348 @@
package aws
import (
"encoding/json"
"fmt"
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/apigateway"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"strings"
)
func resourceAwsApiGatewayIntegration() *schema.Resource {
return &schema.Resource{
Create: resourceAwsApiGatewayIntegrationCreate,
Read: resourceAwsApiGatewayIntegrationRead,
Update: resourceAwsApiGatewayIntegrationUpdate,
Delete: resourceAwsApiGatewayIntegrationDelete,
Schema: map[string]*schema.Schema{
"rest_api_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"resource_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"http_method": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateHTTPMethod,
},
"type": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateApiGatewayIntegrationType,
},
"uri": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"credentials": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"integration_http_method": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validateHTTPMethod,
},
"request_templates": {
Type: schema.TypeMap,
Optional: true,
Elem: schema.TypeString,
},
"request_parameters": {
Type: schema.TypeMap,
Elem: schema.TypeString,
Optional: true,
ConflictsWith: []string{"request_parameters_in_json"},
},
"request_parameters_in_json": {
Type: schema.TypeString,
Optional: true,
ConflictsWith: []string{"request_parameters"},
Deprecated: "Use field request_parameters instead",
},
"content_handling": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validateApiGatewayIntegrationContentHandling,
},
"passthrough_behavior": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
ValidateFunc: validateApiGatewayIntegrationPassthroughBehavior,
},
},
}
}
func resourceAwsApiGatewayIntegrationCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Print("[DEBUG] Creating API Gateway Integration")
var integrationHttpMethod *string
if v, ok := d.GetOk("integration_http_method"); ok {
integrationHttpMethod = aws.String(v.(string))
}
var uri *string
if v, ok := d.GetOk("uri"); ok {
uri = aws.String(v.(string))
}
templates := make(map[string]string)
for k, v := range d.Get("request_templates").(map[string]interface{}) {
templates[k] = v.(string)
}
parameters := make(map[string]string)
if kv, ok := d.GetOk("request_parameters"); ok {
for k, v := range kv.(map[string]interface{}) {
parameters[k] = v.(string)
}
}
if v, ok := d.GetOk("request_parameters_in_json"); ok {
if err := json.Unmarshal([]byte(v.(string)), &parameters); err != nil {
return fmt.Errorf("Error unmarshaling request_parameters_in_json: %s", err)
}
}
var passthroughBehavior *string
if v, ok := d.GetOk("passthrough_behavior"); ok {
passthroughBehavior = aws.String(v.(string))
}
var credentials *string
if val, ok := d.GetOk("credentials"); ok {
credentials = aws.String(val.(string))
}
var contentHandling *string
if val, ok := d.GetOk("content_handling"); ok {
contentHandling = aws.String(val.(string))
}
_, err := conn.PutIntegration(&apigateway.PutIntegrationInput{
HttpMethod: aws.String(d.Get("http_method").(string)),
ResourceId: aws.String(d.Get("resource_id").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
Type: aws.String(d.Get("type").(string)),
IntegrationHttpMethod: integrationHttpMethod,
Uri: uri,
RequestParameters: aws.StringMap(parameters),
RequestTemplates: aws.StringMap(templates),
Credentials: credentials,
CacheNamespace: nil,
CacheKeyParameters: nil,
PassthroughBehavior: passthroughBehavior,
ContentHandling: contentHandling,
})
if err != nil {
return fmt.Errorf("Error creating API Gateway Integration: %s", err)
}
d.SetId(fmt.Sprintf("agi-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string)))
return resourceAwsApiGatewayIntegrationRead(d, meta)
}
func resourceAwsApiGatewayIntegrationRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Reading API Gateway Integration: %s", d.Id())
integration, err := conn.GetIntegration(&apigateway.GetIntegrationInput{
HttpMethod: aws.String(d.Get("http_method").(string)),
ResourceId: aws.String(d.Get("resource_id").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
})
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
d.SetId("")
return nil
}
return err
}
log.Printf("[DEBUG] Received API Gateway Integration: %s", integration)
d.SetId(fmt.Sprintf("agi-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string)))
// AWS converts "" to null on their side, convert it back
if v, ok := integration.RequestTemplates["application/json"]; ok && v == nil {
integration.RequestTemplates["application/json"] = aws.String("")
}
d.Set("request_templates", aws.StringValueMap(integration.RequestTemplates))
d.Set("type", integration.Type)
d.Set("request_parameters", aws.StringValueMap(integration.RequestParameters))
d.Set("request_parameters_in_json", aws.StringValueMap(integration.RequestParameters))
d.Set("passthrough_behavior", integration.PassthroughBehavior)
if integration.Uri != nil {
d.Set("uri", integration.Uri)
}
if integration.Credentials != nil {
d.Set("credentials", integration.Credentials)
}
if integration.ContentHandling != nil {
d.Set("content_handling", integration.ContentHandling)
}
return nil
}
func resourceAwsApiGatewayIntegrationUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Updating API Gateway Integration: %s", d.Id())
operations := make([]*apigateway.PatchOperation, 0)
// https://docs.aws.amazon.com/apigateway/api-reference/link-relation/integration-update/#remarks
// According to the above documentation, only a few parts are addable / removable.
if d.HasChange("request_templates") {
o, n := d.GetChange("request_templates")
prefix := "requestTemplates"
os := o.(map[string]interface{})
ns := n.(map[string]interface{})
// Handle Removal
for k := range os {
if _, ok := ns[k]; !ok {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("remove"),
Path: aws.String(fmt.Sprintf("/%s/%s", prefix, strings.Replace(k, "/", "~1", -1))),
})
}
}
for k, v := range ns {
// Handle replaces
if _, ok := os[k]; ok {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String(fmt.Sprintf("/%s/%s", prefix, strings.Replace(k, "/", "~1", -1))),
Value: aws.String(v.(string)),
})
}
// Handle additions
if _, ok := os[k]; !ok {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("add"),
Path: aws.String(fmt.Sprintf("/%s/%s", prefix, strings.Replace(k, "/", "~1", -1))),
Value: aws.String(v.(string)),
})
}
}
}
if d.HasChange("request_parameters") {
o, n := d.GetChange("request_parameters")
prefix := "requestParameters"
os := o.(map[string]interface{})
ns := n.(map[string]interface{})
// Handle Removal
for k := range os {
if _, ok := ns[k]; !ok {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("remove"),
Path: aws.String(fmt.Sprintf("/%s/%s", prefix, strings.Replace(k, "/", "~1", -1))),
})
}
}
for k, v := range ns {
// Handle replaces
if _, ok := os[k]; ok {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String(fmt.Sprintf("/%s/%s", prefix, strings.Replace(k, "/", "~1", -1))),
Value: aws.String(v.(string)),
})
}
// Handle additions
if _, ok := os[k]; !ok {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("add"),
Path: aws.String(fmt.Sprintf("/%s/%s", prefix, strings.Replace(k, "/", "~1", -1))),
Value: aws.String(v.(string)),
})
}
}
}
params := &apigateway.UpdateIntegrationInput{
HttpMethod: aws.String(d.Get("http_method").(string)),
ResourceId: aws.String(d.Get("resource_id").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
PatchOperations: operations,
}
_, err := conn.UpdateIntegration(params)
if err != nil {
return fmt.Errorf("Error updating API Gateway Integration: %s", err)
}
d.SetId(fmt.Sprintf("agi-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string)))
return resourceAwsApiGatewayIntegrationRead(d, meta)
}
func resourceAwsApiGatewayIntegrationDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Deleting API Gateway Integration: %s", d.Id())
return resource.Retry(5*time.Minute, func() *resource.RetryError {
_, err := conn.DeleteIntegration(&apigateway.DeleteIntegrationInput{
HttpMethod: aws.String(d.Get("http_method").(string)),
ResourceId: aws.String(d.Get("resource_id").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
})
if err == nil {
return nil
}
// Check the ok flag before using the assertion result, so a non-awserr
// error cannot cause a nil interface method call.
if apigatewayErr, ok := err.(awserr.Error); ok && apigatewayErr.Code() == "NotFoundException" {
return nil
}
return resource.NonRetryableError(err)
})
}
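
request_templates and request_parameters are map attributes, so the Update function has to compute add, replace, and remove operations itself, and map keys such as "application/json" contain a "/" that must be escaped as "~1" (JSON Pointer style) so it is not read as a path separator. A stand-alone sketch of that computation; patchOp is a hypothetical stand-in for apigateway.PatchOperation.

package main

import (
    "fmt"
    "strings"
)

type patchOp struct{ Op, Path, Value string }

// escapeKey applies the same escaping as the Update function above: "/" inside
// a map key becomes "~1" so keys like "application/json" stay one path segment.
func escapeKey(k string) string { return strings.Replace(k, "/", "~1", -1) }

// mapPatchOps emits "remove" for keys that disappeared, "replace" for keys that
// already existed, and "add" for brand-new keys, mirroring the handling above.
func mapPatchOps(prefix string, prev, next map[string]string) []patchOp {
    ops := []patchOp{}
    for k := range prev {
        if _, ok := next[k]; !ok {
            ops = append(ops, patchOp{Op: "remove", Path: fmt.Sprintf("/%s/%s", prefix, escapeKey(k))})
        }
    }
    for k, v := range next {
        op := "add"
        if _, ok := prev[k]; ok {
            op = "replace"
        }
        ops = append(ops, patchOp{Op: op, Path: fmt.Sprintf("/%s/%s", prefix, escapeKey(k)), Value: v})
    }
    return ops
}

func main() {
    prev := map[string]string{"application/json": "{}"}
    next := map[string]string{"application/json": `{"statusCode": 200}`, "text/plain": ""}
    for _, op := range mapPatchOps("requestTemplates", prev, next) {
        fmt.Println(op.Op, op.Path)
    }
}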

View File

@ -0,0 +1,184 @@
package aws
import (
"encoding/json"
"fmt"
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/apigateway"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsApiGatewayIntegrationResponse() *schema.Resource {
return &schema.Resource{
Create: resourceAwsApiGatewayIntegrationResponseCreate,
Read: resourceAwsApiGatewayIntegrationResponseRead,
Update: resourceAwsApiGatewayIntegrationResponseCreate,
Delete: resourceAwsApiGatewayIntegrationResponseDelete,
Schema: map[string]*schema.Schema{
"rest_api_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"resource_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"http_method": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateHTTPMethod,
},
"status_code": {
Type: schema.TypeString,
Required: true,
},
"selection_pattern": {
Type: schema.TypeString,
Optional: true,
},
"response_templates": {
Type: schema.TypeMap,
Optional: true,
Elem: schema.TypeString,
},
"response_parameters": {
Type: schema.TypeMap,
Elem: schema.TypeString,
Optional: true,
ConflictsWith: []string{"response_parameters_in_json"},
},
"response_parameters_in_json": {
Type: schema.TypeString,
Optional: true,
ConflictsWith: []string{"response_parameters"},
Deprecated: "Use field response_parameters instead",
},
"content_handling": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validateApiGatewayIntegrationContentHandling,
},
},
}
}
func resourceAwsApiGatewayIntegrationResponseCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
templates := make(map[string]string)
for k, v := range d.Get("response_templates").(map[string]interface{}) {
templates[k] = v.(string)
}
parameters := make(map[string]string)
if kv, ok := d.GetOk("response_parameters"); ok {
for k, v := range kv.(map[string]interface{}) {
parameters[k] = v.(string)
}
}
if v, ok := d.GetOk("response_parameters_in_json"); ok {
if err := json.Unmarshal([]byte(v.(string)), &parameters); err != nil {
return fmt.Errorf("Error unmarshaling response_parameters_in_json: %s", err)
}
}
var contentHandling *string
if val, ok := d.GetOk("content_handling"); ok {
contentHandling = aws.String(val.(string))
}
input := apigateway.PutIntegrationResponseInput{
HttpMethod: aws.String(d.Get("http_method").(string)),
ResourceId: aws.String(d.Get("resource_id").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
StatusCode: aws.String(d.Get("status_code").(string)),
ResponseTemplates: aws.StringMap(templates),
ResponseParameters: aws.StringMap(parameters),
ContentHandling: contentHandling,
}
if v, ok := d.GetOk("selection_pattern"); ok {
input.SelectionPattern = aws.String(v.(string))
}
_, err := conn.PutIntegrationResponse(&input)
if err != nil {
return fmt.Errorf("Error creating API Gateway Integration Response: %s", err)
}
d.SetId(fmt.Sprintf("agir-%s-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string), d.Get("status_code").(string)))
log.Printf("[DEBUG] API Gateway Integration Response ID: %s", d.Id())
return resourceAwsApiGatewayIntegrationResponseRead(d, meta)
}
func resourceAwsApiGatewayIntegrationResponseRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Reading API Gateway Integration Response %s", d.Id())
integrationResponse, err := conn.GetIntegrationResponse(&apigateway.GetIntegrationResponseInput{
HttpMethod: aws.String(d.Get("http_method").(string)),
ResourceId: aws.String(d.Get("resource_id").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
StatusCode: aws.String(d.Get("status_code").(string)),
})
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
d.SetId("")
return nil
}
return err
}
log.Printf("[DEBUG] Received API Gateway Integration Response: %s", integrationResponse)
d.SetId(fmt.Sprintf("agir-%s-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string), d.Get("status_code").(string)))
d.Set("response_templates", integrationResponse.ResponseTemplates)
d.Set("selection_pattern", integrationResponse.SelectionPattern)
d.Set("response_parameters", aws.StringValueMap(integrationResponse.ResponseParameters))
d.Set("response_parameters_in_json", aws.StringValueMap(integrationResponse.ResponseParameters))
return nil
}
func resourceAwsApiGatewayIntegrationResponseDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Deleting API Gateway Integration Response: %s", d.Id())
return resource.Retry(5*time.Minute, func() *resource.RetryError {
_, err := conn.DeleteIntegrationResponse(&apigateway.DeleteIntegrationResponseInput{
HttpMethod: aws.String(d.Get("http_method").(string)),
ResourceId: aws.String(d.Get("resource_id").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
StatusCode: aws.String(d.Get("status_code").(string)),
})
if err == nil {
return nil
}
// Check the ok flag before using the assertion result, so a non-awserr
// error cannot cause a nil interface method call.
if apigatewayErr, ok := err.(awserr.Error); ok && apigatewayErr.Code() == "NotFoundException" {
return nil
}
return resource.NonRetryableError(err)
})
}

View File

@ -0,0 +1,270 @@
package aws
import (
"encoding/json"
"fmt"
"log"
"strconv"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/apigateway"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsApiGatewayMethod() *schema.Resource {
return &schema.Resource{
Create: resourceAwsApiGatewayMethodCreate,
Read: resourceAwsApiGatewayMethodRead,
Update: resourceAwsApiGatewayMethodUpdate,
Delete: resourceAwsApiGatewayMethodDelete,
Schema: map[string]*schema.Schema{
"rest_api_id": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"resource_id": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"http_method": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateHTTPMethod,
},
"authorization": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"authorizer_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"api_key_required": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"request_models": &schema.Schema{
Type: schema.TypeMap,
Optional: true,
Elem: schema.TypeString,
},
"request_parameters": &schema.Schema{
Type: schema.TypeMap,
Elem: schema.TypeBool,
Optional: true,
ConflictsWith: []string{"request_parameters_in_json"},
},
"request_parameters_in_json": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ConflictsWith: []string{"request_parameters"},
Deprecated: "Use field request_parameters instead",
},
},
}
}
func resourceAwsApiGatewayMethodCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
input := apigateway.PutMethodInput{
AuthorizationType: aws.String(d.Get("authorization").(string)),
HttpMethod: aws.String(d.Get("http_method").(string)),
ResourceId: aws.String(d.Get("resource_id").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
ApiKeyRequired: aws.Bool(d.Get("api_key_required").(bool)),
}
models := make(map[string]string)
for k, v := range d.Get("request_models").(map[string]interface{}) {
models[k] = v.(string)
}
if len(models) > 0 {
input.RequestModels = aws.StringMap(models)
}
parameters := make(map[string]bool)
if kv, ok := d.GetOk("request_parameters"); ok {
for k, v := range kv.(map[string]interface{}) {
parameters[k], ok = v.(bool)
if !ok {
value, _ := strconv.ParseBool(v.(string))
parameters[k] = value
}
}
input.RequestParameters = aws.BoolMap(parameters)
}
if v, ok := d.GetOk("request_parameters_in_json"); ok {
if err := json.Unmarshal([]byte(v.(string)), &parameters); err != nil {
return fmt.Errorf("Error unmarshaling request_parameters_in_json: %s", err)
}
input.RequestParameters = aws.BoolMap(parameters)
}
if v, ok := d.GetOk("authorizer_id"); ok {
input.AuthorizerId = aws.String(v.(string))
}
_, err := conn.PutMethod(&input)
if err != nil {
return fmt.Errorf("Error creating API Gateway Method: %s", err)
}
d.SetId(fmt.Sprintf("agm-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string)))
log.Printf("[DEBUG] API Gateway Method ID: %s", d.Id())
return nil
}
func resourceAwsApiGatewayMethodRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Reading API Gateway Method %s", d.Id())
out, err := conn.GetMethod(&apigateway.GetMethodInput{
HttpMethod: aws.String(d.Get("http_method").(string)),
ResourceId: aws.String(d.Get("resource_id").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
})
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
d.SetId("")
return nil
}
return err
}
log.Printf("[DEBUG] Received API Gateway Method: %s", out)
d.SetId(fmt.Sprintf("agm-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string)))
d.Set("request_parameters", aws.BoolValueMap(out.RequestParameters))
d.Set("request_parameters_in_json", aws.BoolValueMap(out.RequestParameters))
d.Set("api_key_required", out.ApiKeyRequired)
d.Set("authorization_type", out.AuthorizationType)
d.Set("authorizer_id", out.AuthorizerId)
d.Set("request_models", aws.StringValueMap(out.RequestModels))
return nil
}
func resourceAwsApiGatewayMethodUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Reading API Gateway Method %s", d.Id())
operations := make([]*apigateway.PatchOperation, 0)
if d.HasChange("resource_id") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/resourceId"),
Value: aws.String(d.Get("resource_id").(string)),
})
}
if d.HasChange("request_models") {
operations = append(operations, expandApiGatewayRequestResponseModelOperations(d, "request_models", "requestModels")...)
}
if d.HasChange("request_parameters_in_json") {
ops, err := deprecatedExpandApiGatewayMethodParametersJSONOperations(d, "request_parameters_in_json", "requestParameters")
if err != nil {
return err
}
operations = append(operations, ops...)
}
if d.HasChange("request_parameters") {
ops, err := expandApiGatewayMethodParametersOperations(d, "request_parameters", "requestParameters")
if err != nil {
return err
}
operations = append(operations, ops...)
}
if d.HasChange("authorization") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/authorizationType"),
Value: aws.String(d.Get("authorization").(string)),
})
}
if d.HasChange("authorizer_id") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/authorizerId"),
Value: aws.String(d.Get("authorizer_id").(string)),
})
}
if d.HasChange("api_key_required") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/apiKeyRequired"),
Value: aws.String(fmt.Sprintf("%t", d.Get("api_key_required").(bool))),
})
}
method, err := conn.UpdateMethod(&apigateway.UpdateMethodInput{
HttpMethod: aws.String(d.Get("http_method").(string)),
ResourceId: aws.String(d.Get("resource_id").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
PatchOperations: operations,
})
if err != nil {
return err
}
log.Printf("[DEBUG] Received API Gateway Method: %s", method)
return resourceAwsApiGatewayMethodRead(d, meta)
}
func resourceAwsApiGatewayMethodDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Deleting API Gateway Method: %s", d.Id())
return resource.Retry(5*time.Minute, func() *resource.RetryError {
_, err := conn.DeleteMethod(&apigateway.DeleteMethodInput{
HttpMethod: aws.String(d.Get("http_method").(string)),
ResourceId: aws.String(d.Get("resource_id").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
})
if err == nil {
return nil
}
// Check the ok flag before using the assertion result, so a non-awserr
// error cannot cause a nil interface method call.
if apigatewayErr, ok := err.(awserr.Error); ok && apigatewayErr.Code() == "NotFoundException" {
return nil
}
return resource.NonRetryableError(err)
})
}
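
request_parameters is declared with Elem: schema.TypeBool, but values can still arrive from the configuration as the strings "true"/"false", which is why Create falls back to strconv.ParseBool. The coercion in isolation:

package main

import (
    "fmt"
    "strconv"
)

// coerceBoolMap mirrors the request_parameters handling above: each value may be
// a real bool or the string form of one; parse failures fall back to false.
func coerceBoolMap(in map[string]interface{}) map[string]bool {
    out := make(map[string]bool, len(in))
    for k, v := range in {
        if b, ok := v.(bool); ok {
            out[k] = b
            continue
        }
        if s, ok := v.(string); ok {
            b, _ := strconv.ParseBool(s)
            out[k] = b
        }
    }
    return out
}

func main() {
    fmt.Println(coerceBoolMap(map[string]interface{}{
        "method.request.header.Authorization": true,
        "method.request.querystring.page":     "false",
    }))
}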

View File

@ -0,0 +1,210 @@
package aws
import (
"encoding/json"
"fmt"
"log"
"strconv"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/apigateway"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsApiGatewayMethodResponse() *schema.Resource {
return &schema.Resource{
Create: resourceAwsApiGatewayMethodResponseCreate,
Read: resourceAwsApiGatewayMethodResponseRead,
Update: resourceAwsApiGatewayMethodResponseUpdate,
Delete: resourceAwsApiGatewayMethodResponseDelete,
Schema: map[string]*schema.Schema{
"rest_api_id": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"resource_id": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"http_method": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateHTTPMethod,
},
"status_code": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"response_models": &schema.Schema{
Type: schema.TypeMap,
Optional: true,
Elem: schema.TypeString,
},
"response_parameters": &schema.Schema{
Type: schema.TypeMap,
Elem: schema.TypeBool,
Optional: true,
ConflictsWith: []string{"response_parameters_in_json"},
},
"response_parameters_in_json": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ConflictsWith: []string{"response_parameters"},
Deprecated: "Use field response_parameters instead",
},
},
}
}
func resourceAwsApiGatewayMethodResponseCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
models := make(map[string]string)
for k, v := range d.Get("response_models").(map[string]interface{}) {
models[k] = v.(string)
}
parameters := make(map[string]bool)
if kv, ok := d.GetOk("response_parameters"); ok {
for k, v := range kv.(map[string]interface{}) {
parameters[k], ok = v.(bool)
if !ok {
value, _ := strconv.ParseBool(v.(string))
parameters[k] = value
}
}
}
if v, ok := d.GetOk("response_parameters_in_json"); ok {
if err := json.Unmarshal([]byte(v.(string)), &parameters); err != nil {
return fmt.Errorf("Error unmarshaling request_parameters_in_json: %s", err)
}
}
_, err := conn.PutMethodResponse(&apigateway.PutMethodResponseInput{
HttpMethod: aws.String(d.Get("http_method").(string)),
ResourceId: aws.String(d.Get("resource_id").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
StatusCode: aws.String(d.Get("status_code").(string)),
ResponseModels: aws.StringMap(models),
ResponseParameters: aws.BoolMap(parameters),
})
if err != nil {
return fmt.Errorf("Error creating API Gateway Method Response: %s", err)
}
d.SetId(fmt.Sprintf("agmr-%s-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string), d.Get("status_code").(string)))
log.Printf("[DEBUG] API Gateway Method ID: %s", d.Id())
return nil
}
func resourceAwsApiGatewayMethodResponseRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Reading API Gateway Method %s", d.Id())
methodResponse, err := conn.GetMethodResponse(&apigateway.GetMethodResponseInput{
HttpMethod: aws.String(d.Get("http_method").(string)),
ResourceId: aws.String(d.Get("resource_id").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
StatusCode: aws.String(d.Get("status_code").(string)),
})
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
d.SetId("")
return nil
}
return err
}
log.Printf("[DEBUG] Received API Gateway Method: %s", methodResponse)
d.Set("response_models", aws.StringValueMap(methodResponse.ResponseModels))
d.Set("response_parameters", aws.BoolValueMap(methodResponse.ResponseParameters))
d.Set("response_parameters_in_json", aws.BoolValueMap(methodResponse.ResponseParameters))
d.SetId(fmt.Sprintf("agmr-%s-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string), d.Get("status_code").(string)))
return nil
}
func resourceAwsApiGatewayMethodResponseUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Updating API Gateway Method Response %s", d.Id())
operations := make([]*apigateway.PatchOperation, 0)
if d.HasChange("response_models") {
operations = append(operations, expandApiGatewayRequestResponseModelOperations(d, "response_models", "responseModels")...)
}
if d.HasChange("response_parameters_in_json") {
ops, err := deprecatedExpandApiGatewayMethodParametersJSONOperations(d, "response_parameters_in_json", "responseParameters")
if err != nil {
return err
}
operations = append(operations, ops...)
}
if d.HasChange("response_parameters") {
ops, err := expandApiGatewayMethodParametersOperations(d, "response_parameters", "responseParameters")
if err != nil {
return err
}
operations = append(operations, ops...)
}
out, err := conn.UpdateMethodResponse(&apigateway.UpdateMethodResponseInput{
HttpMethod: aws.String(d.Get("http_method").(string)),
ResourceId: aws.String(d.Get("resource_id").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
StatusCode: aws.String(d.Get("status_code").(string)),
PatchOperations: operations,
})
if err != nil {
return err
}
log.Printf("[DEBUG] Received API Gateway Method Response: %s", out)
return resourceAwsApiGatewayMethodResponseRead(d, meta)
}
func resourceAwsApiGatewayMethodResponseDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Deleting API Gateway Method Response: %s", d.Id())
return resource.Retry(5*time.Minute, func() *resource.RetryError {
_, err := conn.DeleteMethodResponse(&apigateway.DeleteMethodResponseInput{
HttpMethod: aws.String(d.Get("http_method").(string)),
ResourceId: aws.String(d.Get("resource_id").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
StatusCode: aws.String(d.Get("status_code").(string)),
})
if err == nil {
return nil
}
// Check the ok flag before using the assertion result, so a non-awserr
// error cannot cause a nil interface method call.
if apigatewayErr, ok := err.(awserr.Error); ok && apigatewayErr.Code() == "NotFoundException" {
return nil
}
return resource.NonRetryableError(err)
})
}

View File

@ -0,0 +1,248 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/apigateway"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsApiGatewayMethodSettings() *schema.Resource {
return &schema.Resource{
Create: resourceAwsApiGatewayMethodSettingsUpdate,
Read: resourceAwsApiGatewayMethodSettingsRead,
Update: resourceAwsApiGatewayMethodSettingsUpdate,
Delete: resourceAwsApiGatewayMethodSettingsDelete,
Schema: map[string]*schema.Schema{
"rest_api_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"stage_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"method_path": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"settings": {
Type: schema.TypeList,
Required: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"metrics_enabled": {
Type: schema.TypeBool,
Optional: true,
},
"logging_level": {
Type: schema.TypeString,
Optional: true,
},
"data_trace_enabled": {
Type: schema.TypeBool,
Optional: true,
},
"throttling_burst_limit": {
Type: schema.TypeInt,
Optional: true,
},
"throttling_rate_limit": {
Type: schema.TypeFloat,
Optional: true,
},
"caching_enabled": {
Type: schema.TypeBool,
Optional: true,
},
"cache_ttl_in_seconds": {
Type: schema.TypeInt,
Optional: true,
},
"cache_data_encrypted": {
Type: schema.TypeBool,
Optional: true,
},
"require_authorization_for_cache_control": {
Type: schema.TypeBool,
Optional: true,
},
"unauthorized_cache_control_header_strategy": {
Type: schema.TypeString,
Optional: true,
},
},
},
},
},
}
}
func resourceAwsApiGatewayMethodSettingsRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Reading API Gateway Method Settings %s", d.Id())
input := apigateway.GetStageInput{
RestApiId: aws.String(d.Get("rest_api_id").(string)),
StageName: aws.String(d.Get("stage_name").(string)),
}
stage, err := conn.GetStage(&input)
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
log.Printf("[WARN] API Gateway Stage %s not found, removing method settings", d.Id())
d.SetId("")
return nil
}
return err
}
log.Printf("[DEBUG] Received API Gateway Stage: %s", stage)
methodPath := d.Get("method_path").(string)
settings, ok := stage.MethodSettings[methodPath]
if !ok {
log.Printf("[WARN] API Gateway Method Settings for %q not found, removing", methodPath)
d.SetId("")
return nil
}
d.Set("settings.0.metrics_enabled", settings.MetricsEnabled)
d.Set("settings.0.logging_level", settings.LoggingLevel)
d.Set("settings.0.data_trace_enabled", settings.DataTraceEnabled)
d.Set("settings.0.throttling_burst_limit", settings.ThrottlingBurstLimit)
d.Set("settings.0.throttling_rate_limit", settings.ThrottlingRateLimit)
d.Set("settings.0.caching_enabled", settings.CachingEnabled)
d.Set("settings.0.cache_ttl_in_seconds", settings.CacheTtlInSeconds)
d.Set("settings.0.cache_data_encrypted", settings.CacheDataEncrypted)
d.Set("settings.0.require_authorization_for_cache_control", settings.RequireAuthorizationForCacheControl)
d.Set("settings.0.unauthorized_cache_control_header_strategy", settings.UnauthorizedCacheControlHeaderStrategy)
return nil
}
func resourceAwsApiGatewayMethodSettingsUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
methodPath := d.Get("method_path").(string)
prefix := fmt.Sprintf("/%s/", methodPath)
ops := make([]*apigateway.PatchOperation, 0)
if d.HasChange("settings.0.metrics_enabled") {
ops = append(ops, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String(prefix + "metrics/enabled"),
Value: aws.String(fmt.Sprintf("%t", d.Get("settings.0.metrics_enabled").(bool))),
})
}
if d.HasChange("settings.0.logging_level") {
ops = append(ops, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String(prefix + "logging/loglevel"),
Value: aws.String(d.Get("settings.0.logging_level").(string)),
})
}
if d.HasChange("settings.0.data_trace_enabled") {
ops = append(ops, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String(prefix + "logging/dataTrace"),
Value: aws.String(fmt.Sprintf("%t", d.Get("settings.0.data_trace_enabled").(bool))),
})
}
if d.HasChange("settings.0.throttling_burst_limit") {
ops = append(ops, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String(prefix + "throttling/burstLimit"),
Value: aws.String(fmt.Sprintf("%d", d.Get("settings.0.throttling_burst_limit").(int))),
})
}
if d.HasChange("settings.0.throttling_rate_limit") {
ops = append(ops, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String(prefix + "throttling/rateLimit"),
Value: aws.String(fmt.Sprintf("%f", d.Get("settings.0.throttling_rate_limit").(float64))),
})
}
if d.HasChange("settings.0.caching_enabled") {
ops = append(ops, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String(prefix + "caching/enabled"),
Value: aws.String(fmt.Sprintf("%t", d.Get("settings.0.caching_enabled").(bool))),
})
}
if d.HasChange("settings.0.cache_ttl_in_seconds") {
ops = append(ops, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String(prefix + "caching/ttlInSeconds"),
Value: aws.String(fmt.Sprintf("%d", d.Get("settings.0.cache_ttl_in_seconds").(int))),
})
}
if d.HasChange("settings.0.cache_data_encrypted") {
ops = append(ops, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String(prefix + "caching/dataEncrypted"),
Value: aws.String(fmt.Sprintf("%d", d.Get("settings.0.cache_data_encrypted").(int))),
})
}
if d.HasChange("settings.0.require_authorization_for_cache_control") {
ops = append(ops, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String(prefix + "caching/requireAuthorizationForCacheControl"),
Value: aws.String(fmt.Sprintf("%t", d.Get("settings.0.require_authorization_for_cache_control").(bool))),
})
}
if d.HasChange("settings.0.unauthorized_cache_control_header_strategy") {
ops = append(ops, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String(prefix + "caching/unauthorizedCacheControlHeaderStrategy"),
Value: aws.String(d.Get("settings.0.unauthorized_cache_control_header_strategy").(string)),
})
}
restApiId := d.Get("rest_api_id").(string)
stageName := d.Get("stage_name").(string)
input := apigateway.UpdateStageInput{
RestApiId: aws.String(restApiId),
StageName: aws.String(stageName),
PatchOperations: ops,
}
log.Printf("[DEBUG] Updating API Gateway Stage: %s", input)
_, err := conn.UpdateStage(&input)
if err != nil {
return fmt.Errorf("Updating API Gateway Stage failed: %s", err)
}
d.SetId(restApiId + "-" + stageName + "-" + methodPath)
return resourceAwsApiGatewayMethodSettingsRead(d, meta)
}
func resourceAwsApiGatewayMethodSettingsDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Deleting API Gateway Method Settings: %s", d.Id())
input := apigateway.UpdateStageInput{
RestApiId: aws.String(d.Get("rest_api_id").(string)),
StageName: aws.String(d.Get("stage_name").(string)),
PatchOperations: []*apigateway.PatchOperation{
{
Op: aws.String("remove"),
Path: aws.String(fmt.Sprintf("/%s", d.Get("method_path").(string))),
},
},
}
log.Printf("[DEBUG] Updating API Gateway Stage: %s", input)
_, err := conn.UpdateStage(&input)
if err != nil {
return fmt.Errorf("Updating API Gateway Stage failed: %s", err)
}
return nil
}
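
Method settings are not their own API object; they are patched onto the stage, with every path prefixed by the method path (for example "resource/GET", or "*/*" to cover all methods) followed by the individual setting. Delete issues a single "remove" on the method path itself. The path construction in isolation; the example method paths are illustrative.

package main

import "fmt"

// settingPath builds the stage PatchOperation path used above: the method path
// followed by the individual setting, e.g. "metrics/enabled" or "logging/loglevel".
func settingPath(methodPath, setting string) string {
    return fmt.Sprintf("/%s/%s", methodPath, setting)
}

// removePath is the path Delete patches with a single "remove" operation.
func removePath(methodPath string) string {
    return fmt.Sprintf("/%s", methodPath)
}

func main() {
    fmt.Println(settingPath("*/*", "metrics/enabled"))           // /*/*/metrics/enabled
    fmt.Println(settingPath("resource/GET", "logging/loglevel")) // /resource/GET/logging/loglevel
    fmt.Println(removePath("resource/GET"))                      // /resource/GET
}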

View File

@ -0,0 +1,168 @@
package aws
import (
"fmt"
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/apigateway"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsApiGatewayModel() *schema.Resource {
return &schema.Resource{
Create: resourceAwsApiGatewayModelCreate,
Read: resourceAwsApiGatewayModelRead,
Update: resourceAwsApiGatewayModelUpdate,
Delete: resourceAwsApiGatewayModelDelete,
Schema: map[string]*schema.Schema{
"rest_api_id": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"description": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"schema": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"content_type": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
},
}
}
func resourceAwsApiGatewayModelCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Creating API Gateway Model")
var description *string
if v, ok := d.GetOk("description"); ok {
description = aws.String(v.(string))
}
var modelSchema *string
if v, ok := d.GetOk("schema"); ok {
modelSchema = aws.String(v.(string))
}
var err error
model, err := conn.CreateModel(&apigateway.CreateModelInput{
Name: aws.String(d.Get("name").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
ContentType: aws.String(d.Get("content_type").(string)),
Description: description,
Schema: modelSchema,
})
if err != nil {
return fmt.Errorf("Error creating API Gateway Model: %s", err)
}
d.SetId(*model.Id)
return nil
}
func resourceAwsApiGatewayModelRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Reading API Gateway Model %s", d.Id())
out, err := conn.GetModel(&apigateway.GetModelInput{
ModelName: aws.String(d.Get("name").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
})
if err != nil {
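// A NotFoundException means the model no longer exists; clear the ID so the
// resource is removed from state and recreated on the next apply.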
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
d.SetId("")
return nil
}
return err
}
log.Printf("[DEBUG] Received API Gateway Model: %s", out)
d.SetId(*out.Id)
d.Set("description", out.Description)
d.Set("schema", out.Schema)
d.Set("content_type", out.ContentType)
return nil
}
func resourceAwsApiGatewayModelUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Reading API Gateway Model %s", d.Id())
operations := make([]*apigateway.PatchOperation, 0)
if d.HasChange("description") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/description"),
Value: aws.String(d.Get("description").(string)),
})
}
if d.HasChange("schema") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/schema"),
Value: aws.String(d.Get("schema").(string)),
})
}
out, err := conn.UpdateModel(&apigateway.UpdateModelInput{
ModelName: aws.String(d.Get("name").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
PatchOperations: operations,
})
if err != nil {
return err
}
log.Printf("[DEBUG] Received API Gateway Model: %s", out)
return resourceAwsApiGatewayModelRead(d, meta)
}
func resourceAwsApiGatewayModelDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Deleting API Gateway Model: %s", d.Id())
return resource.Retry(5*time.Minute, func() *resource.RetryError {
log.Printf("[DEBUG] schema is %#v", d)
_, err := conn.DeleteModel(&apigateway.DeleteModelInput{
ModelName: aws.String(d.Get("name").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
})
if err == nil {
return nil
}
if apigatewayErr, ok := err.(awserr.Error); ok && apigatewayErr.Code() == "NotFoundException" {
return nil
}
return resource.NonRetryableError(err)
})
}

View File

@ -0,0 +1,149 @@
package aws
import (
"fmt"
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/apigateway"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsApiGatewayResource() *schema.Resource {
return &schema.Resource{
Create: resourceAwsApiGatewayResourceCreate,
Read: resourceAwsApiGatewayResourceRead,
Update: resourceAwsApiGatewayResourceUpdate,
Delete: resourceAwsApiGatewayResourceDelete,
Schema: map[string]*schema.Schema{
"rest_api_id": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"parent_id": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"path_part": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"path": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
},
}
}
func resourceAwsApiGatewayResourceCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Creating API Gateway Resource for API %s", d.Get("rest_api_id").(string))
var err error
resource, err := conn.CreateResource(&apigateway.CreateResourceInput{
ParentId: aws.String(d.Get("parent_id").(string)),
PathPart: aws.String(d.Get("path_part").(string)),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
})
if err != nil {
return fmt.Errorf("Error creating API Gateway Resource: %s", err)
}
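// The full path is computed by API Gateway from the parent resource and path_part.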
d.SetId(*resource.Id)
d.Set("path", resource.Path)
return nil
}
func resourceAwsApiGatewayResourceRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Reading API Gateway Resource %s", d.Id())
resource, err := conn.GetResource(&apigateway.GetResourceInput{
ResourceId: aws.String(d.Id()),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
})
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
d.SetId("")
return nil
}
return err
}
d.Set("parent_id", resource.ParentId)
d.Set("path_part", resource.PathPart)
d.Set("path", resource.Path)
return nil
}
func resourceAwsApiGatewayResourceUpdateOperations(d *schema.ResourceData) []*apigateway.PatchOperation {
operations := make([]*apigateway.PatchOperation, 0)
if d.HasChange("path_part") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/pathPart"),
Value: aws.String(d.Get("path_part").(string)),
})
}
if d.HasChange("parent_id") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/parentId"),
Value: aws.String(d.Get("parent_id").(string)),
})
}
return operations
}
func resourceAwsApiGatewayResourceUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Updating API Gateway Resource %s", d.Id())
_, err := conn.UpdateResource(&apigateway.UpdateResourceInput{
ResourceId: aws.String(d.Id()),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
PatchOperations: resourceAwsApiGatewayResourceUpdateOperations(d),
})
if err != nil {
return err
}
return resourceAwsApiGatewayResourceRead(d, meta)
}
func resourceAwsApiGatewayResourceDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Deleting API Gateway Resource: %s", d.Id())
return resource.Retry(5*time.Minute, func() *resource.RetryError {
log.Printf("[DEBUG] schema is %#v", d)
_, err := conn.DeleteResource(&apigateway.DeleteResourceInput{
ResourceId: aws.String(d.Id()),
RestApiId: aws.String(d.Get("rest_api_id").(string)),
})
if err == nil {
return nil
}
if apigatewayErr, ok := err.(awserr.Error); ok && apigatewayErr.Code() == "NotFoundException" {
return nil
}
return resource.NonRetryableError(err)
})
}

View File

@ -0,0 +1,189 @@
package aws
import (
"fmt"
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/apigateway"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsApiGatewayRestApi() *schema.Resource {
return &schema.Resource{
Create: resourceAwsApiGatewayRestApiCreate,
Read: resourceAwsApiGatewayRestApiRead,
Update: resourceAwsApiGatewayRestApiUpdate,
Delete: resourceAwsApiGatewayRestApiDelete,
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
},
"description": {
Type: schema.TypeString,
Optional: true,
},
"binary_media_types": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"root_resource_id": {
Type: schema.TypeString,
Computed: true,
},
"created_date": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func resourceAwsApiGatewayRestApiCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Creating API Gateway")
var description *string
if d.Get("description").(string) != "" {
description = aws.String(d.Get("description").(string))
}
params := &apigateway.CreateRestApiInput{
Name: aws.String(d.Get("name").(string)),
Description: description,
}
binaryMediaTypes, binaryMediaTypesOk := d.GetOk("binary_media_types")
if binaryMediaTypesOk {
params.BinaryMediaTypes = expandStringList(binaryMediaTypes.([]interface{}))
}
gateway, err := conn.CreateRestApi(params)
if err != nil {
return fmt.Errorf("Error creating API Gateway: %s", err)
}
d.SetId(*gateway.Id)
if err = resourceAwsApiGatewayRestApiRefreshResources(d, meta); err != nil {
return err
}
return resourceAwsApiGatewayRestApiRead(d, meta)
}
func resourceAwsApiGatewayRestApiRefreshResources(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
resp, err := conn.GetResources(&apigateway.GetResourcesInput{
RestApiId: aws.String(d.Id()),
})
if err != nil {
return err
}
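// Every REST API gets a root ("/") resource automatically; capture its ID so
// it can be used as the parent_id of top-level aws_api_gateway_resource resources.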
for _, item := range resp.Items {
if *item.Path == "/" {
d.Set("root_resource_id", item.Id)
break
}
}
return nil
}
func resourceAwsApiGatewayRestApiRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Reading API Gateway %s", d.Id())
api, err := conn.GetRestApi(&apigateway.GetRestApiInput{
RestApiId: aws.String(d.Id()),
})
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
d.SetId("")
return nil
}
return err
}
d.Set("name", api.Name)
d.Set("description", api.Description)
d.Set("binary_media_types", api.BinaryMediaTypes)
if err := d.Set("created_date", api.CreatedDate.Format(time.RFC3339)); err != nil {
log.Printf("[DEBUG] Error setting created_date: %s", err)
}
return nil
}
func resourceAwsApiGatewayRestApiUpdateOperations(d *schema.ResourceData) []*apigateway.PatchOperation {
operations := make([]*apigateway.PatchOperation, 0)
if d.HasChange("name") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/name"),
Value: aws.String(d.Get("name").(string)),
})
}
if d.HasChange("description") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/description"),
Value: aws.String(d.Get("description").(string)),
})
}
return operations
}
func resourceAwsApiGatewayRestApiUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Updating API Gateway %s", d.Id())
_, err := conn.UpdateRestApi(&apigateway.UpdateRestApiInput{
RestApiId: aws.String(d.Id()),
PatchOperations: resourceAwsApiGatewayRestApiUpdateOperations(d),
})
if err != nil {
return err
}
log.Printf("[DEBUG] Updated API Gateway %s", d.Id())
return resourceAwsApiGatewayRestApiRead(d, meta)
}
func resourceAwsApiGatewayRestApiDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Deleting API Gateway: %s", d.Id())
return resource.Retry(10*time.Minute, func() *resource.RetryError {
_, err := conn.DeleteRestApi(&apigateway.DeleteRestApiInput{
RestApiId: aws.String(d.Id()),
})
if err == nil {
return nil
}
if apigatewayErr, ok := err.(awserr.Error); ok && apigatewayErr.Code() == "NotFoundException" {
return nil
}
return resource.NonRetryableError(err)
})
}

View File

@ -0,0 +1,342 @@
package aws
import (
"fmt"
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/apigateway"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsApiGatewayStage() *schema.Resource {
return &schema.Resource{
Create: resourceAwsApiGatewayStageCreate,
Read: resourceAwsApiGatewayStageRead,
Update: resourceAwsApiGatewayStageUpdate,
Delete: resourceAwsApiGatewayStageDelete,
Schema: map[string]*schema.Schema{
"cache_cluster_enabled": {
Type: schema.TypeBool,
Optional: true,
},
"cache_cluster_size": {
Type: schema.TypeString,
Optional: true,
},
"client_certificate_id": {
Type: schema.TypeString,
Optional: true,
},
"deployment_id": {
Type: schema.TypeString,
Required: true,
},
"description": {
Type: schema.TypeString,
Optional: true,
},
"documentation_version": {
Type: schema.TypeString,
Optional: true,
},
"rest_api_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"stage_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"variables": {
Type: schema.TypeMap,
Optional: true,
},
},
}
}
func resourceAwsApiGatewayStageCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
d.Partial(true)
input := apigateway.CreateStageInput{
RestApiId: aws.String(d.Get("rest_api_id").(string)),
StageName: aws.String(d.Get("stage_name").(string)),
DeploymentId: aws.String(d.Get("deployment_id").(string)),
}
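// Enabling or sizing the cache cluster triggers asynchronous provisioning, so
// remember to wait for it to reach a stable state after the stage is created.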
waitForCache := false
if v, ok := d.GetOk("cache_cluster_enabled"); ok {
input.CacheClusterEnabled = aws.Bool(v.(bool))
waitForCache = true
}
if v, ok := d.GetOk("cache_cluster_size"); ok {
input.CacheClusterSize = aws.String(v.(string))
waitForCache = true
}
if v, ok := d.GetOk("description"); ok {
input.Description = aws.String(v.(string))
}
if v, ok := d.GetOk("documentation_version"); ok {
input.DocumentationVersion = aws.String(v.(string))
}
if vars, ok := d.GetOk("variables"); ok {
variables := make(map[string]string)
for k, v := range vars.(map[string]interface{}) {
variables[k] = v.(string)
}
input.Variables = aws.StringMap(variables)
}
out, err := conn.CreateStage(&input)
if err != nil {
return fmt.Errorf("Error creating API Gateway Stage: %s", err)
}
d.SetId(fmt.Sprintf("ags-%s-%s", d.Get("rest_api_id").(string), d.Get("stage_name").(string)))
d.SetPartial("rest_api_id")
d.SetPartial("stage_name")
d.SetPartial("deployment_id")
d.SetPartial("description")
d.SetPartial("variables")
if waitForCache && *out.CacheClusterStatus != "NOT_AVAILABLE" {
stateConf := &resource.StateChangeConf{
Pending: []string{
"CREATE_IN_PROGRESS",
"DELETE_IN_PROGRESS",
"FLUSH_IN_PROGRESS",
},
Target: []string{"AVAILABLE"},
Refresh: apiGatewayStageCacheRefreshFunc(conn,
d.Get("rest_api_id").(string),
d.Get("stage_name").(string)),
Timeout: 90 * time.Minute,
}
_, err := stateConf.WaitForState()
if err != nil {
return err
}
}
d.SetPartial("cache_cluster_enabled")
d.SetPartial("cache_cluster_size")
d.Partial(false)
if _, ok := d.GetOk("client_certificate_id"); ok {
return resourceAwsApiGatewayStageUpdate(d, meta)
}
return resourceAwsApiGatewayStageRead(d, meta)
}
func resourceAwsApiGatewayStageRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Reading API Gateway Stage %s", d.Id())
input := apigateway.GetStageInput{
RestApiId: aws.String(d.Get("rest_api_id").(string)),
StageName: aws.String(d.Get("stage_name").(string)),
}
stage, err := conn.GetStage(&input)
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
log.Printf("[WARN] API Gateway Stage %s not found, removing", d.Id())
d.SetId("")
return nil
}
return err
}
log.Printf("[DEBUG] Received API Gateway Stage: %s", stage)
d.Set("client_certificate_id", stage.ClientCertificateId)
if stage.CacheClusterStatus != nil && *stage.CacheClusterStatus == "DELETE_IN_PROGRESS" {
d.Set("cache_cluster_enabled", false)
d.Set("cache_cluster_size", nil)
} else {
d.Set("cache_cluster_enabled", stage.CacheClusterEnabled)
d.Set("cache_cluster_size", stage.CacheClusterSize)
}
d.Set("deployment_id", stage.DeploymentId)
d.Set("description", stage.Description)
d.Set("documentation_version", stage.DocumentationVersion)
d.Set("variables", aws.StringValueMap(stage.Variables))
return nil
}
func resourceAwsApiGatewayStageUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
d.Partial(true)
operations := make([]*apigateway.PatchOperation, 0)
waitForCache := false
if d.HasChange("cache_cluster_enabled") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/cacheClusterEnabled"),
Value: aws.String(fmt.Sprintf("%t", d.Get("cache_cluster_enabled").(bool))),
})
waitForCache = true
}
if d.HasChange("cache_cluster_size") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/cacheClusterSize"),
Value: aws.String(d.Get("cache_cluster_size").(string)),
})
waitForCache = true
}
if d.HasChange("client_certificate_id") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/clientCertificateId"),
Value: aws.String(d.Get("client_certificate_id").(string)),
})
}
if d.HasChange("deployment_id") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/deploymentId"),
Value: aws.String(d.Get("deployment_id").(string)),
})
}
if d.HasChange("description") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/description"),
Value: aws.String(d.Get("description").(string)),
})
}
if d.HasChange("documentation_version") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/documentationVersion"),
Value: aws.String(d.Get("documentation_version").(string)),
})
}
if d.HasChange("variables") {
o, n := d.GetChange("variables")
oldV := o.(map[string]interface{})
newV := n.(map[string]interface{})
operations = append(operations, diffVariablesOps("/variables/", oldV, newV)...)
}
input := apigateway.UpdateStageInput{
RestApiId: aws.String(d.Get("rest_api_id").(string)),
StageName: aws.String(d.Get("stage_name").(string)),
PatchOperations: operations,
}
log.Printf("[DEBUG] Updating API Gateway Stage: %s", input)
out, err := conn.UpdateStage(&input)
if err != nil {
return fmt.Errorf("Updating API Gateway Stage failed: %s", err)
}
d.SetPartial("client_certificate_id")
d.SetPartial("deployment_id")
d.SetPartial("description")
d.SetPartial("variables")
if waitForCache && *out.CacheClusterStatus != "NOT_AVAILABLE" {
stateConf := &resource.StateChangeConf{
Pending: []string{
"CREATE_IN_PROGRESS",
"FLUSH_IN_PROGRESS",
},
Target: []string{
"AVAILABLE",
// There's an AWS API bug (raised & confirmed in Sep 2016 by support)
// which causes the stage to remain in deletion state forever
"DELETE_IN_PROGRESS",
},
Refresh: apiGatewayStageCacheRefreshFunc(conn,
d.Get("rest_api_id").(string),
d.Get("stage_name").(string)),
Timeout: 30 * time.Minute,
}
_, err := stateConf.WaitForState()
if err != nil {
return err
}
}
d.SetPartial("cache_cluster_enabled")
d.SetPartial("cache_cluster_size")
d.Partial(false)
return resourceAwsApiGatewayStageRead(d, meta)
}
func diffVariablesOps(prefix string, oldVars, newVars map[string]interface{}) []*apigateway.PatchOperation {
ops := make([]*apigateway.PatchOperation, 0)
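// Keys present only in the old map become "remove" operations; new or changed
// keys become "replace" operations.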
for k := range oldVars {
if _, ok := newVars[k]; !ok {
ops = append(ops, &apigateway.PatchOperation{
Op: aws.String("remove"),
Path: aws.String(prefix + k),
})
}
}
for k, v := range newVars {
newValue := v.(string)
if oldV, ok := oldVars[k]; ok {
oldValue := oldV.(string)
if oldValue == newValue {
continue
}
}
ops = append(ops, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String(prefix + k),
Value: aws.String(newValue),
})
}
return ops
}
func apiGatewayStageCacheRefreshFunc(conn *apigateway.APIGateway, apiId, stageName string) func() (interface{}, string, error) {
return func() (interface{}, string, error) {
input := apigateway.GetStageInput{
RestApiId: aws.String(apiId),
StageName: aws.String(stageName),
}
out, err := conn.GetStage(&input)
if err != nil {
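// The non-nil first return value is only a placeholder; the error is what
// the state change machinery acts on here.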
return 42, "", err
}
return out, *out.CacheClusterStatus, nil
}
}
func resourceAwsApiGatewayStageDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Deleting API Gateway Stage: %s", d.Id())
input := apigateway.DeleteStageInput{
RestApiId: aws.String(d.Get("rest_api_id").(string)),
StageName: aws.String(d.Get("stage_name").(string)),
}
_, err := conn.DeleteStage(&input)
if err != nil {
return fmt.Errorf("Deleting API Gateway Stage failed: %s", err)
}
return nil
}

View File

@ -0,0 +1,499 @@
package aws
import (
"fmt"
"log"
"strconv"
"time"
"errors"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/apigateway"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsApiGatewayUsagePlan() *schema.Resource {
return &schema.Resource{
Create: resourceAwsApiGatewayUsagePlanCreate,
Read: resourceAwsApiGatewayUsagePlanRead,
Update: resourceAwsApiGatewayUsagePlanUpdate,
Delete: resourceAwsApiGatewayUsagePlanDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true, // Required because the name cannot be added or removed after creation
},
"description": {
Type: schema.TypeString,
Optional: true,
},
"api_stages": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"api_id": {
Type: schema.TypeString,
Required: true,
},
"stage": {
Type: schema.TypeString,
Required: true,
},
},
},
},
"quota_settings": {
Type: schema.TypeSet,
MaxItems: 1,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"limit": {
Type: schema.TypeInt,
Required: true, // Required because the limit cannot be removed on its own
},
"offset": {
Type: schema.TypeInt,
Default: 0,
Optional: true,
},
"period": {
Type: schema.TypeString,
Required: true, // Required because the period cannot be removed
ValidateFunc: validateApiGatewayUsagePlanQuotaSettingsPeriod,
},
},
},
},
"throttle_settings": {
Type: schema.TypeSet,
MaxItems: 1,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"burst_limit": {
Type: schema.TypeInt,
Default: 0,
Optional: true,
},
"rate_limit": {
Type: schema.TypeInt,
Default: 0,
Optional: true,
},
},
},
},
"product_code": {
Type: schema.TypeString,
Optional: true,
},
},
}
}
func resourceAwsApiGatewayUsagePlanCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Print("[DEBUG] Creating API Gateway Usage Plan")
params := &apigateway.CreateUsagePlanInput{
Name: aws.String(d.Get("name").(string)),
}
if v, ok := d.GetOk("description"); ok {
params.Description = aws.String(v.(string))
}
if s, ok := d.GetOk("api_stages"); ok {
stages := s.([]interface{})
as := make([]*apigateway.ApiStage, 0)
for _, v := range stages {
sv := v.(map[string]interface{})
stage := &apigateway.ApiStage{}
if v, ok := sv["api_id"].(string); ok && v != "" {
stage.ApiId = aws.String(v)
}
if v, ok := sv["stage"].(string); ok && v != "" {
stage.Stage = aws.String(v)
}
as = append(as, stage)
}
if len(as) > 0 {
params.ApiStages = as
}
}
if v, ok := d.GetOk("quota_settings"); ok {
settings := v.(*schema.Set).List()
q, ok := settings[0].(map[string]interface{})
if !ok {
return errors.New("At least one field is expected inside quota_settings")
}
if errors := validateApiGatewayUsagePlanQuotaSettings(q); len(errors) > 0 {
return fmt.Errorf("Error validating the quota settings: %v", errors)
}
qs := &apigateway.QuotaSettings{}
if sv, ok := q["limit"].(int); ok {
qs.Limit = aws.Int64(int64(sv))
}
if sv, ok := q["offset"].(int); ok {
qs.Offset = aws.Int64(int64(sv))
}
if sv, ok := q["period"].(string); ok && sv != "" {
qs.Period = aws.String(sv)
}
params.Quota = qs
}
if v, ok := d.GetOk("throttle_settings"); ok {
settings := v.(*schema.Set).List()
q, ok := settings[0].(map[string]interface{})
if !ok {
return errors.New("At least one field is expected inside throttle_settings")
}
ts := &apigateway.ThrottleSettings{}
if sv, ok := q["burst_limit"].(int); ok {
ts.BurstLimit = aws.Int64(int64(sv))
}
if sv, ok := q["rate_limit"].(float64); ok {
ts.RateLimit = aws.Float64(float64(sv))
}
params.Throttle = ts
}
up, err := conn.CreateUsagePlan(params)
if err != nil {
return fmt.Errorf("Error creating API Gateway Usage Plan: %s", err)
}
d.SetId(*up.Id)
// The product code cannot be set when the Usage Plan is created, so it is
// added here with a separate update call.
if v, ok := d.GetOk("product_code"); ok {
updateParameters := &apigateway.UpdateUsagePlanInput{
UsagePlanId: aws.String(d.Id()),
PatchOperations: []*apigateway.PatchOperation{
{
Op: aws.String("add"),
Path: aws.String("/productCode"),
Value: aws.String(v.(string)),
},
},
}
up, err = conn.UpdateUsagePlan(updateParameters)
if err != nil {
return fmt.Errorf("Error creating the API Gateway Usage Plan product code: %s", err)
}
}
return resourceAwsApiGatewayUsagePlanRead(d, meta)
}
func resourceAwsApiGatewayUsagePlanRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Reading API Gateway Usage Plan: %s", d.Id())
up, err := conn.GetUsagePlan(&apigateway.GetUsagePlanInput{
UsagePlanId: aws.String(d.Id()),
})
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
d.SetId("")
return nil
}
return err
}
d.Set("name", up.Name)
d.Set("description", up.Description)
d.Set("product_code", up.ProductCode)
if up.ApiStages != nil {
if err := d.Set("api_stages", flattenApiGatewayUsageApiStages(up.ApiStages)); err != nil {
return fmt.Errorf("[DEBUG] Error setting api_stages error: %#v", err)
}
}
if up.Throttle != nil {
if err := d.Set("throttle_settings", flattenApiGatewayUsagePlanThrottling(up.Throttle)); err != nil {
return fmt.Errorf("[DEBUG] Error setting throttle_settings error: %#v", err)
}
}
if up.Quota != nil {
if err := d.Set("quota_settings", flattenApiGatewayUsagePlanQuota(up.Quota)); err != nil {
return fmt.Errorf("[DEBUG] Error setting quota_settings error: %#v", err)
}
}
return nil
}
func resourceAwsApiGatewayUsagePlanUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Print("[DEBUG] Updating API Gateway Usage Plan")
operations := make([]*apigateway.PatchOperation, 0)
if d.HasChange("name") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/name"),
Value: aws.String(d.Get("name").(string)),
})
}
if d.HasChange("description") {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/description"),
Value: aws.String(d.Get("description").(string)),
})
}
if d.HasChange("product_code") {
v, ok := d.GetOk("product_code")
if ok {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/productCode"),
Value: aws.String(v.(string)),
})
} else {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("remove"),
Path: aws.String("/productCode"),
})
}
}
if d.HasChange("api_stages") {
o, n := d.GetChange("api_stages")
old := o.([]interface{})
new := n.([]interface{})
// Remove every previously associated stage. It is simpler to remove them all
// and re-add the new ones, since there is no replace operation for stages.
for _, v := range old {
m := v.(map[string]interface{})
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("remove"),
Path: aws.String("/apiStages"),
Value: aws.String(fmt.Sprintf("%s:%s", m["api_id"].(string), m["stage"].(string))),
})
}
// Handle additions
if len(new) > 0 {
for _, v := range new {
m := v.(map[string]interface{})
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("add"),
Path: aws.String("/apiStages"),
Value: aws.String(fmt.Sprintf("%s:%s", m["api_id"].(string), m["stage"].(string))),
})
}
}
}
if d.HasChange("throttle_settings") {
o, n := d.GetChange("throttle_settings")
os := o.(*schema.Set)
ns := n.(*schema.Set)
diff := ns.Difference(os).List()
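// An empty diff means the throttle_settings block was removed from the configuration.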
// Handle Removal
if len(diff) == 0 {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("remove"),
Path: aws.String("/throttle"),
})
}
if len(diff) > 0 {
d := diff[0].(map[string]interface{})
// Handle Replaces
if o != nil && n != nil {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/throttle/rateLimit"),
Value: aws.String(strconv.Itoa(d["rate_limit"].(int))),
})
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/throttle/burstLimit"),
Value: aws.String(strconv.Itoa(d["burst_limit"].(int))),
})
}
// Handle Additions
if o == nil && n != nil {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("add"),
Path: aws.String("/throttle/rateLimit"),
Value: aws.String(strconv.Itoa(d["rate_limit"].(int))),
})
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("add"),
Path: aws.String("/throttle/burstLimit"),
Value: aws.String(strconv.Itoa(d["burst_limit"].(int))),
})
}
}
}
if d.HasChange("quota_settings") {
o, n := d.GetChange("quota_settings")
os := o.(*schema.Set)
ns := n.(*schema.Set)
diff := ns.Difference(os).List()
// Handle Removal
if len(diff) == 0 {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("remove"),
Path: aws.String("/quota"),
})
}
if len(diff) > 0 {
d := diff[0].(map[string]interface{})
if errors := validateApiGatewayUsagePlanQuotaSettings(d); len(errors) > 0 {
return fmt.Errorf("Error validating the quota settings: %v", errors)
}
// Handle Replaces
if o != nil && n != nil {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/quota/limit"),
Value: aws.String(strconv.Itoa(d["limit"].(int))),
})
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/quota/offset"),
Value: aws.String(strconv.Itoa(d["offset"].(int))),
})
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("replace"),
Path: aws.String("/quota/period"),
Value: aws.String(d["period"].(string)),
})
}
// Handle Additions
if o == nil && n != nil {
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("add"),
Path: aws.String("/quota/limit"),
Value: aws.String(strconv.Itoa(d["limit"].(int))),
})
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("add"),
Path: aws.String("/quota/offset"),
Value: aws.String(strconv.Itoa(d["offset"].(int))),
})
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("add"),
Path: aws.String("/quota/period"),
Value: aws.String(d["period"].(string)),
})
}
}
}
params := &apigateway.UpdateUsagePlanInput{
UsagePlanId: aws.String(d.Id()),
PatchOperations: operations,
}
_, err := conn.UpdateUsagePlan(params)
if err != nil {
return fmt.Errorf("Error updating API Gateway Usage Plan: %s", err)
}
return resourceAwsApiGatewayUsagePlanRead(d, meta)
}
func resourceAwsApiGatewayUsagePlanDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
// Remove any API stages still associated with the usage plan before deleting it.
if apistages, ok := d.GetOk("api_stages"); ok {
log.Printf("[DEBUG] Deleting API Stages associated with Usage Plan: %s", d.Id())
stages := apistages.([]interface{})
operations := []*apigateway.PatchOperation{}
for _, v := range stages {
sv := v.(map[string]interface{})
operations = append(operations, &apigateway.PatchOperation{
Op: aws.String("remove"),
Path: aws.String("/apiStages"),
Value: aws.String(fmt.Sprintf("%s:%s", sv["api_id"].(string), sv["stage"].(string))),
})
}
_, err := conn.UpdateUsagePlan(&apigateway.UpdateUsagePlanInput{
UsagePlanId: aws.String(d.Id()),
PatchOperations: operations,
})
if err != nil {
return fmt.Errorf("Error removing API Stages associated with Usage Plan: %s", err)
}
}
log.Printf("[DEBUG] Deleting API Gateway Usage Plan: %s", d.Id())
return resource.Retry(5*time.Minute, func() *resource.RetryError {
_, err := conn.DeleteUsagePlan(&apigateway.DeleteUsagePlanInput{
UsagePlanId: aws.String(d.Id()),
})
if err == nil {
return nil
}
return resource.NonRetryableError(err)
})
}

View File

@ -0,0 +1,114 @@
package aws
import (
"fmt"
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/apigateway"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsApiGatewayUsagePlanKey() *schema.Resource {
return &schema.Resource{
Create: resourceAwsApiGatewayUsagePlanKeyCreate,
Read: resourceAwsApiGatewayUsagePlanKeyRead,
Delete: resourceAwsApiGatewayUsagePlanKeyDelete,
Schema: map[string]*schema.Schema{
"key_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"key_type": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"usage_plan_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
"value": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func resourceAwsApiGatewayUsagePlanKeyCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Print("[DEBUG] Creating API Gateway Usage Plan Key")
params := &apigateway.CreateUsagePlanKeyInput{
KeyId: aws.String(d.Get("key_id").(string)),
KeyType: aws.String(d.Get("key_type").(string)),
UsagePlanId: aws.String(d.Get("usage_plan_id").(string)),
}
up, err := conn.CreateUsagePlanKey(params)
if err != nil {
return fmt.Errorf("Error creating API Gateway Usage Plan Key: %s", err)
}
d.SetId(*up.Id)
return resourceAwsApiGatewayUsagePlanKeyRead(d, meta)
}
func resourceAwsApiGatewayUsagePlanKeyRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Reading API Gateway Usage Plan Key: %s", d.Id())
up, err := conn.GetUsagePlanKey(&apigateway.GetUsagePlanKeyInput{
UsagePlanId: aws.String(d.Get("usage_plan_id").(string)),
KeyId: aws.String(d.Get("key_id").(string)),
})
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
d.SetId("")
return nil
}
return err
}
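// name and value are read-only attributes derived from the underlying API key.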
d.Set("name", up.Name)
d.Set("value", up.Value)
return nil
}
func resourceAwsApiGatewayUsagePlanKeyDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).apigateway
log.Printf("[DEBUG] Deleting API Gateway Usage Plan Key: %s", d.Id())
return resource.Retry(5*time.Minute, func() *resource.RetryError {
_, err := conn.DeleteUsagePlanKey(&apigateway.DeleteUsagePlanKeyInput{
UsagePlanId: aws.String(d.Get("usage_plan_id").(string)),
KeyId: aws.String(d.Get("key_id").(string)),
})
if err == nil {
return nil
}
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
return nil
}
return resource.NonRetryableError(err)
})
}

Some files were not shown because too many files have changed in this diff.