diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/LICENSE b/vendor/github.com/terraform-providers/terraform-provider-aws/LICENSE new file mode 100644 index 000000000..a612ad981 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. 
Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. 
However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. 
* +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. 
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/auth_helpers.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/auth_helpers.go new file mode 100644 index 000000000..e808d4d39 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/auth_helpers.go @@ -0,0 +1,217 @@ +package aws + +import ( + "errors" + "fmt" + "log" + "os" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + awsCredentials "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-cleanhttp" +) + +func GetAccountInfo(iamconn *iam.IAM, stsconn *sts.STS, authProviderName string) (string, string, error) { + // If we have creds from instance profile, we can use metadata API + if authProviderName == ec2rolecreds.ProviderName { + log.Println("[DEBUG] Trying to get account ID via AWS Metadata API") + + cfg := &aws.Config{} + setOptionalEndpoint(cfg) + sess, err := session.NewSession(cfg) + if err != nil { + return "", "", errwrap.Wrapf("Error creating AWS session: {{err}}", err) + } + + metadataClient := ec2metadata.New(sess) + info, err := metadataClient.IAMInfo() + if err != nil { + // This can be triggered when no IAM Role is assigned + // or AWS just happens to return invalid response + return "", "", fmt.Errorf("Failed getting EC2 IAM info: %s", err) + } + + return parseAccountInfoFromArn(info.InstanceProfileArn) + } + + // Then try IAM GetUser + log.Println("[DEBUG] Trying to get account ID via iam:GetUser") + outUser, err := iamconn.GetUser(nil) + if err == nil { + return parseAccountInfoFromArn(*outUser.User.Arn) + } + + awsErr, ok := err.(awserr.Error) + // AccessDenied and ValidationError can be raised + // if credentials belong to federated profile, so we ignore these + if !ok || (awsErr.Code() != "AccessDenied" && awsErr.Code() != "ValidationError" && awsErr.Code() != "InvalidClientTokenId") { + return "", "", fmt.Errorf("Failed getting account ID via 'iam:GetUser': %s", err) + } + log.Printf("[DEBUG] Getting account ID via iam:GetUser failed: %s", err) + + // Then try STS GetCallerIdentity + log.Println("[DEBUG] Trying to get account ID via sts:GetCallerIdentity") + outCallerIdentity, err := stsconn.GetCallerIdentity(&sts.GetCallerIdentityInput{}) + if err == nil { + return parseAccountInfoFromArn(*outCallerIdentity.Arn) + } + log.Printf("[DEBUG] Getting account ID via sts:GetCallerIdentity failed: %s", err) + + // Then try IAM ListRoles + log.Println("[DEBUG] Trying to get account ID via iam:ListRoles") + outRoles, err := iamconn.ListRoles(&iam.ListRolesInput{ + MaxItems: aws.Int64(int64(1)), + }) + if err != nil { + return "", "", fmt.Errorf("Failed getting account ID via 'iam:ListRoles': %s", err) + } + + if len(outRoles.Roles) < 1 { + return "", "", errors.New("Failed getting account ID via 'iam:ListRoles': No roles available") + } + + return parseAccountInfoFromArn(*outRoles.Roles[0].Arn) +} + +func parseAccountInfoFromArn(arn string) (string, string, error) { + parts := strings.Split(arn, ":") + if len(parts) < 5 { + return "", "", fmt.Errorf("Unable to parse ID from invalid ARN: %q", arn) + } + return parts[1], parts[4], nil +} + +// This function is responsible 
for reading credentials from the
+// environment in the case that they're not explicitly specified
+// in the Terraform configuration.
+func GetCredentials(c *Config) (*awsCredentials.Credentials, error) {
+	// build a chain provider, lazy-evaluated by aws-sdk
+	providers := []awsCredentials.Provider{
+		&awsCredentials.StaticProvider{Value: awsCredentials.Value{
+			AccessKeyID:     c.AccessKey,
+			SecretAccessKey: c.SecretKey,
+			SessionToken:    c.Token,
+		}},
+		&awsCredentials.EnvProvider{},
+		&awsCredentials.SharedCredentialsProvider{
+			Filename: c.CredsFilename,
+			Profile:  c.Profile,
+		},
+	}
+
+	// Build isolated HTTP client to avoid issues with globally-shared settings
+	client := cleanhttp.DefaultClient()
+
+	// Keep the timeout low as we don't want to wait in non-EC2 environments
+	client.Timeout = 100 * time.Millisecond
+	cfg := &aws.Config{
+		HTTPClient: client,
+	}
+	usedEndpoint := setOptionalEndpoint(cfg)
+
+	if !c.SkipMetadataApiCheck {
+		// Real AWS should reply to a simple metadata request.
+		// We check it actually does to ensure something else didn't just
+		// happen to be listening on the same IP:Port
+		metadataClient := ec2metadata.New(session.New(cfg))
+		if metadataClient.Available() {
+			providers = append(providers, &ec2rolecreds.EC2RoleProvider{
+				Client: metadataClient,
+			})
+			log.Print("[INFO] AWS EC2 instance detected via default metadata" +
+				" API endpoint, EC2RoleProvider added to the auth chain")
+		} else {
+			if usedEndpoint == "" {
+				usedEndpoint = "default location"
+			}
+			log.Printf("[INFO] Ignoring AWS metadata API endpoint at %s "+
+				"as it doesn't return any instance-id", usedEndpoint)
+		}
+	}
+
+	// This is the "normal" flow (i.e. not assuming a role)
+	if c.AssumeRoleARN == "" {
+		return awsCredentials.NewChainCredentials(providers), nil
+	}
+
+	// Otherwise we need to construct an STS client with the main credentials, and verify
+	// that we can assume the defined role.
+	log.Printf("[INFO] Attempting to AssumeRole %s (SessionName: %q, ExternalId: %q, Policy: %q)",
+		c.AssumeRoleARN, c.AssumeRoleSessionName, c.AssumeRoleExternalID, c.AssumeRolePolicy)
+
+	creds := awsCredentials.NewChainCredentials(providers)
+	cp, err := creds.Get()
+	if err != nil {
+		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" {
+			return nil, errors.New(`No valid credential sources found for AWS Provider. 
+ Please see https://terraform.io/docs/providers/aws/index.html for more information on + providing credentials for the AWS Provider`) + } + + return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err) + } + + log.Printf("[INFO] AWS Auth provider used: %q", cp.ProviderName) + + awsConfig := &aws.Config{ + Credentials: creds, + Region: aws.String(c.Region), + MaxRetries: aws.Int(c.MaxRetries), + HTTPClient: cleanhttp.DefaultClient(), + S3ForcePathStyle: aws.Bool(c.S3ForcePathStyle), + } + + stsclient := sts.New(session.New(awsConfig)) + assumeRoleProvider := &stscreds.AssumeRoleProvider{ + Client: stsclient, + RoleARN: c.AssumeRoleARN, + } + if c.AssumeRoleSessionName != "" { + assumeRoleProvider.RoleSessionName = c.AssumeRoleSessionName + } + if c.AssumeRoleExternalID != "" { + assumeRoleProvider.ExternalID = aws.String(c.AssumeRoleExternalID) + } + if c.AssumeRolePolicy != "" { + assumeRoleProvider.Policy = aws.String(c.AssumeRolePolicy) + } + + providers = []awsCredentials.Provider{assumeRoleProvider} + + assumeRoleCreds := awsCredentials.NewChainCredentials(providers) + _, err = assumeRoleCreds.Get() + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" { + return nil, fmt.Errorf("The role %q cannot be assumed.\n\n"+ + " There are a number of possible causes of this - the most common are:\n"+ + " * The credentials used in order to assume the role are invalid\n"+ + " * The credentials do not have appropriate permission to assume the role\n"+ + " * The role ARN is not valid", + c.AssumeRoleARN) + } + + return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err) + } + + return assumeRoleCreds, nil +} + +func setOptionalEndpoint(cfg *aws.Config) string { + endpoint := os.Getenv("AWS_METADATA_URL") + if endpoint != "" { + log.Printf("[INFO] Setting custom metadata endpoint: %q", endpoint) + cfg.Endpoint = aws.String(endpoint) + return endpoint + } + return "" +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/autoscaling_tags.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/autoscaling_tags.go new file mode 100644 index 000000000..5c0911505 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/autoscaling_tags.go @@ -0,0 +1,317 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "regexp" + "strconv" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" +) + +// autoscalingTagSchema returns the schema to use for the tag element. +func autoscalingTagSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "value": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "propagate_at_launch": &schema.Schema{ + Type: schema.TypeBool, + Required: true, + }, + }, + }, + Set: autoscalingTagToHash, + } +} + +func autoscalingTagToHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["key"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["value"].(string))) + buf.WriteString(fmt.Sprintf("%t-", m["propagate_at_launch"].(bool))) + + return hashcode.String(buf.String()) +} + +// setTags is a helper to set the tags for a resource. 
It expects the
+// tags field to be named "tag"
+func setAutoscalingTags(conn *autoscaling.AutoScaling, d *schema.ResourceData) error {
+	resourceID := d.Get("name").(string)
+	var createTags, removeTags []*autoscaling.Tag
+
+	if d.HasChange("tag") || d.HasChange("tags") {
+		oraw, nraw := d.GetChange("tag")
+		o := setToMapByKey(oraw.(*schema.Set), "key")
+		n := setToMapByKey(nraw.(*schema.Set), "key")
+
+		old, err := autoscalingTagsFromMap(o, resourceID)
+		if err != nil {
+			return err
+		}
+
+		new, err := autoscalingTagsFromMap(n, resourceID)
+		if err != nil {
+			return err
+		}
+
+		c, r, err := diffAutoscalingTags(old, new, resourceID)
+		if err != nil {
+			return err
+		}
+
+		createTags = append(createTags, c...)
+		removeTags = append(removeTags, r...)
+
+		oraw, nraw = d.GetChange("tags")
+		old, err = autoscalingTagsFromList(oraw.([]interface{}), resourceID)
+		if err != nil {
+			return err
+		}
+
+		new, err = autoscalingTagsFromList(nraw.([]interface{}), resourceID)
+		if err != nil {
+			return err
+		}
+
+		c, r, err = diffAutoscalingTags(old, new, resourceID)
+		if err != nil {
+			return err
+		}
+
+		createTags = append(createTags, c...)
+		removeTags = append(removeTags, r...)
+	}
+
+	// Set tags
+	if len(removeTags) > 0 {
+		log.Printf("[DEBUG] Removing autoscaling tags: %#v", removeTags)
+
+		remove := autoscaling.DeleteTagsInput{
+			Tags: removeTags,
+		}
+
+		if _, err := conn.DeleteTags(&remove); err != nil {
+			return err
+		}
+	}
+
+	if len(createTags) > 0 {
+		log.Printf("[DEBUG] Creating autoscaling tags: %#v", createTags)
+
+		create := autoscaling.CreateOrUpdateTagsInput{
+			Tags: createTags,
+		}
+
+		if _, err := conn.CreateOrUpdateTags(&create); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// diffAutoscalingTags takes our tags locally and the ones remotely and returns
+// the set of tags that must be created, and the set of tags that must
+// be destroyed.
+func diffAutoscalingTags(oldTags, newTags []*autoscaling.Tag, resourceID string) ([]*autoscaling.Tag, []*autoscaling.Tag, error) {
+	// First, we're creating everything we have
+	create := make(map[string]interface{})
+	for _, t := range newTags {
+		tag := map[string]interface{}{
+			"key":                 *t.Key,
+			"value":               *t.Value,
+			"propagate_at_launch": *t.PropagateAtLaunch,
+		}
+		create[*t.Key] = tag
+	}
+
+	// Build the list of what to remove
+	var remove []*autoscaling.Tag
+	for _, t := range oldTags {
+		old, ok := create[*t.Key].(map[string]interface{})
+
+		if !ok || old["value"] != *t.Value || old["propagate_at_launch"] != *t.PropagateAtLaunch {
+			// Delete it!
+			remove = append(remove, t)
+		}
+	}
+
+	createTags, err := autoscalingTagsFromMap(create, resourceID)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return createTags, remove, nil
+}
+
+func autoscalingTagsFromList(vs []interface{}, resourceID string) ([]*autoscaling.Tag, error) {
+	result := make([]*autoscaling.Tag, 0, len(vs))
+	for _, tag := range vs {
+		attr, ok := tag.(map[string]interface{})
+		if !ok {
+			continue
+		}
+
+		t, err := autoscalingTagFromMap(attr, resourceID)
+		if err != nil {
+			return nil, err
+		}
+
+		if t != nil {
+			result = append(result, t)
+		}
+	}
+	return result, nil
+}
+
+// autoscalingTagsFromMap returns the tags for the given map of data. 
+func autoscalingTagsFromMap(m map[string]interface{}, resourceID string) ([]*autoscaling.Tag, error) { + result := make([]*autoscaling.Tag, 0, len(m)) + for _, v := range m { + attr, ok := v.(map[string]interface{}) + if !ok { + continue + } + + t, err := autoscalingTagFromMap(attr, resourceID) + if err != nil { + return nil, err + } + + if t != nil { + result = append(result, t) + } + } + + return result, nil +} + +func autoscalingTagFromMap(attr map[string]interface{}, resourceID string) (*autoscaling.Tag, error) { + if _, ok := attr["key"]; !ok { + return nil, fmt.Errorf("%s: invalid tag attributes: key missing", resourceID) + } + + if _, ok := attr["value"]; !ok { + return nil, fmt.Errorf("%s: invalid tag attributes: value missing", resourceID) + } + + if _, ok := attr["propagate_at_launch"]; !ok { + return nil, fmt.Errorf("%s: invalid tag attributes: propagate_at_launch missing", resourceID) + } + + var propagateAtLaunch bool + var err error + + if v, ok := attr["propagate_at_launch"].(bool); ok { + propagateAtLaunch = v + } + + if v, ok := attr["propagate_at_launch"].(string); ok { + if propagateAtLaunch, err = strconv.ParseBool(v); err != nil { + return nil, fmt.Errorf( + "%s: invalid tag attribute: invalid value for propagate_at_launch: %s", + resourceID, + v, + ) + } + } + + t := &autoscaling.Tag{ + Key: aws.String(attr["key"].(string)), + Value: aws.String(attr["value"].(string)), + PropagateAtLaunch: aws.Bool(propagateAtLaunch), + ResourceId: aws.String(resourceID), + ResourceType: aws.String("auto-scaling-group"), + } + + if tagIgnoredAutoscaling(t) { + return nil, nil + } + + return t, nil +} + +// autoscalingTagsToMap turns the list of tags into a map. +func autoscalingTagsToMap(ts []*autoscaling.Tag) map[string]interface{} { + tags := make(map[string]interface{}) + for _, t := range ts { + tag := map[string]interface{}{ + "key": *t.Key, + "value": *t.Value, + "propagate_at_launch": *t.PropagateAtLaunch, + } + tags[*t.Key] = tag + } + + return tags +} + +// autoscalingTagDescriptionsToMap turns the list of tags into a map. +func autoscalingTagDescriptionsToMap(ts *[]*autoscaling.TagDescription) map[string]map[string]interface{} { + tags := make(map[string]map[string]interface{}) + for _, t := range *ts { + tag := map[string]interface{}{ + "key": *t.Key, + "value": *t.Value, + "propagate_at_launch": *t.PropagateAtLaunch, + } + tags[*t.Key] = tag + } + + return tags +} + +// autoscalingTagDescriptionsToSlice turns the list of tags into a slice. 
+func autoscalingTagDescriptionsToSlice(ts []*autoscaling.TagDescription) []map[string]interface{} {
+	tags := make([]map[string]interface{}, 0, len(ts))
+	for _, t := range ts {
+		tags = append(tags, map[string]interface{}{
+			"key":                 *t.Key,
+			"value":               *t.Value,
+			"propagate_at_launch": *t.PropagateAtLaunch,
+		})
+	}
+
+	return tags
+}
+
+func setToMapByKey(s *schema.Set, key string) map[string]interface{} {
+	result := make(map[string]interface{})
+	for _, rawData := range s.List() {
+		data := rawData.(map[string]interface{})
+		result[data[key].(string)] = data
+	}
+
+	return result
+}
+
+// tagIgnoredAutoscaling compares a tag against a list of regular expressions
+// and checks if it should be ignored.
+func tagIgnoredAutoscaling(t *autoscaling.Tag) bool {
+	filter := []string{"^aws:"}
+	for _, v := range filter {
+		log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key)
+		if r, _ := regexp.MatchString(v, *t.Key); r {
+			log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value)
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/awserr.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/awserr.go
new file mode 100644
index 000000000..8fc056801
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/awserr.go
@@ -0,0 +1,14 @@
+package aws
+
+import (
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+func isAWSErr(err error, code string, message string) bool {
+	if err, ok := err.(awserr.Error); ok {
+		return err.Code() == code && strings.Contains(err.Message(), message)
+	}
+	return false
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/cloudfront_distribution_configuration_structure.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/cloudfront_distribution_configuration_structure.go
new file mode 100644
index 000000000..a47217647
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/cloudfront_distribution_configuration_structure.go
@@ -0,0 +1,1140 @@
+// CloudFront DistributionConfig structure helpers.
+//
+// These functions assist in pulling in data from Terraform resource
+// configuration for the aws_cloudfront_distribution resource, as there are
+// several sub-fields that require their own data type, and do not necessarily
+// 1-1 translate to resource configuration.
+
+package aws
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/cloudfront"
+	"github.com/hashicorp/terraform/flatmap"
+	"github.com/hashicorp/terraform/helper/hashcode"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+// cloudFrontRoute53ZoneID defines the route 53 zone ID for CloudFront. This
+// is used to set the zone_id attribute.
+const cloudFrontRoute53ZoneID = "Z2FDTNDATAQYW2"
+
+// Define Sort interface for []*string so we can ensure the order of
+// geo_restrictions.locations
+type StringPtrSlice []*string
+
+func (p StringPtrSlice) Len() int           { return len(p) }
+func (p StringPtrSlice) Less(i, j int) bool { return *p[i] < *p[j] }
+func (p StringPtrSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+// Assemble the *cloudfront.DistributionConfig variable. Calls out to various
+// expander functions to convert attributes and sub-attributes to the various
+// complex structures which are necessary to properly build the
+// DistributionConfig structure. 
+// +// Used by the aws_cloudfront_distribution Create and Update functions. +func expandDistributionConfig(d *schema.ResourceData) *cloudfront.DistributionConfig { + distributionConfig := &cloudfront.DistributionConfig{ + CacheBehaviors: expandCacheBehaviors(d.Get("cache_behavior").(*schema.Set)), + CustomErrorResponses: expandCustomErrorResponses(d.Get("custom_error_response").(*schema.Set)), + DefaultCacheBehavior: expandDefaultCacheBehavior(d.Get("default_cache_behavior").(*schema.Set).List()[0].(map[string]interface{})), + Enabled: aws.Bool(d.Get("enabled").(bool)), + IsIPV6Enabled: aws.Bool(d.Get("is_ipv6_enabled").(bool)), + HttpVersion: aws.String(d.Get("http_version").(string)), + Origins: expandOrigins(d.Get("origin").(*schema.Set)), + PriceClass: aws.String(d.Get("price_class").(string)), + } + // This sets CallerReference if it's still pending computation (ie: new resource) + if v, ok := d.GetOk("caller_reference"); ok == false { + distributionConfig.CallerReference = aws.String(time.Now().Format(time.RFC3339Nano)) + } else { + distributionConfig.CallerReference = aws.String(v.(string)) + } + if v, ok := d.GetOk("comment"); ok { + distributionConfig.Comment = aws.String(v.(string)) + } else { + distributionConfig.Comment = aws.String("") + } + if v, ok := d.GetOk("default_root_object"); ok { + distributionConfig.DefaultRootObject = aws.String(v.(string)) + } else { + distributionConfig.DefaultRootObject = aws.String("") + } + if v, ok := d.GetOk("logging_config"); ok { + distributionConfig.Logging = expandLoggingConfig(v.(*schema.Set).List()[0].(map[string]interface{})) + } else { + distributionConfig.Logging = expandLoggingConfig(nil) + } + if v, ok := d.GetOk("aliases"); ok { + distributionConfig.Aliases = expandAliases(v.(*schema.Set)) + } else { + distributionConfig.Aliases = expandAliases(schema.NewSet(aliasesHash, []interface{}{})) + } + if v, ok := d.GetOk("restrictions"); ok { + distributionConfig.Restrictions = expandRestrictions(v.(*schema.Set).List()[0].(map[string]interface{})) + } + if v, ok := d.GetOk("viewer_certificate"); ok { + distributionConfig.ViewerCertificate = expandViewerCertificate(v.(*schema.Set).List()[0].(map[string]interface{})) + } + if v, ok := d.GetOk("web_acl_id"); ok { + distributionConfig.WebACLId = aws.String(v.(string)) + } else { + distributionConfig.WebACLId = aws.String("") + } + + return distributionConfig +} + +// Unpack the *cloudfront.DistributionConfig variable and set resource data. +// Calls out to flatten functions to convert the DistributionConfig +// sub-structures to their respective attributes in the +// aws_cloudfront_distribution resource. +// +// Used by the aws_cloudfront_distribution Read function. 
+func flattenDistributionConfig(d *schema.ResourceData, distributionConfig *cloudfront.DistributionConfig) error { + var err error + + d.Set("enabled", distributionConfig.Enabled) + d.Set("is_ipv6_enabled", distributionConfig.IsIPV6Enabled) + d.Set("price_class", distributionConfig.PriceClass) + d.Set("hosted_zone_id", cloudFrontRoute53ZoneID) + + err = d.Set("default_cache_behavior", flattenDefaultCacheBehavior(distributionConfig.DefaultCacheBehavior)) + if err != nil { + return err + } + err = d.Set("viewer_certificate", flattenViewerCertificate(distributionConfig.ViewerCertificate)) + if err != nil { + return err + } + + if distributionConfig.CallerReference != nil { + d.Set("caller_reference", distributionConfig.CallerReference) + } + if distributionConfig.Comment != nil { + if *distributionConfig.Comment != "" { + d.Set("comment", distributionConfig.Comment) + } + } + if distributionConfig.DefaultRootObject != nil { + d.Set("default_root_object", distributionConfig.DefaultRootObject) + } + if distributionConfig.HttpVersion != nil { + d.Set("http_version", distributionConfig.HttpVersion) + } + if distributionConfig.WebACLId != nil { + d.Set("web_acl_id", distributionConfig.WebACLId) + } + + if distributionConfig.CustomErrorResponses != nil { + err = d.Set("custom_error_response", flattenCustomErrorResponses(distributionConfig.CustomErrorResponses)) + if err != nil { + return err + } + } + if distributionConfig.CacheBehaviors != nil { + err = d.Set("cache_behavior", flattenCacheBehaviors(distributionConfig.CacheBehaviors)) + if err != nil { + return err + } + } + + if distributionConfig.Logging != nil && *distributionConfig.Logging.Enabled { + err = d.Set("logging_config", flattenLoggingConfig(distributionConfig.Logging)) + } else { + err = d.Set("logging_config", schema.NewSet(loggingConfigHash, []interface{}{})) + } + if err != nil { + return err + } + + if distributionConfig.Aliases != nil { + err = d.Set("aliases", flattenAliases(distributionConfig.Aliases)) + if err != nil { + return err + } + } + if distributionConfig.Restrictions != nil { + err = d.Set("restrictions", flattenRestrictions(distributionConfig.Restrictions)) + if err != nil { + return err + } + } + if *distributionConfig.Origins.Quantity > 0 { + err = d.Set("origin", flattenOrigins(distributionConfig.Origins)) + if err != nil { + return err + } + } + + return nil +} + +func expandDefaultCacheBehavior(m map[string]interface{}) *cloudfront.DefaultCacheBehavior { + cb := expandCacheBehavior(m) + var dcb cloudfront.DefaultCacheBehavior + + simpleCopyStruct(cb, &dcb) + return &dcb +} + +func flattenDefaultCacheBehavior(dcb *cloudfront.DefaultCacheBehavior) *schema.Set { + m := make(map[string]interface{}) + var cb cloudfront.CacheBehavior + + simpleCopyStruct(dcb, &cb) + m = flattenCacheBehavior(&cb) + return schema.NewSet(defaultCacheBehaviorHash, []interface{}{m}) +} + +// Assemble the hash for the aws_cloudfront_distribution default_cache_behavior +// TypeSet attribute. 
+func defaultCacheBehaviorHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%t-", m["compress"].(bool))) + buf.WriteString(fmt.Sprintf("%s-", m["viewer_protocol_policy"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["target_origin_id"].(string))) + buf.WriteString(fmt.Sprintf("%d-", forwardedValuesHash(m["forwarded_values"].(*schema.Set).List()[0].(map[string]interface{})))) + buf.WriteString(fmt.Sprintf("%d-", m["min_ttl"].(int))) + if d, ok := m["trusted_signers"]; ok { + for _, e := range sortInterfaceSlice(d.([]interface{})) { + buf.WriteString(fmt.Sprintf("%s-", e.(string))) + } + } + if d, ok := m["max_ttl"]; ok { + buf.WriteString(fmt.Sprintf("%d-", d.(int))) + } + if d, ok := m["smooth_streaming"]; ok { + buf.WriteString(fmt.Sprintf("%t-", d.(bool))) + } + if d, ok := m["default_ttl"]; ok { + buf.WriteString(fmt.Sprintf("%d-", d.(int))) + } + if d, ok := m["allowed_methods"]; ok { + for _, e := range sortInterfaceSlice(d.([]interface{})) { + buf.WriteString(fmt.Sprintf("%s-", e.(string))) + } + } + if d, ok := m["cached_methods"]; ok { + for _, e := range sortInterfaceSlice(d.([]interface{})) { + buf.WriteString(fmt.Sprintf("%s-", e.(string))) + } + } + if d, ok := m["lambda_function_association"]; ok { + var associations []interface{} + switch d.(type) { + case *schema.Set: + associations = d.(*schema.Set).List() + default: + associations = d.([]interface{}) + } + for _, lfa := range associations { + buf.WriteString(fmt.Sprintf("%d-", lambdaFunctionAssociationHash(lfa.(map[string]interface{})))) + } + } + return hashcode.String(buf.String()) +} + +func expandCacheBehaviors(s *schema.Set) *cloudfront.CacheBehaviors { + var qty int64 + var items []*cloudfront.CacheBehavior + for _, v := range s.List() { + items = append(items, expandCacheBehavior(v.(map[string]interface{}))) + qty++ + } + return &cloudfront.CacheBehaviors{ + Quantity: aws.Int64(qty), + Items: items, + } +} + +func flattenCacheBehaviors(cbs *cloudfront.CacheBehaviors) *schema.Set { + s := []interface{}{} + for _, v := range cbs.Items { + s = append(s, flattenCacheBehavior(v)) + } + return schema.NewSet(cacheBehaviorHash, s) +} + +func expandCacheBehavior(m map[string]interface{}) *cloudfront.CacheBehavior { + cb := &cloudfront.CacheBehavior{ + Compress: aws.Bool(m["compress"].(bool)), + ViewerProtocolPolicy: aws.String(m["viewer_protocol_policy"].(string)), + TargetOriginId: aws.String(m["target_origin_id"].(string)), + ForwardedValues: expandForwardedValues(m["forwarded_values"].(*schema.Set).List()[0].(map[string]interface{})), + MinTTL: aws.Int64(int64(m["min_ttl"].(int))), + MaxTTL: aws.Int64(int64(m["max_ttl"].(int))), + DefaultTTL: aws.Int64(int64(m["default_ttl"].(int))), + } + if v, ok := m["trusted_signers"]; ok { + cb.TrustedSigners = expandTrustedSigners(v.([]interface{})) + } else { + cb.TrustedSigners = expandTrustedSigners([]interface{}{}) + } + + if v, ok := m["lambda_function_association"]; ok { + cb.LambdaFunctionAssociations = expandLambdaFunctionAssociations(v.(*schema.Set).List()) + } + + if v, ok := m["smooth_streaming"]; ok { + cb.SmoothStreaming = aws.Bool(v.(bool)) + } + if v, ok := m["allowed_methods"]; ok { + cb.AllowedMethods = expandAllowedMethods(v.([]interface{})) + } + if v, ok := m["cached_methods"]; ok { + cb.AllowedMethods.CachedMethods = expandCachedMethods(v.([]interface{})) + } + if v, ok := m["path_pattern"]; ok { + cb.PathPattern = aws.String(v.(string)) + } + return cb +} + +func flattenCacheBehavior(cb 
*cloudfront.CacheBehavior) map[string]interface{} { + m := make(map[string]interface{}) + + m["compress"] = *cb.Compress + m["viewer_protocol_policy"] = *cb.ViewerProtocolPolicy + m["target_origin_id"] = *cb.TargetOriginId + m["forwarded_values"] = schema.NewSet(forwardedValuesHash, []interface{}{flattenForwardedValues(cb.ForwardedValues)}) + m["min_ttl"] = int(*cb.MinTTL) + + if len(cb.TrustedSigners.Items) > 0 { + m["trusted_signers"] = flattenTrustedSigners(cb.TrustedSigners) + } + if len(cb.LambdaFunctionAssociations.Items) > 0 { + m["lambda_function_association"] = flattenLambdaFunctionAssociations(cb.LambdaFunctionAssociations) + } + if cb.MaxTTL != nil { + m["max_ttl"] = int(*cb.MaxTTL) + } + if cb.SmoothStreaming != nil { + m["smooth_streaming"] = *cb.SmoothStreaming + } + if cb.DefaultTTL != nil { + m["default_ttl"] = int(*cb.DefaultTTL) + } + if cb.AllowedMethods != nil { + m["allowed_methods"] = flattenAllowedMethods(cb.AllowedMethods) + } + if cb.AllowedMethods.CachedMethods != nil { + m["cached_methods"] = flattenCachedMethods(cb.AllowedMethods.CachedMethods) + } + if cb.PathPattern != nil { + m["path_pattern"] = *cb.PathPattern + } + return m +} + +// Assemble the hash for the aws_cloudfront_distribution cache_behavior +// TypeSet attribute. +func cacheBehaviorHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%t-", m["compress"].(bool))) + buf.WriteString(fmt.Sprintf("%s-", m["viewer_protocol_policy"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["target_origin_id"].(string))) + buf.WriteString(fmt.Sprintf("%d-", forwardedValuesHash(m["forwarded_values"].(*schema.Set).List()[0].(map[string]interface{})))) + buf.WriteString(fmt.Sprintf("%d-", m["min_ttl"].(int))) + if d, ok := m["trusted_signers"]; ok { + for _, e := range sortInterfaceSlice(d.([]interface{})) { + buf.WriteString(fmt.Sprintf("%s-", e.(string))) + } + } + if d, ok := m["max_ttl"]; ok { + buf.WriteString(fmt.Sprintf("%d-", d.(int))) + } + if d, ok := m["smooth_streaming"]; ok { + buf.WriteString(fmt.Sprintf("%t-", d.(bool))) + } + if d, ok := m["default_ttl"]; ok { + buf.WriteString(fmt.Sprintf("%d-", d.(int))) + } + if d, ok := m["allowed_methods"]; ok { + for _, e := range sortInterfaceSlice(d.([]interface{})) { + buf.WriteString(fmt.Sprintf("%s-", e.(string))) + } + } + if d, ok := m["cached_methods"]; ok { + for _, e := range sortInterfaceSlice(d.([]interface{})) { + buf.WriteString(fmt.Sprintf("%s-", e.(string))) + } + } + if d, ok := m["path_pattern"]; ok { + buf.WriteString(fmt.Sprintf("%s-", d)) + } + if d, ok := m["lambda_function_association"]; ok { + var associations []interface{} + switch d.(type) { + case *schema.Set: + associations = d.(*schema.Set).List() + default: + associations = d.([]interface{}) + } + for _, lfa := range associations { + buf.WriteString(fmt.Sprintf("%d-", lambdaFunctionAssociationHash(lfa.(map[string]interface{})))) + } + } + return hashcode.String(buf.String()) +} + +func expandTrustedSigners(s []interface{}) *cloudfront.TrustedSigners { + var ts cloudfront.TrustedSigners + if len(s) > 0 { + ts.Quantity = aws.Int64(int64(len(s))) + ts.Items = expandStringList(s) + ts.Enabled = aws.Bool(true) + } else { + ts.Quantity = aws.Int64(0) + ts.Enabled = aws.Bool(false) + } + return &ts +} + +func flattenTrustedSigners(ts *cloudfront.TrustedSigners) []interface{} { + if ts.Items != nil { + return flattenStringList(ts.Items) + } + return []interface{}{} +} + +func lambdaFunctionAssociationHash(v interface{}) int { + 
var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["event_type"].(string))) + buf.WriteString(fmt.Sprintf("%s", m["lambda_arn"].(string))) + return hashcode.String(buf.String()) +} + +func expandLambdaFunctionAssociations(v interface{}) *cloudfront.LambdaFunctionAssociations { + if v == nil { + return &cloudfront.LambdaFunctionAssociations{ + Quantity: aws.Int64(0), + } + } + + s := v.([]interface{}) + var lfa cloudfront.LambdaFunctionAssociations + lfa.Quantity = aws.Int64(int64(len(s))) + lfa.Items = make([]*cloudfront.LambdaFunctionAssociation, len(s)) + for i, lf := range s { + lfa.Items[i] = expandLambdaFunctionAssociation(lf.(map[string]interface{})) + } + return &lfa +} + +func expandLambdaFunctionAssociation(lf map[string]interface{}) *cloudfront.LambdaFunctionAssociation { + var lfa cloudfront.LambdaFunctionAssociation + if v, ok := lf["event_type"]; ok { + lfa.EventType = aws.String(v.(string)) + } + if v, ok := lf["lambda_arn"]; ok { + lfa.LambdaFunctionARN = aws.String(v.(string)) + } + return &lfa +} + +func flattenLambdaFunctionAssociations(lfa *cloudfront.LambdaFunctionAssociations) *schema.Set { + s := schema.NewSet(lambdaFunctionAssociationHash, []interface{}{}) + for _, v := range lfa.Items { + s.Add(flattenLambdaFunctionAssociation(v)) + } + return s +} + +func flattenLambdaFunctionAssociation(lfa *cloudfront.LambdaFunctionAssociation) map[string]interface{} { + m := map[string]interface{}{} + if lfa != nil { + m["event_type"] = *lfa.EventType + m["lambda_arn"] = *lfa.LambdaFunctionARN + } + return m +} + +func expandForwardedValues(m map[string]interface{}) *cloudfront.ForwardedValues { + fv := &cloudfront.ForwardedValues{ + QueryString: aws.Bool(m["query_string"].(bool)), + } + if v, ok := m["cookies"]; ok && v.(*schema.Set).Len() > 0 { + fv.Cookies = expandCookiePreference(v.(*schema.Set).List()[0].(map[string]interface{})) + } + if v, ok := m["headers"]; ok { + fv.Headers = expandHeaders(v.([]interface{})) + } + if v, ok := m["query_string_cache_keys"]; ok { + fv.QueryStringCacheKeys = expandQueryStringCacheKeys(v.([]interface{})) + } + return fv +} + +func flattenForwardedValues(fv *cloudfront.ForwardedValues) map[string]interface{} { + m := make(map[string]interface{}) + m["query_string"] = *fv.QueryString + if fv.Cookies != nil { + m["cookies"] = schema.NewSet(cookiePreferenceHash, []interface{}{flattenCookiePreference(fv.Cookies)}) + } + if fv.Headers != nil { + m["headers"] = flattenHeaders(fv.Headers) + } + if fv.QueryStringCacheKeys != nil { + m["query_string_cache_keys"] = flattenQueryStringCacheKeys(fv.QueryStringCacheKeys) + } + return m +} + +// Assemble the hash for the aws_cloudfront_distribution forwarded_values +// TypeSet attribute. 
+func forwardedValuesHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%t-", m["query_string"].(bool))) + if d, ok := m["cookies"]; ok && d.(*schema.Set).Len() > 0 { + buf.WriteString(fmt.Sprintf("%d-", cookiePreferenceHash(d.(*schema.Set).List()[0].(map[string]interface{})))) + } + if d, ok := m["headers"]; ok { + for _, e := range sortInterfaceSlice(d.([]interface{})) { + buf.WriteString(fmt.Sprintf("%s-", e.(string))) + } + } + if d, ok := m["query_string_cache_keys"]; ok { + for _, e := range sortInterfaceSlice(d.([]interface{})) { + buf.WriteString(fmt.Sprintf("%s-", e.(string))) + } + } + return hashcode.String(buf.String()) +} + +func expandHeaders(d []interface{}) *cloudfront.Headers { + return &cloudfront.Headers{ + Quantity: aws.Int64(int64(len(d))), + Items: expandStringList(d), + } +} + +func flattenHeaders(h *cloudfront.Headers) []interface{} { + if h.Items != nil { + return flattenStringList(h.Items) + } + return []interface{}{} +} + +func expandQueryStringCacheKeys(d []interface{}) *cloudfront.QueryStringCacheKeys { + return &cloudfront.QueryStringCacheKeys{ + Quantity: aws.Int64(int64(len(d))), + Items: expandStringList(d), + } +} + +func flattenQueryStringCacheKeys(k *cloudfront.QueryStringCacheKeys) []interface{} { + if k.Items != nil { + return flattenStringList(k.Items) + } + return []interface{}{} +} + +func expandCookiePreference(m map[string]interface{}) *cloudfront.CookiePreference { + cp := &cloudfront.CookiePreference{ + Forward: aws.String(m["forward"].(string)), + } + if v, ok := m["whitelisted_names"]; ok { + cp.WhitelistedNames = expandCookieNames(v.([]interface{})) + } + return cp +} + +func flattenCookiePreference(cp *cloudfront.CookiePreference) map[string]interface{} { + m := make(map[string]interface{}) + m["forward"] = *cp.Forward + if cp.WhitelistedNames != nil { + m["whitelisted_names"] = flattenCookieNames(cp.WhitelistedNames) + } + return m +} + +// Assemble the hash for the aws_cloudfront_distribution cookies +// TypeSet attribute. 
+func cookiePreferenceHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["forward"].(string))) + if d, ok := m["whitelisted_names"]; ok { + for _, e := range sortInterfaceSlice(d.([]interface{})) { + buf.WriteString(fmt.Sprintf("%s-", e.(string))) + } + } + return hashcode.String(buf.String()) +} + +func expandCookieNames(d []interface{}) *cloudfront.CookieNames { + return &cloudfront.CookieNames{ + Quantity: aws.Int64(int64(len(d))), + Items: expandStringList(d), + } +} + +func flattenCookieNames(cn *cloudfront.CookieNames) []interface{} { + if cn.Items != nil { + return flattenStringList(cn.Items) + } + return []interface{}{} +} + +func expandAllowedMethods(s []interface{}) *cloudfront.AllowedMethods { + return &cloudfront.AllowedMethods{ + Quantity: aws.Int64(int64(len(s))), + Items: expandStringList(s), + } +} + +func flattenAllowedMethods(am *cloudfront.AllowedMethods) []interface{} { + if am.Items != nil { + return flattenStringList(am.Items) + } + return []interface{}{} +} + +func expandCachedMethods(s []interface{}) *cloudfront.CachedMethods { + return &cloudfront.CachedMethods{ + Quantity: aws.Int64(int64(len(s))), + Items: expandStringList(s), + } +} + +func flattenCachedMethods(cm *cloudfront.CachedMethods) []interface{} { + if cm.Items != nil { + return flattenStringList(cm.Items) + } + return []interface{}{} +} + +func expandOrigins(s *schema.Set) *cloudfront.Origins { + qty := 0 + items := []*cloudfront.Origin{} + for _, v := range s.List() { + items = append(items, expandOrigin(v.(map[string]interface{}))) + qty++ + } + return &cloudfront.Origins{ + Quantity: aws.Int64(int64(qty)), + Items: items, + } +} + +func flattenOrigins(ors *cloudfront.Origins) *schema.Set { + s := []interface{}{} + for _, v := range ors.Items { + s = append(s, flattenOrigin(v)) + } + return schema.NewSet(originHash, s) +} + +func expandOrigin(m map[string]interface{}) *cloudfront.Origin { + origin := &cloudfront.Origin{ + Id: aws.String(m["origin_id"].(string)), + DomainName: aws.String(m["domain_name"].(string)), + } + if v, ok := m["custom_header"]; ok { + origin.CustomHeaders = expandCustomHeaders(v.(*schema.Set)) + } + if v, ok := m["custom_origin_config"]; ok { + if s := v.(*schema.Set).List(); len(s) > 0 { + origin.CustomOriginConfig = expandCustomOriginConfig(s[0].(map[string]interface{})) + } + } + if v, ok := m["origin_path"]; ok { + origin.OriginPath = aws.String(v.(string)) + } + if v, ok := m["s3_origin_config"]; ok { + if s := v.(*schema.Set).List(); len(s) > 0 { + origin.S3OriginConfig = expandS3OriginConfig(s[0].(map[string]interface{})) + } + } + + // if both custom and s3 origin are missing, add an empty s3 origin + // One or the other must be specified, but the S3 origin can be "empty" + if origin.S3OriginConfig == nil && origin.CustomOriginConfig == nil { + origin.S3OriginConfig = &cloudfront.S3OriginConfig{ + OriginAccessIdentity: aws.String(""), + } + } + + return origin +} + +func flattenOrigin(or *cloudfront.Origin) map[string]interface{} { + m := make(map[string]interface{}) + m["origin_id"] = *or.Id + m["domain_name"] = *or.DomainName + if or.CustomHeaders != nil { + m["custom_header"] = flattenCustomHeaders(or.CustomHeaders) + } + if or.CustomOriginConfig != nil { + m["custom_origin_config"] = schema.NewSet(customOriginConfigHash, []interface{}{flattenCustomOriginConfig(or.CustomOriginConfig)}) + } + if or.OriginPath != nil { + m["origin_path"] = *or.OriginPath + } + if or.S3OriginConfig != nil { + if 
or.S3OriginConfig.OriginAccessIdentity != nil && *or.S3OriginConfig.OriginAccessIdentity != "" { + m["s3_origin_config"] = schema.NewSet(s3OriginConfigHash, []interface{}{flattenS3OriginConfig(or.S3OriginConfig)}) + } + } + return m +} + +// Assemble the hash for the aws_cloudfront_distribution origin +// TypeSet attribute. +func originHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["origin_id"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["domain_name"].(string))) + if v, ok := m["custom_header"]; ok { + buf.WriteString(fmt.Sprintf("%d-", customHeadersHash(v.(*schema.Set)))) + } + if v, ok := m["custom_origin_config"]; ok { + if s := v.(*schema.Set).List(); len(s) > 0 { + buf.WriteString(fmt.Sprintf("%d-", customOriginConfigHash((s[0].(map[string]interface{}))))) + } + } + if v, ok := m["origin_path"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + if v, ok := m["s3_origin_config"]; ok { + if s := v.(*schema.Set).List(); len(s) > 0 { + buf.WriteString(fmt.Sprintf("%d-", s3OriginConfigHash((s[0].(map[string]interface{}))))) + } + } + return hashcode.String(buf.String()) +} + +func expandCustomHeaders(s *schema.Set) *cloudfront.CustomHeaders { + qty := 0 + items := []*cloudfront.OriginCustomHeader{} + for _, v := range s.List() { + items = append(items, expandOriginCustomHeader(v.(map[string]interface{}))) + qty++ + } + return &cloudfront.CustomHeaders{ + Quantity: aws.Int64(int64(qty)), + Items: items, + } +} + +func flattenCustomHeaders(chs *cloudfront.CustomHeaders) *schema.Set { + s := []interface{}{} + for _, v := range chs.Items { + s = append(s, flattenOriginCustomHeader(v)) + } + return schema.NewSet(originCustomHeaderHash, s) +} + +func expandOriginCustomHeader(m map[string]interface{}) *cloudfront.OriginCustomHeader { + return &cloudfront.OriginCustomHeader{ + HeaderName: aws.String(m["name"].(string)), + HeaderValue: aws.String(m["value"].(string)), + } +} + +func flattenOriginCustomHeader(och *cloudfront.OriginCustomHeader) map[string]interface{} { + return map[string]interface{}{ + "name": *och.HeaderName, + "value": *och.HeaderValue, + } +} + +// Helper function used by originHash to get a composite hash for all +// aws_cloudfront_distribution custom_header attributes. +func customHeadersHash(s *schema.Set) int { + var buf bytes.Buffer + for _, v := range s.List() { + buf.WriteString(fmt.Sprintf("%d-", originCustomHeaderHash(v))) + } + return hashcode.String(buf.String()) +} + +// Assemble the hash for the aws_cloudfront_distribution custom_header +// TypeSet attribute. 
+func originCustomHeaderHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["value"].(string))) + return hashcode.String(buf.String()) +} + +func expandCustomOriginConfig(m map[string]interface{}) *cloudfront.CustomOriginConfig { + + customOrigin := &cloudfront.CustomOriginConfig{ + OriginProtocolPolicy: aws.String(m["origin_protocol_policy"].(string)), + HTTPPort: aws.Int64(int64(m["http_port"].(int))), + HTTPSPort: aws.Int64(int64(m["https_port"].(int))), + OriginSslProtocols: expandCustomOriginConfigSSL(m["origin_ssl_protocols"].([]interface{})), + OriginReadTimeout: aws.Int64(int64(m["origin_read_timeout"].(int))), + OriginKeepaliveTimeout: aws.Int64(int64(m["origin_keepalive_timeout"].(int))), + } + + return customOrigin +} + +func flattenCustomOriginConfig(cor *cloudfront.CustomOriginConfig) map[string]interface{} { + + customOrigin := map[string]interface{}{ + "origin_protocol_policy": *cor.OriginProtocolPolicy, + "http_port": int(*cor.HTTPPort), + "https_port": int(*cor.HTTPSPort), + "origin_ssl_protocols": flattenCustomOriginConfigSSL(cor.OriginSslProtocols), + "origin_read_timeout": int(*cor.OriginReadTimeout), + "origin_keepalive_timeout": int(*cor.OriginKeepaliveTimeout), + } + + return customOrigin +} + +// Assemble the hash for the aws_cloudfront_distribution custom_origin_config +// TypeSet attribute. +func customOriginConfigHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["origin_protocol_policy"].(string))) + buf.WriteString(fmt.Sprintf("%d-", m["http_port"].(int))) + buf.WriteString(fmt.Sprintf("%d-", m["https_port"].(int))) + for _, v := range sortInterfaceSlice(m["origin_ssl_protocols"].([]interface{})) { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + buf.WriteString(fmt.Sprintf("%d-", m["origin_keepalive_timeout"].(int))) + buf.WriteString(fmt.Sprintf("%d-", m["origin_read_timeout"].(int))) + + return hashcode.String(buf.String()) +} + +func expandCustomOriginConfigSSL(s []interface{}) *cloudfront.OriginSslProtocols { + items := expandStringList(s) + return &cloudfront.OriginSslProtocols{ + Quantity: aws.Int64(int64(len(items))), + Items: items, + } +} + +func flattenCustomOriginConfigSSL(osp *cloudfront.OriginSslProtocols) []interface{} { + return flattenStringList(osp.Items) +} + +func expandS3OriginConfig(m map[string]interface{}) *cloudfront.S3OriginConfig { + return &cloudfront.S3OriginConfig{ + OriginAccessIdentity: aws.String(m["origin_access_identity"].(string)), + } +} + +func flattenS3OriginConfig(s3o *cloudfront.S3OriginConfig) map[string]interface{} { + return map[string]interface{}{ + "origin_access_identity": *s3o.OriginAccessIdentity, + } +} + +// Assemble the hash for the aws_cloudfront_distribution s3_origin_config +// TypeSet attribute. 
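+// Note that flattenOrigin only emits an s3_origin_config block when the
+// origin access identity is non-empty, so empty identities never reach this
+// hash.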
+func s3OriginConfigHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["origin_access_identity"].(string))) + return hashcode.String(buf.String()) +} + +func expandCustomErrorResponses(s *schema.Set) *cloudfront.CustomErrorResponses { + qty := 0 + items := []*cloudfront.CustomErrorResponse{} + for _, v := range s.List() { + items = append(items, expandCustomErrorResponse(v.(map[string]interface{}))) + qty++ + } + return &cloudfront.CustomErrorResponses{ + Quantity: aws.Int64(int64(qty)), + Items: items, + } +} + +func flattenCustomErrorResponses(ers *cloudfront.CustomErrorResponses) *schema.Set { + s := []interface{}{} + for _, v := range ers.Items { + s = append(s, flattenCustomErrorResponse(v)) + } + return schema.NewSet(customErrorResponseHash, s) +} + +func expandCustomErrorResponse(m map[string]interface{}) *cloudfront.CustomErrorResponse { + er := cloudfront.CustomErrorResponse{ + ErrorCode: aws.Int64(int64(m["error_code"].(int))), + } + if v, ok := m["error_caching_min_ttl"]; ok { + er.ErrorCachingMinTTL = aws.Int64(int64(v.(int))) + } + if v, ok := m["response_code"]; ok && v.(int) != 0 { + er.ResponseCode = aws.String(strconv.Itoa(v.(int))) + } else { + er.ResponseCode = aws.String("") + } + if v, ok := m["response_page_path"]; ok { + er.ResponsePagePath = aws.String(v.(string)) + } + + return &er +} + +func flattenCustomErrorResponse(er *cloudfront.CustomErrorResponse) map[string]interface{} { + m := make(map[string]interface{}) + m["error_code"] = int(*er.ErrorCode) + if er.ErrorCachingMinTTL != nil { + m["error_caching_min_ttl"] = int(*er.ErrorCachingMinTTL) + } + if er.ResponseCode != nil { + m["response_code"], _ = strconv.Atoi(*er.ResponseCode) + } + if er.ResponsePagePath != nil { + m["response_page_path"] = *er.ResponsePagePath + } + return m +} + +// Assemble the hash for the aws_cloudfront_distribution custom_error_response +// TypeSet attribute. +func customErrorResponseHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%d-", m["error_code"].(int))) + if v, ok := m["error_caching_min_ttl"]; ok { + buf.WriteString(fmt.Sprintf("%d-", v.(int))) + } + if v, ok := m["response_code"]; ok { + buf.WriteString(fmt.Sprintf("%d-", v.(int))) + } + if v, ok := m["response_page_path"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + return hashcode.String(buf.String()) +} + +func expandLoggingConfig(m map[string]interface{}) *cloudfront.LoggingConfig { + var lc cloudfront.LoggingConfig + if m != nil { + lc.Prefix = aws.String(m["prefix"].(string)) + lc.Bucket = aws.String(m["bucket"].(string)) + lc.IncludeCookies = aws.Bool(m["include_cookies"].(bool)) + lc.Enabled = aws.Bool(true) + } else { + lc.Prefix = aws.String("") + lc.Bucket = aws.String("") + lc.IncludeCookies = aws.Bool(false) + lc.Enabled = aws.Bool(false) + } + return &lc +} + +func flattenLoggingConfig(lc *cloudfront.LoggingConfig) *schema.Set { + m := make(map[string]interface{}) + m["prefix"] = *lc.Prefix + m["bucket"] = *lc.Bucket + m["include_cookies"] = *lc.IncludeCookies + return schema.NewSet(loggingConfigHash, []interface{}{m}) +} + +// Assemble the hash for the aws_cloudfront_distribution logging_config +// TypeSet attribute. 
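+// flattenLoggingConfig always populates prefix, bucket and include_cookies,
+// so all three keys are present in the map hashed here.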
+func loggingConfigHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["prefix"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["bucket"].(string))) + buf.WriteString(fmt.Sprintf("%t-", m["include_cookies"].(bool))) + return hashcode.String(buf.String()) +} + +func expandAliases(as *schema.Set) *cloudfront.Aliases { + s := as.List() + var aliases cloudfront.Aliases + if len(s) > 0 { + aliases.Quantity = aws.Int64(int64(len(s))) + aliases.Items = expandStringList(s) + } else { + aliases.Quantity = aws.Int64(0) + } + return &aliases +} + +func flattenAliases(aliases *cloudfront.Aliases) *schema.Set { + if aliases.Items != nil { + return schema.NewSet(aliasesHash, flattenStringList(aliases.Items)) + } + return schema.NewSet(aliasesHash, []interface{}{}) +} + +// Assemble the hash for the aws_cloudfront_distribution aliases +// TypeSet attribute. +func aliasesHash(v interface{}) int { + return hashcode.String(v.(string)) +} + +func expandRestrictions(m map[string]interface{}) *cloudfront.Restrictions { + return &cloudfront.Restrictions{ + GeoRestriction: expandGeoRestriction(m["geo_restriction"].(*schema.Set).List()[0].(map[string]interface{})), + } +} + +func flattenRestrictions(r *cloudfront.Restrictions) *schema.Set { + m := make(map[string]interface{}) + s := schema.NewSet(geoRestrictionHash, []interface{}{flattenGeoRestriction(r.GeoRestriction)}) + m["geo_restriction"] = s + return schema.NewSet(restrictionsHash, []interface{}{m}) +} + +// Assemble the hash for the aws_cloudfront_distribution restrictions +// TypeSet attribute. +func restrictionsHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%d-", geoRestrictionHash(m["geo_restriction"].(*schema.Set).List()[0].(map[string]interface{})))) + return hashcode.String(buf.String()) +} + +func expandGeoRestriction(m map[string]interface{}) *cloudfront.GeoRestriction { + gr := cloudfront.GeoRestriction{ + RestrictionType: aws.String(m["restriction_type"].(string)), + } + if v, ok := m["locations"]; ok { + gr.Quantity = aws.Int64(int64(len(v.([]interface{})))) + gr.Items = expandStringList(v.([]interface{})) + sort.Sort(StringPtrSlice(gr.Items)) + } else { + gr.Quantity = aws.Int64(0) + } + return &gr +} + +func flattenGeoRestriction(gr *cloudfront.GeoRestriction) map[string]interface{} { + m := make(map[string]interface{}) + + m["restriction_type"] = *gr.RestrictionType + if gr.Items != nil { + sort.Sort(StringPtrSlice(gr.Items)) + m["locations"] = flattenStringList(gr.Items) + } + return m +} + +// Assemble the hash for the aws_cloudfront_distribution geo_restriction +// TypeSet attribute. +func geoRestrictionHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + // All keys added in alphabetical order. 
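+	// Locations are sorted before being written, so the hash is independent
+	// of the order in which they appear in the configuration.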
+ buf.WriteString(fmt.Sprintf("%s-", m["restriction_type"].(string))) + if v, ok := m["locations"]; ok { + for _, w := range sortInterfaceSlice(v.([]interface{})) { + buf.WriteString(fmt.Sprintf("%s-", w.(string))) + } + } + return hashcode.String(buf.String()) +} + +func expandViewerCertificate(m map[string]interface{}) *cloudfront.ViewerCertificate { + var vc cloudfront.ViewerCertificate + if v, ok := m["iam_certificate_id"]; ok && v != "" { + vc.IAMCertificateId = aws.String(v.(string)) + vc.SSLSupportMethod = aws.String(m["ssl_support_method"].(string)) + } else if v, ok := m["acm_certificate_arn"]; ok && v != "" { + vc.ACMCertificateArn = aws.String(v.(string)) + vc.SSLSupportMethod = aws.String(m["ssl_support_method"].(string)) + } else { + vc.CloudFrontDefaultCertificate = aws.Bool(m["cloudfront_default_certificate"].(bool)) + } + if v, ok := m["minimum_protocol_version"]; ok && v != "" { + vc.MinimumProtocolVersion = aws.String(v.(string)) + } + return &vc +} + +func flattenViewerCertificate(vc *cloudfront.ViewerCertificate) *schema.Set { + m := make(map[string]interface{}) + + if vc.IAMCertificateId != nil { + m["iam_certificate_id"] = *vc.IAMCertificateId + m["ssl_support_method"] = *vc.SSLSupportMethod + } + if vc.ACMCertificateArn != nil { + m["acm_certificate_arn"] = *vc.ACMCertificateArn + m["ssl_support_method"] = *vc.SSLSupportMethod + } + if vc.CloudFrontDefaultCertificate != nil { + m["cloudfront_default_certificate"] = *vc.CloudFrontDefaultCertificate + } + if vc.MinimumProtocolVersion != nil { + m["minimum_protocol_version"] = *vc.MinimumProtocolVersion + } + return schema.NewSet(viewerCertificateHash, []interface{}{m}) +} + +// Assemble the hash for the aws_cloudfront_distribution viewer_certificate +// TypeSet attribute. +func viewerCertificateHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + if v, ok := m["iam_certificate_id"]; ok && v.(string) != "" { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + buf.WriteString(fmt.Sprintf("%s-", m["ssl_support_method"].(string))) + } else if v, ok := m["acm_certificate_arn"]; ok && v.(string) != "" { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + buf.WriteString(fmt.Sprintf("%s-", m["ssl_support_method"].(string))) + } else { + buf.WriteString(fmt.Sprintf("%t-", m["cloudfront_default_certificate"].(bool))) + } + if v, ok := m["minimum_protocol_version"]; ok && v.(string) != "" { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + return hashcode.String(buf.String()) +} + +// Do a top-level copy of struct fields from one struct to another. Used to +// copy fields between CacheBehavior and DefaultCacheBehavior structs. +func simpleCopyStruct(src, dst interface{}) { + s := reflect.ValueOf(src).Elem() + d := reflect.ValueOf(dst).Elem() + + for i := 0; i < s.NumField(); i++ { + if s.Field(i).CanSet() == true { + if s.Field(i).Interface() != nil { + for j := 0; j < d.NumField(); j++ { + if d.Type().Field(j).Name == s.Type().Field(i).Name { + d.Field(j).Set(s.Field(i)) + } + } + } + } + } +} + +// Convert *cloudfront.ActiveTrustedSigners to a flatmap.Map type, which ensures +// it can probably be inserted into the schema.TypeMap type used by the +// active_trusted_signers attribute. 
+func flattenActiveTrustedSigners(ats *cloudfront.ActiveTrustedSigners) flatmap.Map { + m := make(map[string]interface{}) + s := []interface{}{} + m["enabled"] = *ats.Enabled + + for _, v := range ats.Items { + signer := make(map[string]interface{}) + signer["aws_account_number"] = *v.AwsAccountNumber + signer["key_pair_ids"] = aws.StringValueSlice(v.KeyPairIds.Items) + s = append(s, signer) + } + m["items"] = s + return flatmap.Flatten(m) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/config.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/config.go new file mode 100644 index 000000000..dd1149b91 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/config.go @@ -0,0 +1,510 @@ +package aws + +import ( + "crypto/tls" + "errors" + "fmt" + "log" + "net/http" + "os" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/acm" + "github.com/aws/aws-sdk-go/service/apigateway" + "github.com/aws/aws-sdk-go/service/applicationautoscaling" + "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go/service/cloudformation" + "github.com/aws/aws-sdk-go/service/cloudfront" + "github.com/aws/aws-sdk-go/service/cloudtrail" + "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/aws/aws-sdk-go/service/cloudwatchevents" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/aws/aws-sdk-go/service/codebuild" + "github.com/aws/aws-sdk-go/service/codecommit" + "github.com/aws/aws-sdk-go/service/codedeploy" + "github.com/aws/aws-sdk-go/service/codepipeline" + "github.com/aws/aws-sdk-go/service/cognitoidentity" + "github.com/aws/aws-sdk-go/service/configservice" + "github.com/aws/aws-sdk-go/service/databasemigrationservice" + "github.com/aws/aws-sdk-go/service/devicefarm" + "github.com/aws/aws-sdk-go/service/directoryservice" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ecr" + "github.com/aws/aws-sdk-go/service/ecs" + "github.com/aws/aws-sdk-go/service/efs" + "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go/service/elasticbeanstalk" + elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" + "github.com/aws/aws-sdk-go/service/elastictranscoder" + "github.com/aws/aws-sdk-go/service/elb" + "github.com/aws/aws-sdk-go/service/elbv2" + "github.com/aws/aws-sdk-go/service/emr" + "github.com/aws/aws-sdk-go/service/firehose" + "github.com/aws/aws-sdk-go/service/glacier" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/aws/aws-sdk-go/service/inspector" + "github.com/aws/aws-sdk-go/service/kinesis" + "github.com/aws/aws-sdk-go/service/kms" + "github.com/aws/aws-sdk-go/service/lambda" + "github.com/aws/aws-sdk-go/service/lightsail" + "github.com/aws/aws-sdk-go/service/opsworks" + "github.com/aws/aws-sdk-go/service/rds" + "github.com/aws/aws-sdk-go/service/redshift" + "github.com/aws/aws-sdk-go/service/route53" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/ses" + "github.com/aws/aws-sdk-go/service/sfn" + "github.com/aws/aws-sdk-go/service/simpledb" + "github.com/aws/aws-sdk-go/service/sns" + "github.com/aws/aws-sdk-go/service/sqs" + "github.com/aws/aws-sdk-go/service/ssm" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/aws/aws-sdk-go/service/waf" + 
"github.com/aws/aws-sdk-go/service/wafregional" + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/terraform/helper/logging" + "github.com/hashicorp/terraform/terraform" +) + +type Config struct { + AccessKey string + SecretKey string + CredsFilename string + Profile string + Token string + Region string + MaxRetries int + + AssumeRoleARN string + AssumeRoleExternalID string + AssumeRoleSessionName string + AssumeRolePolicy string + + AllowedAccountIds []interface{} + ForbiddenAccountIds []interface{} + + CloudFormationEndpoint string + CloudWatchEndpoint string + CloudWatchEventsEndpoint string + CloudWatchLogsEndpoint string + DynamoDBEndpoint string + DeviceFarmEndpoint string + Ec2Endpoint string + ElbEndpoint string + IamEndpoint string + KinesisEndpoint string + KmsEndpoint string + RdsEndpoint string + S3Endpoint string + SnsEndpoint string + SqsEndpoint string + Insecure bool + + SkipCredsValidation bool + SkipGetEC2Platforms bool + SkipRegionValidation bool + SkipRequestingAccountId bool + SkipMetadataApiCheck bool + S3ForcePathStyle bool +} + +type AWSClient struct { + cfconn *cloudformation.CloudFormation + cloudfrontconn *cloudfront.CloudFront + cloudtrailconn *cloudtrail.CloudTrail + cloudwatchconn *cloudwatch.CloudWatch + cloudwatchlogsconn *cloudwatchlogs.CloudWatchLogs + cloudwatcheventsconn *cloudwatchevents.CloudWatchEvents + cognitoconn *cognitoidentity.CognitoIdentity + configconn *configservice.ConfigService + devicefarmconn *devicefarm.DeviceFarm + dmsconn *databasemigrationservice.DatabaseMigrationService + dsconn *directoryservice.DirectoryService + dynamodbconn *dynamodb.DynamoDB + ec2conn *ec2.EC2 + ecrconn *ecr.ECR + ecsconn *ecs.ECS + efsconn *efs.EFS + elbconn *elb.ELB + elbv2conn *elbv2.ELBV2 + emrconn *emr.EMR + esconn *elasticsearch.ElasticsearchService + acmconn *acm.ACM + apigateway *apigateway.APIGateway + appautoscalingconn *applicationautoscaling.ApplicationAutoScaling + autoscalingconn *autoscaling.AutoScaling + s3conn *s3.S3 + sesConn *ses.SES + simpledbconn *simpledb.SimpleDB + sqsconn *sqs.SQS + snsconn *sns.SNS + stsconn *sts.STS + redshiftconn *redshift.Redshift + r53conn *route53.Route53 + partition string + accountid string + supportedplatforms []string + region string + rdsconn *rds.RDS + iamconn *iam.IAM + kinesisconn *kinesis.Kinesis + kmsconn *kms.KMS + firehoseconn *firehose.Firehose + inspectorconn *inspector.Inspector + elasticacheconn *elasticache.ElastiCache + elasticbeanstalkconn *elasticbeanstalk.ElasticBeanstalk + elastictranscoderconn *elastictranscoder.ElasticTranscoder + lambdaconn *lambda.Lambda + lightsailconn *lightsail.Lightsail + opsworksconn *opsworks.OpsWorks + glacierconn *glacier.Glacier + codebuildconn *codebuild.CodeBuild + codedeployconn *codedeploy.CodeDeploy + codecommitconn *codecommit.CodeCommit + codepipelineconn *codepipeline.CodePipeline + sfnconn *sfn.SFN + ssmconn *ssm.SSM + wafconn *waf.WAF + wafregionalconn *wafregional.WAFRegional +} + +func (c *AWSClient) S3() *s3.S3 { + return c.s3conn +} + +func (c *AWSClient) DynamoDB() *dynamodb.DynamoDB { + return c.dynamodbconn +} + +func (c *AWSClient) IsGovCloud() bool { + if c.region == "us-gov-west-1" { + return true + } + return false +} + +func (c *AWSClient) IsChinaCloud() bool { + if c.region == "cn-north-1" { + return true + } + return false +} + +// Client configures and returns a fully initialized AWSClient +func (c *Config) Client() (interface{}, error) { + // Get the auth and 
region. This can fail if keys/regions were not + // specified and we're attempting to use the environment. + if c.SkipRegionValidation { + log.Println("[INFO] Skipping region validation") + } else { + log.Println("[INFO] Building AWS region structure") + err := c.ValidateRegion() + if err != nil { + return nil, err + } + } + + var client AWSClient + // store AWS region in client struct, for region specific operations such as + // bucket storage in S3 + client.region = c.Region + + log.Println("[INFO] Building AWS auth structure") + creds, err := GetCredentials(c) + if err != nil { + return nil, err + } + // Call Get to check for credential provider. If nothing found, we'll get an + // error, and we can present it nicely to the user + cp, err := creds.Get() + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" { + return nil, errors.New(`No valid credential sources found for AWS Provider. + Please see https://terraform.io/docs/providers/aws/index.html for more information on + providing credentials for the AWS Provider`) + } + + return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err) + } + + log.Printf("[INFO] AWS Auth provider used: %q", cp.ProviderName) + + awsConfig := &aws.Config{ + Credentials: creds, + Region: aws.String(c.Region), + MaxRetries: aws.Int(c.MaxRetries), + HTTPClient: cleanhttp.DefaultClient(), + S3ForcePathStyle: aws.Bool(c.S3ForcePathStyle), + } + + if logging.IsDebugOrHigher() { + awsConfig.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody) + awsConfig.Logger = awsLogger{} + } + + if c.Insecure { + transport := awsConfig.HTTPClient.Transport.(*http.Transport) + transport.TLSClientConfig = &tls.Config{ + InsecureSkipVerify: true, + } + } + + // Set up base session + sess, err := session.NewSession(awsConfig) + if err != nil { + return nil, errwrap.Wrapf("Error creating AWS session: {{err}}", err) + } + + sess.Handlers.Build.PushBackNamed(addTerraformVersionToUserAgent) + + if extraDebug := os.Getenv("TERRAFORM_AWS_AUTHFAILURE_DEBUG"); extraDebug != "" { + sess.Handlers.UnmarshalError.PushFrontNamed(debugAuthFailure) + } + + // This restriction should only be used for Route53 sessions. + // Other resources that have restrictions should allow the API to fail, rather + // than Terraform abstracting the region for the user. This can lead to breaking + // changes if that resource is ever opened up to more regions. 
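+	// Route53 is a global service; its API endpoint lives in us-east-1,
+	// hence the pinned region below.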
+ r53Sess := sess.Copy(&aws.Config{Region: aws.String("us-east-1")}) + + // Some services have user-configurable endpoints + awsCfSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.CloudFormationEndpoint)}) + awsCwSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.CloudWatchEndpoint)}) + awsCweSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.CloudWatchEventsEndpoint)}) + awsCwlSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.CloudWatchLogsEndpoint)}) + awsDynamoSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.DynamoDBEndpoint)}) + awsEc2Sess := sess.Copy(&aws.Config{Endpoint: aws.String(c.Ec2Endpoint)}) + awsElbSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.ElbEndpoint)}) + awsIamSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.IamEndpoint)}) + awsKinesisSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.KinesisEndpoint)}) + awsKmsSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.KmsEndpoint)}) + awsRdsSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.RdsEndpoint)}) + awsS3Sess := sess.Copy(&aws.Config{Endpoint: aws.String(c.S3Endpoint)}) + awsSnsSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.SnsEndpoint)}) + awsSqsSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.SqsEndpoint)}) + awsDeviceFarmSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.DeviceFarmEndpoint)}) + + log.Println("[INFO] Initializing DeviceFarm SDK connection") + client.devicefarmconn = devicefarm.New(awsDeviceFarmSess) + + // These two services need to be set up early so we can check on AccountID + client.iamconn = iam.New(awsIamSess) + client.stsconn = sts.New(sess) + + if !c.SkipCredsValidation { + err = c.ValidateCredentials(client.stsconn) + if err != nil { + return nil, err + } + } + + if !c.SkipRequestingAccountId { + partition, accountId, err := GetAccountInfo(client.iamconn, client.stsconn, cp.ProviderName) + if err == nil { + client.partition = partition + client.accountid = accountId + } + } + + authErr := c.ValidateAccountId(client.accountid) + if authErr != nil { + return nil, authErr + } + + client.ec2conn = ec2.New(awsEc2Sess) + + if !c.SkipGetEC2Platforms { + supportedPlatforms, err := GetSupportedEC2Platforms(client.ec2conn) + if err != nil { + // We intentionally fail *silently* because there's a chance + // user just doesn't have ec2:DescribeAccountAttributes permissions + log.Printf("[WARN] Unable to get supported EC2 platforms: %s", err) + } else { + client.supportedplatforms = supportedPlatforms + } + } + + client.acmconn = acm.New(sess) + client.apigateway = apigateway.New(sess) + client.appautoscalingconn = applicationautoscaling.New(sess) + client.autoscalingconn = autoscaling.New(sess) + client.cfconn = cloudformation.New(awsCfSess) + client.cloudfrontconn = cloudfront.New(sess) + client.cloudtrailconn = cloudtrail.New(sess) + client.cloudwatchconn = cloudwatch.New(awsCwSess) + client.cloudwatcheventsconn = cloudwatchevents.New(awsCweSess) + client.cloudwatchlogsconn = cloudwatchlogs.New(awsCwlSess) + client.codecommitconn = codecommit.New(sess) + client.codebuildconn = codebuild.New(sess) + client.codedeployconn = codedeploy.New(sess) + client.configconn = configservice.New(sess) + client.cognitoconn = cognitoidentity.New(sess) + client.dmsconn = databasemigrationservice.New(sess) + client.codepipelineconn = codepipeline.New(sess) + client.dsconn = directoryservice.New(sess) + client.dynamodbconn = dynamodb.New(awsDynamoSess) + client.ecrconn = ecr.New(sess) + client.ecsconn = ecs.New(sess) + client.efsconn = efs.New(sess) + client.elasticacheconn = 
elasticache.New(sess) + client.elasticbeanstalkconn = elasticbeanstalk.New(sess) + client.elastictranscoderconn = elastictranscoder.New(sess) + client.elbconn = elb.New(awsElbSess) + client.elbv2conn = elbv2.New(awsElbSess) + client.emrconn = emr.New(sess) + client.esconn = elasticsearch.New(sess) + client.firehoseconn = firehose.New(sess) + client.inspectorconn = inspector.New(sess) + client.glacierconn = glacier.New(sess) + client.kinesisconn = kinesis.New(awsKinesisSess) + client.kmsconn = kms.New(awsKmsSess) + client.lambdaconn = lambda.New(sess) + client.lightsailconn = lightsail.New(sess) + client.opsworksconn = opsworks.New(sess) + client.r53conn = route53.New(r53Sess) + client.rdsconn = rds.New(awsRdsSess) + client.redshiftconn = redshift.New(sess) + client.simpledbconn = simpledb.New(sess) + client.s3conn = s3.New(awsS3Sess) + client.sesConn = ses.New(sess) + client.sfnconn = sfn.New(sess) + client.snsconn = sns.New(awsSnsSess) + client.sqsconn = sqs.New(awsSqsSess) + client.ssmconn = ssm.New(sess) + client.wafconn = waf.New(sess) + client.wafregionalconn = wafregional.New(sess) + + return &client, nil +} + +// ValidateRegion returns an error if the configured region is not a +// valid aws region and nil otherwise. +func (c *Config) ValidateRegion() error { + var regions = []string{ + "ap-northeast-1", + "ap-northeast-2", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "cn-north-1", + "eu-central-1", + "eu-west-1", + "eu-west-2", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-gov-west-1", + "us-west-1", + "us-west-2", + } + + for _, valid := range regions { + if c.Region == valid { + return nil + } + } + return fmt.Errorf("Not a valid region: %s", c.Region) +} + +// Validate credentials early and fail before we do any graph walking. +func (c *Config) ValidateCredentials(stsconn *sts.STS) error { + _, err := stsconn.GetCallerIdentity(&sts.GetCallerIdentityInput{}) + return err +} + +// ValidateAccountId returns a context-specific error if the configured account +// id is explicitly forbidden or not authorised; and nil if it is authorised. +func (c *Config) ValidateAccountId(accountId string) error { + if c.AllowedAccountIds == nil && c.ForbiddenAccountIds == nil { + return nil + } + + log.Println("[INFO] Validating account ID") + + if c.ForbiddenAccountIds != nil { + for _, id := range c.ForbiddenAccountIds { + if id == accountId { + return fmt.Errorf("Forbidden account ID (%s)", id) + } + } + } + + if c.AllowedAccountIds != nil { + for _, id := range c.AllowedAccountIds { + if id == accountId { + return nil + } + } + return fmt.Errorf("Account ID not allowed (%s)", accountId) + } + + return nil +} + +func GetSupportedEC2Platforms(conn *ec2.EC2) ([]string, error) { + attrName := "supported-platforms" + + input := ec2.DescribeAccountAttributesInput{ + AttributeNames: []*string{aws.String(attrName)}, + } + attributes, err := conn.DescribeAccountAttributes(&input) + if err != nil { + return nil, err + } + + var platforms []string + for _, attr := range attributes.AccountAttributes { + if *attr.AttributeName == attrName { + for _, v := range attr.AttributeValues { + platforms = append(platforms, *v.AttributeValue) + } + break + } + } + + if len(platforms) == 0 { + return nil, fmt.Errorf("No EC2 platforms detected") + } + + return platforms, nil +} + +// addTerraformVersionToUserAgent is a named handler that will add Terraform's +// version information to requests made by the AWS SDK. 
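+// The appended User-Agent fragment has the form
+// "APN/1.0 HashiCorp/1.0 Terraform/<version>", with the version taken from
+// terraform.VersionString().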
+var addTerraformVersionToUserAgent = request.NamedHandler{ + Name: "terraform.TerraformVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler( + "APN/1.0 HashiCorp/1.0 Terraform", terraform.VersionString()), +} + +var debugAuthFailure = request.NamedHandler{ + Name: "terraform.AuthFailureAdditionalDebugHandler", + Fn: func(req *request.Request) { + if isAWSErr(req.Error, "AuthFailure", "AWS was not able to validate the provided access credentials") { + log.Printf("[INFO] Additional AuthFailure Debugging Context") + log.Printf("[INFO] Current system UTC time: %s", time.Now().UTC()) + log.Printf("[INFO] Request object: %s", spew.Sdump(req)) + } + }, +} + +type awsLogger struct{} + +func (l awsLogger) Log(args ...interface{}) { + tokens := make([]string, 0, len(args)) + for _, arg := range args { + if token, ok := arg.(string); ok { + tokens = append(tokens, token) + } + } + log.Printf("[DEBUG] [aws-sdk-go] %s", strings.Join(tokens, " ")) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_acm_certificate.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_acm_certificate.go new file mode 100644 index 000000000..5b69ed93d --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_acm_certificate.go @@ -0,0 +1,103 @@ +package aws + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/acm" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsAcmCertificate() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsAcmCertificateRead, + Schema: map[string]*schema.Schema{ + "domain": { + Type: schema.TypeString, + Required: true, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "statuses": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "types": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceAwsAcmCertificateRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).acmconn + params := &acm.ListCertificatesInput{} + + target := d.Get("domain") + + statuses, ok := d.GetOk("statuses") + if ok { + statusStrings := statuses.([]interface{}) + params.CertificateStatuses = expandStringList(statusStrings) + } else { + params.CertificateStatuses = []*string{aws.String("ISSUED")} + } + + var arns []string + err := conn.ListCertificatesPages(params, func(page *acm.ListCertificatesOutput, lastPage bool) bool { + for _, cert := range page.CertificateSummaryList { + if *cert.DomainName == target { + arns = append(arns, *cert.CertificateArn) + } + } + + return true + }) + if err != nil { + return errwrap.Wrapf("Error describing certificates: {{err}}", err) + } + + // filter based on certificate type (imported or aws-issued) + types, ok := d.GetOk("types") + if ok { + typesStrings := expandStringList(types.([]interface{})) + var matchedArns []string + for _, arn := range arns { + params := &acm.DescribeCertificateInput{} + params.CertificateArn = &arn + + description, err := conn.DescribeCertificate(params) + if err != nil { + return errwrap.Wrapf("Error describing certificates: {{err}}", err) + } + + for _, certType := range typesStrings { + if *description.Certificate.Type == *certType { + matchedArns = append(matchedArns, arn) + break + } + } + } + + arns = matchedArns + } + + if len(arns) == 0 { 
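+		// No matches: the domain either has no certificate in this region, or
+		// every candidate was filtered out by the statuses/types criteria above.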
+ return fmt.Errorf("No certificate for domain %q found in this region.", target) + } + if len(arns) > 1 { + return fmt.Errorf("Multiple certificates for domain %q found in this region.", target) + } + + d.SetId(time.Now().UTC().String()) + d.Set("arn", arns[0]) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_alb.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_alb.go new file mode 100644 index 000000000..d314e0ed7 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_alb.go @@ -0,0 +1,127 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/elbv2" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsAlb() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsAlbRead, + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "arn_suffix": { + Type: schema.TypeString, + Computed: true, + }, + + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "internal": { + Type: schema.TypeBool, + Computed: true, + }, + + "security_groups": { + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + Set: schema.HashString, + }, + + "subnets": { + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + Set: schema.HashString, + }, + + "access_logs": { + Type: schema.TypeList, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Computed: true, + }, + "prefix": { + Type: schema.TypeString, + Computed: true, + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + + "enable_deletion_protection": { + Type: schema.TypeBool, + Computed: true, + }, + + "idle_timeout": { + Type: schema.TypeInt, + Computed: true, + }, + + "vpc_id": { + Type: schema.TypeString, + Computed: true, + }, + + "zone_id": { + Type: schema.TypeString, + Computed: true, + }, + + "dns_name": { + Type: schema.TypeString, + Computed: true, + }, + + "tags": tagsSchemaComputed(), + }, + } +} + +func dataSourceAwsAlbRead(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbv2conn + albArn := d.Get("arn").(string) + albName := d.Get("name").(string) + + describeAlbOpts := &elbv2.DescribeLoadBalancersInput{} + switch { + case albArn != "": + describeAlbOpts.LoadBalancerArns = []*string{aws.String(albArn)} + case albName != "": + describeAlbOpts.Names = []*string{aws.String(albName)} + } + + describeResp, err := elbconn.DescribeLoadBalancers(describeAlbOpts) + if err != nil { + return errwrap.Wrapf("Error retrieving ALB: {{err}}", err) + } + if len(describeResp.LoadBalancers) != 1 { + return fmt.Errorf("Search returned %d results, please revise so only one is returned", len(describeResp.LoadBalancers)) + } + d.SetId(*describeResp.LoadBalancers[0].LoadBalancerArn) + + return flattenAwsAlbResource(d, meta, describeResp.LoadBalancers[0]) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_alb_listener.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_alb_listener.go new file mode 100644 index 000000000..63ec4ed1a --- /dev/null +++ 
b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_alb_listener.go @@ -0,0 +1,62 @@ +package aws + +import "github.com/hashicorp/terraform/helper/schema" + +func dataSourceAwsAlbListener() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsAlbListenerRead, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Required: true, + }, + + "load_balancer_arn": { + Type: schema.TypeString, + Computed: true, + }, + "port": { + Type: schema.TypeInt, + Computed: true, + }, + + "protocol": { + Type: schema.TypeString, + Computed: true, + }, + + "ssl_policy": { + Type: schema.TypeString, + Computed: true, + }, + + "certificate_arn": { + Type: schema.TypeString, + Computed: true, + }, + + "default_action": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "target_group_arn": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceAwsAlbListenerRead(d *schema.ResourceData, meta interface{}) error { + d.SetId(d.Get("arn").(string)) + return resourceAwsAlbListenerRead(d, meta) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ami.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ami.go new file mode 100644 index 000000000..3439adaef --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ami.go @@ -0,0 +1,423 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "regexp" + + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsAmi() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsAmiRead, + + Schema: map[string]*schema.Schema{ + "filter": dataSourceFiltersSchema(), + "executable_users": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "name_regex": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateNameRegex, + }, + "most_recent": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + "owners": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + // Computed values. 
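+			// The attributes below are read-only outputs describing the
+			// AMI matched by the query.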
+ "architecture": { + Type: schema.TypeString, + Computed: true, + }, + "creation_date": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "hypervisor": { + Type: schema.TypeString, + Computed: true, + }, + "image_id": { + Type: schema.TypeString, + Computed: true, + }, + "image_location": { + Type: schema.TypeString, + Computed: true, + }, + "image_owner_alias": { + Type: schema.TypeString, + Computed: true, + }, + "image_type": { + Type: schema.TypeString, + Computed: true, + }, + "kernel_id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "platform": { + Type: schema.TypeString, + Computed: true, + }, + "public": { + Type: schema.TypeBool, + Computed: true, + }, + "ramdisk_id": { + Type: schema.TypeString, + Computed: true, + }, + "root_device_name": { + Type: schema.TypeString, + Computed: true, + }, + "root_device_type": { + Type: schema.TypeString, + Computed: true, + }, + "sriov_net_support": { + Type: schema.TypeString, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + "virtualization_type": { + Type: schema.TypeString, + Computed: true, + }, + // Complex computed values + "block_device_mappings": { + Type: schema.TypeSet, + Computed: true, + Set: amiBlockDeviceMappingHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "device_name": { + Type: schema.TypeString, + Computed: true, + }, + "no_device": { + Type: schema.TypeString, + Computed: true, + }, + "virtual_name": { + Type: schema.TypeString, + Computed: true, + }, + "ebs": { + Type: schema.TypeMap, + Computed: true, + }, + }, + }, + }, + "product_codes": { + Type: schema.TypeSet, + Computed: true, + Set: amiProductCodesHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "product_code_id": { + Type: schema.TypeString, + Computed: true, + }, + "product_code_type": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "state_reason": { + Type: schema.TypeMap, + Computed: true, + }, + "tags": dataSourceTagsSchema(), + }, + } +} + +// dataSourceAwsAmiDescriptionRead performs the AMI lookup. +func dataSourceAwsAmiRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + executableUsers, executableUsersOk := d.GetOk("executable_users") + filters, filtersOk := d.GetOk("filter") + nameRegex, nameRegexOk := d.GetOk("name_regex") + owners, ownersOk := d.GetOk("owners") + + if !executableUsersOk && !filtersOk && !nameRegexOk && !ownersOk { + return fmt.Errorf("One of executable_users, filters, name_regex, or owners must be assigned") + } + + params := &ec2.DescribeImagesInput{} + if executableUsersOk { + params.ExecutableUsers = expandStringList(executableUsers.([]interface{})) + } + if filtersOk { + params.Filters = buildAwsDataSourceFilters(filters.(*schema.Set)) + } + if ownersOk { + o := expandStringList(owners.([]interface{})) + + if len(o) > 0 { + params.Owners = o + } + } + + resp, err := conn.DescribeImages(params) + if err != nil { + return err + } + + var filteredImages []*ec2.Image + if nameRegexOk { + r := regexp.MustCompile(nameRegex.(string)) + for _, image := range resp.Images { + // Check for a very rare case where the response would include no + // image name. No name means nothing to attempt a match against, + // therefore we are skipping such image. 
+			if image.Name == nil || *image.Name == "" {
+				log.Printf("[WARN] Unable to find AMI name to match against "+
+					"for image ID %q owned by %q, nothing to do.",
+					*image.ImageId, *image.OwnerId)
+				continue
+			}
+			if r.MatchString(*image.Name) {
+				filteredImages = append(filteredImages, image)
+			}
+		}
+	} else {
+		filteredImages = resp.Images[:]
+	}
+
+	var image *ec2.Image
+	if len(filteredImages) < 1 {
+		return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
+	}
+
+	if len(filteredImages) > 1 {
+		recent := d.Get("most_recent").(bool)
+		log.Printf("[DEBUG] aws_ami - multiple results found and `most_recent` is set to: %t", recent)
+		if recent {
+			image = mostRecentAmi(filteredImages)
+		} else {
+			return fmt.Errorf("Your query returned more than one result. Please try more " +
+				"specific search criteria, or set the `most_recent` attribute to true.")
+		}
+	} else {
+		// Query returned single result.
+		image = filteredImages[0]
+	}
+
+	log.Printf("[DEBUG] aws_ami - Single AMI found: %s", *image.ImageId)
+	return amiDescriptionAttributes(d, image)
+}
+
+// Returns the most recent AMI out of a slice of images.
+func mostRecentAmi(images []*ec2.Image) *ec2.Image {
+	return sortImages(images)[0]
+}
+
+// Populate the numerous fields that the image description returns.
+func amiDescriptionAttributes(d *schema.ResourceData, image *ec2.Image) error {
+	// Simple attributes first
+	d.SetId(*image.ImageId)
+	d.Set("architecture", image.Architecture)
+	d.Set("creation_date", image.CreationDate)
+	if image.Description != nil {
+		d.Set("description", image.Description)
+	}
+	d.Set("hypervisor", image.Hypervisor)
+	d.Set("image_id", image.ImageId)
+	d.Set("image_location", image.ImageLocation)
+	if image.ImageOwnerAlias != nil {
+		d.Set("image_owner_alias", image.ImageOwnerAlias)
+	}
+	d.Set("image_type", image.ImageType)
+	if image.KernelId != nil {
+		d.Set("kernel_id", image.KernelId)
+	}
+	d.Set("name", image.Name)
+	d.Set("owner_id", image.OwnerId)
+	if image.Platform != nil {
+		d.Set("platform", image.Platform)
+	}
+	d.Set("public", image.Public)
+	if image.RamdiskId != nil {
+		d.Set("ramdisk_id", image.RamdiskId)
+	}
+	if image.RootDeviceName != nil {
+		d.Set("root_device_name", image.RootDeviceName)
+	}
+	d.Set("root_device_type", image.RootDeviceType)
+	if image.SriovNetSupport != nil {
+		d.Set("sriov_net_support", image.SriovNetSupport)
+	}
+	d.Set("state", image.State)
+	d.Set("virtualization_type", image.VirtualizationType)
+	// Complex types get their own functions
+	if err := d.Set("block_device_mappings", amiBlockDeviceMappings(image.BlockDeviceMappings)); err != nil {
+		return err
+	}
+	if err := d.Set("product_codes", amiProductCodes(image.ProductCodes)); err != nil {
+		return err
+	}
+	if err := d.Set("state_reason", amiStateReason(image.StateReason)); err != nil {
+		return err
+	}
+	if err := d.Set("tags", dataSourceTags(image.Tags)); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Returns a set of block device mappings.
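+// EBS attributes are rendered as strings (via fmt.Sprintf) because the "ebs"
+// attribute is a schema.TypeMap, which carries only string values.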
+func amiBlockDeviceMappings(m []*ec2.BlockDeviceMapping) *schema.Set { + s := &schema.Set{ + F: amiBlockDeviceMappingHash, + } + for _, v := range m { + mapping := map[string]interface{}{ + "device_name": *v.DeviceName, + } + if v.Ebs != nil { + ebs := map[string]interface{}{ + "delete_on_termination": fmt.Sprintf("%t", *v.Ebs.DeleteOnTermination), + "encrypted": fmt.Sprintf("%t", *v.Ebs.Encrypted), + "volume_size": fmt.Sprintf("%d", *v.Ebs.VolumeSize), + "volume_type": *v.Ebs.VolumeType, + } + // Iops is not always set + if v.Ebs.Iops != nil { + ebs["iops"] = fmt.Sprintf("%d", *v.Ebs.Iops) + } else { + ebs["iops"] = "0" + } + // snapshot id may not be set + if v.Ebs.SnapshotId != nil { + ebs["snapshot_id"] = *v.Ebs.SnapshotId + } + + mapping["ebs"] = ebs + } + if v.VirtualName != nil { + mapping["virtual_name"] = *v.VirtualName + } + log.Printf("[DEBUG] aws_ami - adding block device mapping: %v", mapping) + s.Add(mapping) + } + return s +} + +// Returns a set of product codes. +func amiProductCodes(m []*ec2.ProductCode) *schema.Set { + s := &schema.Set{ + F: amiProductCodesHash, + } + for _, v := range m { + code := map[string]interface{}{ + "product_code_id": *v.ProductCodeId, + "product_code_type": *v.ProductCodeType, + } + s.Add(code) + } + return s +} + +// Returns the state reason. +func amiStateReason(m *ec2.StateReason) map[string]interface{} { + s := make(map[string]interface{}) + if m != nil { + s["code"] = *m.Code + s["message"] = *m.Message + } else { + s["code"] = "UNSET" + s["message"] = "UNSET" + } + return s +} + +// Generates a hash for the set hash function used by the block_device_mappings +// attribute. +func amiBlockDeviceMappingHash(v interface{}) int { + var buf bytes.Buffer + // All keys added in alphabetical order. + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string))) + if d, ok := m["ebs"]; ok { + if len(d.(map[string]interface{})) > 0 { + e := d.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", e["delete_on_termination"].(string))) + buf.WriteString(fmt.Sprintf("%s-", e["encrypted"].(string))) + buf.WriteString(fmt.Sprintf("%s-", e["iops"].(string))) + buf.WriteString(fmt.Sprintf("%s-", e["volume_size"].(string))) + buf.WriteString(fmt.Sprintf("%s-", e["volume_type"].(string))) + } + } + if d, ok := m["no_device"]; ok { + buf.WriteString(fmt.Sprintf("%s-", d.(string))) + } + if d, ok := m["virtual_name"]; ok { + buf.WriteString(fmt.Sprintf("%s-", d.(string))) + } + if d, ok := m["snapshot_id"]; ok { + buf.WriteString(fmt.Sprintf("%s-", d.(string))) + } + return hashcode.String(buf.String()) +} + +// Generates a hash for the set hash function used by the product_codes +// attribute. +func amiProductCodesHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + // All keys added in alphabetical order. 
+ buf.WriteString(fmt.Sprintf("%s-", m["product_code_id"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["product_code_type"].(string))) + return hashcode.String(buf.String()) +} + +func validateNameRegex(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if _, err := regexp.Compile(value); err != nil { + errors = append(errors, fmt.Errorf( + "%q contains an invalid regular expression: %s", + k, err)) + } + return +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ami_ids.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ami_ids.go new file mode 100644 index 000000000..20df34ac3 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ami_ids.go @@ -0,0 +1,111 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsAmiIds() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsAmiIdsRead, + + Schema: map[string]*schema.Schema{ + "filter": dataSourceFiltersSchema(), + "executable_users": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "name_regex": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateNameRegex, + }, + "owners": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "tags": dataSourceTagsSchema(), + "ids": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceAwsAmiIdsRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + executableUsers, executableUsersOk := d.GetOk("executable_users") + filters, filtersOk := d.GetOk("filter") + nameRegex, nameRegexOk := d.GetOk("name_regex") + owners, ownersOk := d.GetOk("owners") + + if executableUsersOk == false && filtersOk == false && nameRegexOk == false && ownersOk == false { + return fmt.Errorf("One of executable_users, filters, name_regex, or owners must be assigned") + } + + params := &ec2.DescribeImagesInput{} + + if executableUsersOk { + params.ExecutableUsers = expandStringList(executableUsers.([]interface{})) + } + if filtersOk { + params.Filters = buildAwsDataSourceFilters(filters.(*schema.Set)) + } + if ownersOk { + o := expandStringList(owners.([]interface{})) + + if len(o) > 0 { + params.Owners = o + } + } + + resp, err := conn.DescribeImages(params) + if err != nil { + return err + } + + var filteredImages []*ec2.Image + imageIds := make([]string, 0) + + if nameRegexOk { + r := regexp.MustCompile(nameRegex.(string)) + for _, image := range resp.Images { + // Check for a very rare case where the response would include no + // image name. No name means nothing to attempt a match against, + // therefore we are skipping such image. 
+ if image.Name == nil || *image.Name == "" { + log.Printf("[WARN] Unable to find AMI name to match against "+ + "for image ID %q owned by %q, nothing to do.", + *image.ImageId, *image.OwnerId) + continue + } + if r.MatchString(*image.Name) { + filteredImages = append(filteredImages, image) + } + } + } else { + filteredImages = resp.Images[:] + } + + for _, image := range sortImages(filteredImages) { + imageIds = append(imageIds, *image.ImageId) + } + + d.SetId(fmt.Sprintf("%d", hashcode.String(params.String()))) + d.Set("ids", imageIds) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_autoscaling_groups.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_autoscaling_groups.go new file mode 100644 index 000000000..f43f21d4e --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_autoscaling_groups.go @@ -0,0 +1,102 @@ +package aws + +import ( + "fmt" + "log" + "sort" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsAutoscalingGroups() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsAutoscalingGroupsRead, + + Schema: map[string]*schema.Schema{ + "names": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "filter": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "values": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + }, + }, + }, + } +} + +func dataSourceAwsAutoscalingGroupsRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).autoscalingconn + + log.Printf("[DEBUG] Reading Autoscaling Groups.") + d.SetId(time.Now().UTC().String()) + + var raw []string + + tf := d.Get("filter").(*schema.Set) + if tf.Len() > 0 { + out, err := conn.DescribeTags(&autoscaling.DescribeTagsInput{ + Filters: expandAsgTagFilters(tf.List()), + }) + if err != nil { + return err + } + + raw = make([]string, len(out.Tags)) + for i, v := range out.Tags { + raw[i] = *v.ResourceId + } + } else { + + resp, err := conn.DescribeAutoScalingGroups(&autoscaling.DescribeAutoScalingGroupsInput{}) + if err != nil { + return fmt.Errorf("Error fetching Autoscaling Groups: %s", err) + } + + raw = make([]string, len(resp.AutoScalingGroups)) + for i, v := range resp.AutoScalingGroups { + raw[i] = *v.AutoScalingGroupName + } + } + + sort.Strings(raw) + + if err := d.Set("names", raw); err != nil { + return fmt.Errorf("[WARN] Error setting Autoscaling Group Names: %s", err) + } + + return nil + +} + +func expandAsgTagFilters(in []interface{}) []*autoscaling.Filter { + out := make([]*autoscaling.Filter, len(in), len(in)) + for i, filter := range in { + m := filter.(map[string]interface{}) + values := expandStringList(m["values"].(*schema.Set).List()) + + out[i] = &autoscaling.Filter{ + Name: aws.String(m["name"].(string)), + Values: values, + } + } + return out +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_availability_zone.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_availability_zone.go new file mode 100644 index 000000000..edab7c926 --- /dev/null +++ 
b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_availability_zone.go @@ -0,0 +1,89 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsAvailabilityZone() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsAvailabilityZoneRead, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "name_suffix": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "state": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + } +} + +func dataSourceAwsAvailabilityZoneRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + req := &ec2.DescribeAvailabilityZonesInput{} + + if name := d.Get("name"); name != "" { + req.ZoneNames = []*string{aws.String(name.(string))} + } + + req.Filters = buildEC2AttributeFilterList( + map[string]string{ + "state": d.Get("state").(string), + }, + ) + if len(req.Filters) == 0 { + // Don't send an empty filters list; the EC2 API won't accept it. + req.Filters = nil + } + + log.Printf("[DEBUG] DescribeAvailabilityZones %s\n", req) + resp, err := conn.DescribeAvailabilityZones(req) + if err != nil { + return err + } + if resp == nil || len(resp.AvailabilityZones) == 0 { + return fmt.Errorf("no matching AZ found") + } + if len(resp.AvailabilityZones) > 1 { + return fmt.Errorf("multiple AZs matched; use additional constraints to reduce matches to a single AZ") + } + + az := resp.AvailabilityZones[0] + + // As a convenience when working with AZs generically, we expose + // the AZ suffix alone, without the region name. + // This can be used e.g. to create lookup tables by AZ letter that + // work regardless of region. 
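+	// For example, zone name "us-west-2a" in region "us-west-2" yields the
+	// suffix "a".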
+ nameSuffix := (*az.ZoneName)[len(*az.RegionName):] + + d.SetId(*az.ZoneName) + d.Set("id", az.ZoneName) + d.Set("name", az.ZoneName) + d.Set("name_suffix", nameSuffix) + d.Set("region", az.RegionName) + d.Set("state", az.State) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_availability_zones.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_availability_zones.go new file mode 100644 index 000000000..dcc09438f --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_availability_zones.go @@ -0,0 +1,87 @@ +package aws + +import ( + "fmt" + "log" + "sort" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsAvailabilityZones() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsAvailabilityZonesRead, + + Schema: map[string]*schema.Schema{ + "names": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "state": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateStateType, + }, + }, + } +} + +func dataSourceAwsAvailabilityZonesRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + log.Printf("[DEBUG] Reading Availability Zones.") + d.SetId(time.Now().UTC().String()) + + request := &ec2.DescribeAvailabilityZonesInput{} + + if v, ok := d.GetOk("state"); ok { + request.Filters = []*ec2.Filter{ + { + Name: aws.String("state"), + Values: []*string{aws.String(v.(string))}, + }, + } + } + + log.Printf("[DEBUG] Availability Zones request options: %#v", *request) + + resp, err := conn.DescribeAvailabilityZones(request) + if err != nil { + return fmt.Errorf("Error fetching Availability Zones: %s", err) + } + + raw := make([]string, len(resp.AvailabilityZones)) + for i, v := range resp.AvailabilityZones { + raw[i] = *v.ZoneName + } + + sort.Strings(raw) + + if err := d.Set("names", raw); err != nil { + return fmt.Errorf("[WARN] Error setting Availability Zones: %s", err) + } + + return nil +} + +func validateStateType(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + validState := map[string]bool{ + "available": true, + "information": true, + "impaired": true, + "unavailable": true, + } + + if !validState[value] { + errors = append(errors, fmt.Errorf( + "%q contains an invalid Availability Zone state %q. 
Valid states are: %q, %q, %q and %q.", + k, value, "available", "information", "impaired", "unavailable")) + } + return +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_billing_service_account.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_billing_service_account.go new file mode 100644 index 000000000..23ec40843 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_billing_service_account.go @@ -0,0 +1,31 @@ +package aws + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" +) + +// See http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/billing-getting-started.html#step-2 +var billingAccountId = "386209384616" + +func dataSourceAwsBillingServiceAccount() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsBillingServiceAccountRead, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAwsBillingServiceAccountRead(d *schema.ResourceData, meta interface{}) error { + d.SetId(billingAccountId) + + d.Set("arn", fmt.Sprintf("arn:%s:iam::%s:root", meta.(*AWSClient).partition, billingAccountId)) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_caller_identity.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_caller_identity.go new file mode 100644 index 000000000..a2adcef34 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_caller_identity.go @@ -0,0 +1,50 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/service/sts" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsCallerIdentity() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsCallerIdentityRead, + + Schema: map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeString, + Computed: true, + }, + + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "user_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAwsCallerIdentityRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).stsconn + + res, err := client.GetCallerIdentity(&sts.GetCallerIdentityInput{}) + if err != nil { + return fmt.Errorf("Error getting Caller Identity: %v", err) + } + + log.Printf("[DEBUG] Received Caller Identity: %s", res) + + d.SetId(time.Now().UTC().String()) + d.Set("account_id", res.Account) + d.Set("arn", res.Arn) + d.Set("user_id", res.UserId) + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_canonical_user_id.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_canonical_user_id.go new file mode 100644 index 000000000..ba6a0b098 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_canonical_user_id.go @@ -0,0 +1,48 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsCanonicalUserId() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsCanonicalUserIdRead, + + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "display_name": { + Type: schema.TypeString, + Computed: true, + }, 
+ }, + } +} + +func dataSourceAwsCanonicalUserIdRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).s3conn + + log.Printf("[DEBUG] Listing S3 buckets.") + + req := &s3.ListBucketsInput{} + resp, err := conn.ListBuckets(req) + if err != nil { + return err + } + if resp == nil || resp.Owner == nil { + return fmt.Errorf("no canonical user ID found") + } + + d.SetId(aws.StringValue(resp.Owner.ID)) + d.Set("id", resp.Owner.ID) + d.Set("display_name", resp.Owner.DisplayName) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cloudformation_stack.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cloudformation_stack.go new file mode 100644 index 000000000..b834e0a29 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_cloudformation_stack.go @@ -0,0 +1,122 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudformation" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsCloudFormationStack() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsCloudFormationStackRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "template_body": { + Type: schema.TypeString, + Computed: true, + StateFunc: func(v interface{}) string { + template, _ := normalizeCloudFormationTemplate(v) + return template + }, + }, + "capabilities": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "disable_rollback": { + Type: schema.TypeBool, + Computed: true, + }, + "notification_arns": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "parameters": { + Type: schema.TypeMap, + Computed: true, + }, + "outputs": { + Type: schema.TypeMap, + Computed: true, + }, + "timeout_in_minutes": { + Type: schema.TypeInt, + Computed: true, + }, + "iam_role_arn": { + Type: schema.TypeString, + Computed: true, + }, + "tags": { + Type: schema.TypeMap, + Computed: true, + }, + }, + } +} + +func dataSourceAwsCloudFormationStackRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cfconn + name := d.Get("name").(string) + input := cloudformation.DescribeStacksInput{ + StackName: aws.String(name), + } + + out, err := conn.DescribeStacks(&input) + if err != nil { + return fmt.Errorf("Failed describing CloudFormation stack (%s): %s", name, err) + } + if l := len(out.Stacks); l != 1 { + return fmt.Errorf("Expected 1 CloudFormation stack (%s), found %d", name, l) + } + stack := out.Stacks[0] + d.SetId(*stack.StackId) + + d.Set("description", stack.Description) + d.Set("disable_rollback", stack.DisableRollback) + d.Set("timeout_in_minutes", stack.TimeoutInMinutes) + d.Set("iam_role_arn", stack.RoleARN) + + if len(stack.NotificationARNs) > 0 { + d.Set("notification_arns", schema.NewSet(schema.HashString, flattenStringList(stack.NotificationARNs))) + } + + d.Set("parameters", flattenAllCloudFormationParameters(stack.Parameters)) + d.Set("tags", flattenCloudFormationTags(stack.Tags)) + d.Set("outputs", flattenCloudFormationOutputs(stack.Outputs)) + + if len(stack.Capabilities) > 0 { + d.Set("capabilities", schema.NewSet(schema.HashString, 
flattenStringList(stack.Capabilities))) + } + + tInput := cloudformation.GetTemplateInput{ + StackName: aws.String(name), + } + tOut, err := conn.GetTemplate(&tInput) + if err != nil { + return err + } + + template, err := normalizeCloudFormationTemplate(*tOut.TemplateBody) + if err != nil { + return errwrap.Wrapf("template body contains an invalid JSON or YAML: {{err}}", err) + } + d.Set("template_body", template) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_common_schema.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_common_schema.go new file mode 100644 index 000000000..839f8a67b --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_common_schema.go @@ -0,0 +1,91 @@ +package aws + +import ( + "bytes" + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceTagsHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["key"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["value"].(string))) + return hashcode.String(buf.String()) +} + +func dataSourceTags(m []*ec2.Tag) *schema.Set { + s := &schema.Set{ + F: dataSourceTagsHash, + } + for _, v := range m { + tag := map[string]interface{}{ + "key": *v.Key, + "value": *v.Value, + } + s.Add(tag) + } + return s +} + +func buildAwsDataSourceFilters(set *schema.Set) []*ec2.Filter { + var filters []*ec2.Filter + for _, v := range set.List() { + m := v.(map[string]interface{}) + var filterValues []*string + for _, e := range m["values"].([]interface{}) { + filterValues = append(filterValues, aws.String(e.(string))) + } + filters = append(filters, &ec2.Filter{ + Name: aws.String(m["name"].(string)), + Values: filterValues, + }) + } + return filters +} + +func dataSourceFiltersSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + + "values": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + } +} + +func dataSourceTagsSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeSet, + Computed: true, + Set: dataSourceTagsHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_db_instance.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_db_instance.go new file mode 100644 index 000000000..753319a84 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_db_instance.go @@ -0,0 +1,290 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/rds" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsDbInstance() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsDbInstanceRead, + + Schema: map[string]*schema.Schema{ + "db_instance_identifier": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "address": 
{ + Type: schema.TypeString, + Computed: true, + }, + + "allocated_storage": { + Type: schema.TypeInt, + Computed: true, + }, + + "auto_minor_version_upgrade": { + Type: schema.TypeBool, + Computed: true, + }, + + "availability_zone": { + Type: schema.TypeString, + Computed: true, + }, + + "backup_retention_period": { + Type: schema.TypeInt, + Computed: true, + }, + + "db_cluster_identifier": { + Type: schema.TypeString, + Computed: true, + }, + + "db_instance_arn": { + Type: schema.TypeString, + Computed: true, + }, + + "db_instance_class": { + Type: schema.TypeString, + Computed: true, + }, + + "db_name": { + Type: schema.TypeString, + Computed: true, + }, + + "db_parameter_groups": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "db_security_groups": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "db_subnet_group": { + Type: schema.TypeString, + Computed: true, + }, + + "db_instance_port": { + Type: schema.TypeInt, + Computed: true, + }, + + "endpoint": { + Type: schema.TypeString, + Computed: true, + }, + + "engine": { + Type: schema.TypeString, + Computed: true, + }, + + "engine_version": { + Type: schema.TypeString, + Computed: true, + }, + + "hosted_zone_id": { + Type: schema.TypeString, + Computed: true, + }, + + "iops": { + Type: schema.TypeInt, + Computed: true, + }, + + "kms_key_id": { + Type: schema.TypeString, + Computed: true, + }, + + "license_model": { + Type: schema.TypeString, + Computed: true, + }, + + "master_username": { + Type: schema.TypeString, + Computed: true, + }, + + "monitoring_interval": { + Type: schema.TypeInt, + Computed: true, + }, + + "monitoring_role_arn": { + Type: schema.TypeString, + Computed: true, + }, + + "multi_az": { + Type: schema.TypeBool, + Computed: true, + }, + + "option_group_memberships": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "port": { + Type: schema.TypeInt, + Computed: true, + }, + + "preferred_backup_window": { + Type: schema.TypeString, + Computed: true, + }, + + "preferred_maintenance_window": { + Type: schema.TypeString, + Computed: true, + }, + + "publicly_accessible": { + Type: schema.TypeBool, + Computed: true, + }, + + "storage_encrypted": { + Type: schema.TypeBool, + Computed: true, + }, + + "storage_type": { + Type: schema.TypeString, + Computed: true, + }, + + "timezone": { + Type: schema.TypeString, + Computed: true, + }, + + "vpc_security_groups": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "replicate_source_db": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + + opts := rds.DescribeDBInstancesInput{ + DBInstanceIdentifier: aws.String(d.Get("db_instance_identifier").(string)), + } + + log.Printf("[DEBUG] DB Instance describe configuration: %#v", opts) + + resp, err := conn.DescribeDBInstances(&opts) + if err != nil { + return err + } + + if len(resp.DBInstances) < 1 { + return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.") + } + if len(resp.DBInstances) > 1 { + return fmt.Errorf("Your query returned more than one result. 
Please try a more specific search criteria.") + } + + dbInstance := *resp.DBInstances[0] + + d.SetId(d.Get("db_instance_identifier").(string)) + + d.Set("allocated_storage", dbInstance.AllocatedStorage) + d.Set("auto_minor_upgrade_enabled", dbInstance.AutoMinorVersionUpgrade) + d.Set("availability_zone", dbInstance.AvailabilityZone) + d.Set("backup_retention_period", dbInstance.BackupRetentionPeriod) + d.Set("db_cluster_identifier", dbInstance.DBClusterIdentifier) + d.Set("db_instance_arn", dbInstance.DBClusterIdentifier) + d.Set("db_instance_class", dbInstance.DBInstanceClass) + d.Set("db_name", dbInstance.DBName) + + var parameterGroups []string + for _, v := range dbInstance.DBParameterGroups { + parameterGroups = append(parameterGroups, *v.DBParameterGroupName) + } + if err := d.Set("db_parameter_groups", parameterGroups); err != nil { + return fmt.Errorf("[DEBUG] Error setting db_parameter_groups attribute: %#v, error: %#v", parameterGroups, err) + } + + var dbSecurityGroups []string + for _, v := range dbInstance.DBSecurityGroups { + dbSecurityGroups = append(dbSecurityGroups, *v.DBSecurityGroupName) + } + if err := d.Set("db_security_groups", dbSecurityGroups); err != nil { + return fmt.Errorf("[DEBUG] Error setting db_security_groups attribute: %#v, error: %#v", dbSecurityGroups, err) + } + + d.Set("db_subnet_group", dbInstance.DBSubnetGroup) + d.Set("db_instance_port", dbInstance.DbInstancePort) + d.Set("engine", dbInstance.Engine) + d.Set("engine_version", dbInstance.EngineVersion) + d.Set("iops", dbInstance.Iops) + d.Set("kms_key_id", dbInstance.KmsKeyId) + d.Set("license_model", dbInstance.LicenseModel) + d.Set("master_username", dbInstance.MasterUsername) + d.Set("monitoring_interval", dbInstance.MonitoringInterval) + d.Set("monitoring_role_arn", dbInstance.MonitoringRoleArn) + d.Set("address", dbInstance.Endpoint.Address) + d.Set("port", dbInstance.Endpoint.Port) + d.Set("hosted_zone_id", dbInstance.Endpoint.HostedZoneId) + d.Set("endpoint", fmt.Sprintf("%s:%d", *dbInstance.Endpoint.Address, *dbInstance.Endpoint.Port)) + + var optionGroups []string + for _, v := range dbInstance.OptionGroupMemberships { + optionGroups = append(optionGroups, *v.OptionGroupName) + } + if err := d.Set("option_group_memberships", optionGroups); err != nil { + return fmt.Errorf("[DEBUG] Error setting option_group_memberships attribute: %#v, error: %#v", optionGroups, err) + } + + d.Set("preferred_backup_window", dbInstance.PreferredBackupWindow) + d.Set("preferred_maintenance_window", dbInstance.PreferredMaintenanceWindow) + d.Set("publicly_accessible", dbInstance.PubliclyAccessible) + d.Set("storage_encrypted", dbInstance.StorageEncrypted) + d.Set("storage_type", dbInstance.StorageType) + d.Set("timezone", dbInstance.Timezone) + d.Set("replicate_source_db", dbInstance.ReadReplicaSourceDBInstanceIdentifier) + + var vpcSecurityGroups []string + for _, v := range dbInstance.VpcSecurityGroups { + vpcSecurityGroups = append(vpcSecurityGroups, *v.VpcSecurityGroupId) + } + if err := d.Set("vpc_security_groups", vpcSecurityGroups); err != nil { + return fmt.Errorf("[DEBUG] Error setting vpc_security_groups attribute: %#v, error: %#v", vpcSecurityGroups, err) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_db_snapshot.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_db_snapshot.go new file mode 100644 index 000000000..1f381e405 --- /dev/null +++ 
@@ -0,0 +1,217 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"sort"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/rds"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func dataSourceAwsDbSnapshot() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceAwsDbSnapshotRead,
+
+		Schema: map[string]*schema.Schema{
+			// selection criteria
+			"db_instance_identifier": {
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+
+			"db_snapshot_identifier": {
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+
+			"snapshot_type": {
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+
+			"include_shared": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				ForceNew: true,
+				Default:  false,
+			},
+
+			"include_public": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				ForceNew: true,
+				Default:  false,
+			},
+			"most_recent": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				Default:  false,
+				ForceNew: true,
+			},
+
+			// Computed values returned
+			"allocated_storage": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+			"availability_zone": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"db_snapshot_arn": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"encrypted": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"engine": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"engine_version": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"iops": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+			"kms_key_id": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"license_model": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"option_group_name": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"port": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+			"source_db_snapshot_identifier": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"source_region": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"snapshot_create_time": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"status": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"storage_type": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"vpc_id": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func dataSourceAwsDbSnapshotRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).rdsconn
+
+	instanceIdentifier, instanceIdentifierOk := d.GetOk("db_instance_identifier")
+	snapshotIdentifier, snapshotIdentifierOk := d.GetOk("db_snapshot_identifier")
+
+	if !instanceIdentifierOk && !snapshotIdentifierOk {
+		return fmt.Errorf("One of db_snapshot_identifier or db_instance_identifier must be assigned")
+	}
+
+	params := &rds.DescribeDBSnapshotsInput{
+		IncludePublic: aws.Bool(d.Get("include_public").(bool)),
+		IncludeShared: aws.Bool(d.Get("include_shared").(bool)),
+	}
+	if v, ok := d.GetOk("snapshot_type"); ok {
+		params.SnapshotType = aws.String(v.(string))
+	}
+	if instanceIdentifierOk {
+		params.DBInstanceIdentifier = aws.String(instanceIdentifier.(string))
+	}
+	if snapshotIdentifierOk {
+		params.DBSnapshotIdentifier = aws.String(snapshotIdentifier.(string))
+	}
+
+	resp, err := conn.DescribeDBSnapshots(params)
+	if err != nil {
+		return err
+	}
+
+	if len(resp.DBSnapshots) < 1 {
+		return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
Please change your search criteria and try again.") + } + + var snapshot *rds.DBSnapshot + if len(resp.DBSnapshots) > 1 { + recent := d.Get("most_recent").(bool) + log.Printf("[DEBUG] aws_db_snapshot - multiple results found and `most_recent` is set to: %t", recent) + if recent { + snapshot = mostRecentDbSnapshot(resp.DBSnapshots) + } else { + return fmt.Errorf("Your query returned more than one result. Please try a more specific search criteria.") + } + } else { + snapshot = resp.DBSnapshots[0] + } + + return dbSnapshotDescriptionAttributes(d, snapshot) +} + +type rdsSnapshotSort []*rds.DBSnapshot + +func (a rdsSnapshotSort) Len() int { return len(a) } +func (a rdsSnapshotSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a rdsSnapshotSort) Less(i, j int) bool { + return (*a[i].SnapshotCreateTime).Before(*a[j].SnapshotCreateTime) +} + +func mostRecentDbSnapshot(snapshots []*rds.DBSnapshot) *rds.DBSnapshot { + sortedSnapshots := snapshots + sort.Sort(rdsSnapshotSort(sortedSnapshots)) + return sortedSnapshots[len(sortedSnapshots)-1] +} + +func dbSnapshotDescriptionAttributes(d *schema.ResourceData, snapshot *rds.DBSnapshot) error { + d.SetId(*snapshot.DBInstanceIdentifier) + d.Set("db_instance_identifier", snapshot.DBInstanceIdentifier) + d.Set("db_snapshot_identifier", snapshot.DBSnapshotIdentifier) + d.Set("snapshot_type", snapshot.SnapshotType) + d.Set("allocated_storage", snapshot.AllocatedStorage) + d.Set("availability_zone", snapshot.AvailabilityZone) + d.Set("db_snapshot_arn", snapshot.DBSnapshotArn) + d.Set("encrypted", snapshot.Encrypted) + d.Set("engine", snapshot.Engine) + d.Set("engine_version", snapshot.EngineVersion) + d.Set("iops", snapshot.Iops) + d.Set("kms_key_id", snapshot.KmsKeyId) + d.Set("license_model", snapshot.LicenseModel) + d.Set("option_group_name", snapshot.OptionGroupName) + d.Set("port", snapshot.Port) + d.Set("source_db_snapshot_identifier", snapshot.SourceDBSnapshotIdentifier) + d.Set("source_region", snapshot.SourceRegion) + d.Set("status", snapshot.Status) + d.Set("vpc_id", snapshot.VpcId) + d.Set("snapshot_create_time", snapshot.SnapshotCreateTime.Format(time.RFC3339)) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_snapshot.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_snapshot.go new file mode 100644 index 000000000..c0e386643 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_snapshot.go @@ -0,0 +1,162 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsEbsSnapshot() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsEbsSnapshotRead, + + Schema: map[string]*schema.Schema{ + //selection criteria + "filter": dataSourceFiltersSchema(), + "most_recent": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + "owners": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "snapshot_ids": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "restorable_by_user_ids": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + //Computed values returned + "snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "volume_id": { + Type: 
schema.TypeString, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "owner_alias": { + Type: schema.TypeString, + Computed: true, + }, + "encrypted": { + Type: schema.TypeBool, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "volume_size": { + Type: schema.TypeInt, + Computed: true, + }, + "kms_key_id": { + Type: schema.TypeString, + Computed: true, + }, + "data_encryption_key_id": { + Type: schema.TypeString, + Computed: true, + }, + "tags": dataSourceTagsSchema(), + }, + } +} + +func dataSourceAwsEbsSnapshotRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + restorableUsers, restorableUsersOk := d.GetOk("restorable_by_user_ids") + filters, filtersOk := d.GetOk("filter") + snapshotIds, snapshotIdsOk := d.GetOk("snapshot_ids") + owners, ownersOk := d.GetOk("owners") + + if !restorableUsersOk && !filtersOk && !snapshotIdsOk && !ownersOk { + return fmt.Errorf("One of snapshot_ids, filters, restorable_by_user_ids, or owners must be assigned") + } + + params := &ec2.DescribeSnapshotsInput{} + if restorableUsersOk { + params.RestorableByUserIds = expandStringList(restorableUsers.([]interface{})) + } + if filtersOk { + params.Filters = buildAwsDataSourceFilters(filters.(*schema.Set)) + } + if ownersOk { + params.OwnerIds = expandStringList(owners.([]interface{})) + } + if snapshotIdsOk { + params.SnapshotIds = expandStringList(snapshotIds.([]interface{})) + } + + resp, err := conn.DescribeSnapshots(params) + if err != nil { + return err + } + + var snapshot *ec2.Snapshot + if len(resp.Snapshots) < 1 { + return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.") + } + + if len(resp.Snapshots) > 1 { + recent := d.Get("most_recent").(bool) + log.Printf("[DEBUG] aws_ebs_snapshot - multiple results found and `most_recent` is set to: %t", recent) + if recent { + snapshot = mostRecentSnapshot(resp.Snapshots) + } else { + return fmt.Errorf("Your query returned more than one result. 
Please try a more specific search criteria.") + } + } else { + snapshot = resp.Snapshots[0] + } + + //Single Snapshot found so set to state + return snapshotDescriptionAttributes(d, snapshot) +} + +func mostRecentSnapshot(snapshots []*ec2.Snapshot) *ec2.Snapshot { + return sortSnapshots(snapshots)[0] +} + +func snapshotDescriptionAttributes(d *schema.ResourceData, snapshot *ec2.Snapshot) error { + d.SetId(*snapshot.SnapshotId) + d.Set("snapshot_id", snapshot.SnapshotId) + d.Set("volume_id", snapshot.VolumeId) + d.Set("data_encryption_key_id", snapshot.DataEncryptionKeyId) + d.Set("description", snapshot.Description) + d.Set("encrypted", snapshot.Encrypted) + d.Set("kms_key_id", snapshot.KmsKeyId) + d.Set("volume_size", snapshot.VolumeSize) + d.Set("state", snapshot.State) + d.Set("owner_id", snapshot.OwnerId) + d.Set("owner_alias", snapshot.OwnerAlias) + + if err := d.Set("tags", dataSourceTags(snapshot.Tags)); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_snapshot_ids.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_snapshot_ids.go new file mode 100644 index 000000000..bd4f2ad8b --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_snapshot_ids.go @@ -0,0 +1,77 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsEbsSnapshotIds() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsEbsSnapshotIdsRead, + + Schema: map[string]*schema.Schema{ + "filter": dataSourceFiltersSchema(), + "owners": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "restorable_by_user_ids": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "tags": dataSourceTagsSchema(), + "ids": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceAwsEbsSnapshotIdsRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + restorableUsers, restorableUsersOk := d.GetOk("restorable_by_user_ids") + filters, filtersOk := d.GetOk("filter") + owners, ownersOk := d.GetOk("owners") + + if restorableUsers == false && filtersOk == false && ownersOk == false { + return fmt.Errorf("One of filters, restorable_by_user_ids, or owners must be assigned") + } + + params := &ec2.DescribeSnapshotsInput{} + + if restorableUsersOk { + params.RestorableByUserIds = expandStringList(restorableUsers.([]interface{})) + } + if filtersOk { + params.Filters = buildAwsDataSourceFilters(filters.(*schema.Set)) + } + if ownersOk { + params.OwnerIds = expandStringList(owners.([]interface{})) + } + + resp, err := conn.DescribeSnapshots(params) + if err != nil { + return err + } + + snapshotIds := make([]string, 0) + + for _, snapshot := range sortSnapshots(resp.Snapshots) { + snapshotIds = append(snapshotIds, *snapshot.SnapshotId) + } + + d.SetId(fmt.Sprintf("%d", hashcode.String(params.String()))) + d.Set("ids", snapshotIds) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_volume.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_volume.go new file mode 
index 000000000..7794ecf28
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ebs_volume.go
@@ -0,0 +1,136 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"sort"
+
+	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/davecgh/go-spew/spew"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func dataSourceAwsEbsVolume() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceAwsEbsVolumeRead,
+
+		Schema: map[string]*schema.Schema{
+			"filter": dataSourceFiltersSchema(),
+			"most_recent": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				Default:  false,
+				ForceNew: true,
+			},
+			"availability_zone": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"encrypted": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"iops": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+			"volume_type": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"size": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+			"snapshot_id": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"kms_key_id": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"volume_id": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"tags": dataSourceTagsSchema(),
+		},
+	}
+}
+
+func dataSourceAwsEbsVolumeRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	filters, filtersOk := d.GetOk("filter")
+
+	params := &ec2.DescribeVolumesInput{}
+	if filtersOk {
+		params.Filters = buildAwsDataSourceFilters(filters.(*schema.Set))
+	}
+
+	resp, err := conn.DescribeVolumes(params)
+	if err != nil {
+		return err
+	}
+
+	log.Printf("[DEBUG] Found these volumes: %s", spew.Sdump(resp.Volumes))
+
+	filteredVolumes := resp.Volumes[:]
+
+	var volume *ec2.Volume
+	if len(filteredVolumes) < 1 {
+		return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
+	}
+
+	if len(filteredVolumes) > 1 {
+		recent := d.Get("most_recent").(bool)
+		log.Printf("[DEBUG] aws_ebs_volume - multiple results found and `most_recent` is set to: %t", recent)
+		if recent {
+			volume = mostRecentVolume(filteredVolumes)
+		} else {
+			return fmt.Errorf("Your query returned more than one result. Please use more " +
+				"specific search criteria, or set the `most_recent` attribute to true.")
+		}
+	} else {
+		// Query returned single result.
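+		// Exactly one volume matched the given filters, so no
+		// most_recent tie-breaking is needed.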
+		volume = filteredVolumes[0]
+	}
+
+	log.Printf("[DEBUG] aws_ebs_volume - Single Volume found: %s", *volume.VolumeId)
+	return volumeDescriptionAttributes(d, volume)
+}
+
+type volumeSort []*ec2.Volume
+
+func (a volumeSort) Len() int      { return len(a) }
+func (a volumeSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a volumeSort) Less(i, j int) bool {
+	itime := *a[i].CreateTime
+	jtime := *a[j].CreateTime
+	return itime.Unix() < jtime.Unix()
+}
+
+func mostRecentVolume(volumes []*ec2.Volume) *ec2.Volume {
+	sortedVolumes := volumes
+	sort.Sort(volumeSort(sortedVolumes))
+	return sortedVolumes[len(sortedVolumes)-1]
+}
+
+func volumeDescriptionAttributes(d *schema.ResourceData, volume *ec2.Volume) error {
+	d.SetId(*volume.VolumeId)
+	d.Set("volume_id", volume.VolumeId)
+	d.Set("availability_zone", volume.AvailabilityZone)
+	d.Set("encrypted", volume.Encrypted)
+	d.Set("iops", volume.Iops)
+	d.Set("kms_key_id", volume.KmsKeyId)
+	d.Set("size", volume.Size)
+	d.Set("snapshot_id", volume.SnapshotId)
+	d.Set("volume_type", volume.VolumeType)
+
+	if err := d.Set("tags", dataSourceTags(volume.Tags)); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_cluster.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_cluster.go
new file mode 100644
index 000000000..2d8afeeea
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_cluster.go
@@ -0,0 +1,77 @@
+package aws
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/ecs"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func dataSourceAwsEcsCluster() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceAwsEcsClusterRead,
+
+		Schema: map[string]*schema.Schema{
+			"cluster_name": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"arn": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"status": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"pending_tasks_count": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+
+			"running_tasks_count": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+
+			"registered_container_instances_count": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func dataSourceAwsEcsClusterRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ecsconn
+
+	desc, err := conn.DescribeClusters(&ecs.DescribeClustersInput{
+		Clusters: []*string{aws.String(d.Get("cluster_name").(string))},
+	})
+
+	if err != nil {
+		return err
+	}
+
+	for _, cluster := range desc.Clusters {
+		if aws.StringValue(cluster.ClusterName) != d.Get("cluster_name").(string) {
+			continue
+		}
+		d.SetId(aws.StringValue(cluster.ClusterArn))
+		d.Set("arn", cluster.ClusterArn)
+		d.Set("status", cluster.Status)
+		d.Set("pending_tasks_count", cluster.PendingTasksCount)
+		d.Set("running_tasks_count", cluster.RunningTasksCount)
+		d.Set("registered_container_instances_count", cluster.RegisteredContainerInstancesCount)
+	}
+
+	if d.Id() == "" {
+		return fmt.Errorf("cluster with name %q not found", d.Get("cluster_name").(string))
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_container_definition.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_container_definition.go
new file mode 100644
index 000000000..412019ac9
--- /dev/null
+++ 
b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_container_definition.go @@ -0,0 +1,107 @@ +package aws + +import ( + "fmt" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ecs" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsEcsContainerDefinition() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsEcsContainerDefinitionRead, + + Schema: map[string]*schema.Schema{ + "task_definition": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "container_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + // Computed values. + "image": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "image_digest": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "cpu": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + "memory": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + "memory_reservation": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + "disable_networking": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + }, + "docker_labels": &schema.Schema{ + Type: schema.TypeMap, + Computed: true, + Elem: schema.TypeString, + }, + "environment": &schema.Schema{ + Type: schema.TypeMap, + Computed: true, + Elem: schema.TypeString, + }, + }, + } +} + +func dataSourceAwsEcsContainerDefinitionRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ecsconn + + desc, err := conn.DescribeTaskDefinition(&ecs.DescribeTaskDefinitionInput{ + TaskDefinition: aws.String(d.Get("task_definition").(string)), + }) + + if err != nil { + return err + } + + taskDefinition := *desc.TaskDefinition + for _, def := range taskDefinition.ContainerDefinitions { + if aws.StringValue(def.Name) != d.Get("container_name").(string) { + continue + } + + d.SetId(fmt.Sprintf("%s/%s", aws.StringValue(taskDefinition.TaskDefinitionArn), d.Get("container_name").(string))) + d.Set("image", aws.StringValue(def.Image)) + image := aws.StringValue(def.Image) + if strings.Contains(image, ":") { + d.Set("image_digest", strings.Split(image, ":")[1]) + } + d.Set("cpu", aws.Int64Value(def.Cpu)) + d.Set("memory", aws.Int64Value(def.Memory)) + d.Set("memory_reservation", aws.Int64Value(def.MemoryReservation)) + d.Set("disable_networking", aws.BoolValue(def.DisableNetworking)) + d.Set("docker_labels", aws.StringValueMap(def.DockerLabels)) + + var environment = map[string]string{} + for _, keyValuePair := range def.Environment { + environment[aws.StringValue(keyValuePair.Name)] = aws.StringValue(keyValuePair.Value) + } + d.Set("environment", environment) + } + + if d.Id() == "" { + return fmt.Errorf("container with name %q not found in task definition %q", d.Get("container_name").(string), d.Get("task_definition").(string)) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_task_definition.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_task_definition.go new file mode 100644 index 000000000..3a5096a3b --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ecs_task_definition.go @@ -0,0 +1,71 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ecs" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsEcsTaskDefinition() 
*schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsEcsTaskDefinitionRead, + + Schema: map[string]*schema.Schema{ + "task_definition": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + // Computed values. + "family": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "network_mode": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "revision": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + "status": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "task_role_arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAwsEcsTaskDefinitionRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ecsconn + + desc, err := conn.DescribeTaskDefinition(&ecs.DescribeTaskDefinitionInput{ + TaskDefinition: aws.String(d.Get("task_definition").(string)), + }) + + if err != nil { + return fmt.Errorf("Failed getting task definition %s %q", err, d.Get("task_definition").(string)) + } + + taskDefinition := *desc.TaskDefinition + + d.SetId(aws.StringValue(taskDefinition.TaskDefinitionArn)) + d.Set("family", aws.StringValue(taskDefinition.Family)) + d.Set("network_mode", aws.StringValue(taskDefinition.NetworkMode)) + d.Set("revision", aws.Int64Value(taskDefinition.Revision)) + d.Set("status", aws.StringValue(taskDefinition.Status)) + d.Set("task_role_arn", aws.StringValue(taskDefinition.TaskRoleArn)) + + if d.Id() == "" { + return fmt.Errorf("task definition %q not found", d.Get("task_definition").(string)) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_efs_file_system.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_efs_file_system.go new file mode 100644 index 000000000..014ae1353 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_efs_file_system.go @@ -0,0 +1,113 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/efs" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsEfsFileSystem() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsEfsFileSystemRead, + + Schema: map[string]*schema.Schema{ + "creation_token": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateMaxLength(64), + }, + "file_system_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "performance_mode": { + Type: schema.TypeString, + Computed: true, + }, + "tags": tagsSchemaComputed(), + }, + } +} + +func dataSourceAwsEfsFileSystemRead(d *schema.ResourceData, meta interface{}) error { + efsconn := meta.(*AWSClient).efsconn + + describeEfsOpts := &efs.DescribeFileSystemsInput{} + + if v, ok := d.GetOk("creation_token"); ok { + describeEfsOpts.CreationToken = aws.String(v.(string)) + } + + if v, ok := d.GetOk("file_system_id"); ok { + describeEfsOpts.FileSystemId = aws.String(v.(string)) + } + + describeResp, err := efsconn.DescribeFileSystems(describeEfsOpts) + if err != nil { + return errwrap.Wrapf("Error retrieving EFS: {{err}}", err) + } + if len(describeResp.FileSystems) != 1 { + return fmt.Errorf("Search returned %d results, please revise so only one is returned", len(describeResp.FileSystems)) + } + + d.SetId(*describeResp.FileSystems[0].FileSystemId) + + tags := 
make([]*efs.Tag, 0)
+	var marker string
+	for {
+		params := &efs.DescribeTagsInput{
+			FileSystemId: aws.String(d.Id()),
+		}
+		if marker != "" {
+			params.Marker = aws.String(marker)
+		}
+
+		tagsResp, err := efsconn.DescribeTags(params)
+		if err != nil {
+			return fmt.Errorf("Error retrieving tags for EFS file system (%q): %s",
+				d.Id(), err.Error())
+		}
+
+		tags = append(tags, tagsResp.Tags...)
+
+		if tagsResp.NextMarker != nil {
+			marker = *tagsResp.NextMarker
+		} else {
+			break
+		}
+	}
+
+	err = d.Set("tags", tagsToMapEFS(tags))
+	if err != nil {
+		return err
+	}
+
+	var fs *efs.FileSystemDescription
+	for _, f := range describeResp.FileSystems {
+		if d.Id() == *f.FileSystemId {
+			fs = f
+			break
+		}
+	}
+	if fs == nil {
+		log.Printf("[WARN] EFS (%s) not found, removing from state", d.Id())
+		d.SetId("")
+		return nil
+	}
+
+	d.Set("creation_token", fs.CreationToken)
+	d.Set("performance_mode", fs.PerformanceMode)
+	d.Set("file_system_id", fs.FileSystemId)
+
+	return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_eip.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_eip.go
new file mode 100644
index 000000000..0352f48bf
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_eip.go
@@ -0,0 +1,64 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func dataSourceAwsEip() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceAwsEipRead,
+
+		Schema: map[string]*schema.Schema{
+			"id": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+			},
+
+			"public_ip": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func dataSourceAwsEipRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	req := &ec2.DescribeAddressesInput{}
+
+	if id := d.Get("id"); id != "" {
+		req.AllocationIds = []*string{aws.String(id.(string))}
+	}
+
+	if public_ip := d.Get("public_ip"); public_ip != "" {
+		req.PublicIps = []*string{aws.String(public_ip.(string))}
+	}
+
+	log.Printf("[DEBUG] DescribeAddresses %s\n", req)
+	resp, err := conn.DescribeAddresses(req)
+	if err != nil {
+		return err
+	}
+	if resp == nil || len(resp.Addresses) == 0 {
+		return fmt.Errorf("no matching Elastic IP found")
+	}
+	if len(resp.Addresses) > 1 {
+		return fmt.Errorf("multiple Elastic IPs matched; use additional constraints to reduce matches to a single Elastic IP")
+	}
+
+	eip := resp.Addresses[0]
+
+	d.SetId(*eip.AllocationId)
+	d.Set("id", eip.AllocationId)
+	d.Set("public_ip", eip.PublicIp)
+
+	return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elastic_beanstalk_solution_stack.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elastic_beanstalk_solution_stack.go
new file mode 100644
index 000000000..f9bec5bce
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elastic_beanstalk_solution_stack.go
@@ -0,0 +1,105 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"regexp"
+
+	"github.com/aws/aws-sdk-go/service/elasticbeanstalk"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func dataSourceAwsElasticBeanstalkSolutionStack() *schema.Resource {
+	return &schema.Resource{
+		Read: 
dataSourceAwsElasticBeanstalkSolutionStackRead,
+
+		Schema: map[string]*schema.Schema{
+			"name_regex": {
+				Type:         schema.TypeString,
+				Required:     true,
+				ForceNew:     true,
+				ValidateFunc: validateSolutionStackNameRegex,
+			},
+			"most_recent": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				Default:  false,
+				ForceNew: true,
+			},
+			// Computed values.
+			"name": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+// dataSourceAwsElasticBeanstalkSolutionStackRead performs the API lookup.
+func dataSourceAwsElasticBeanstalkSolutionStackRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).elasticbeanstalkconn
+
+	nameRegex := d.Get("name_regex")
+
+	var params *elasticbeanstalk.ListAvailableSolutionStacksInput
+
+	resp, err := conn.ListAvailableSolutionStacks(params)
+	if err != nil {
+		return err
+	}
+
+	var filteredSolutionStacks []*string
+
+	r := regexp.MustCompile(nameRegex.(string))
+	for _, solutionStack := range resp.SolutionStacks {
+		if r.MatchString(*solutionStack) {
+			filteredSolutionStacks = append(filteredSolutionStacks, solutionStack)
+		}
+	}
+
+	var solutionStack *string
+	if len(filteredSolutionStacks) < 1 {
+		return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
+	}
+
+	if len(filteredSolutionStacks) == 1 {
+		// Query returned single result.
+		solutionStack = filteredSolutionStacks[0]
+	} else {
+		recent := d.Get("most_recent").(bool)
+		log.Printf("[DEBUG] aws_elastic_beanstalk_solution_stack - multiple results found and `most_recent` is set to: %t", recent)
+		if recent {
+			solutionStack = mostRecentSolutionStack(filteredSolutionStacks)
+		} else {
+			return fmt.Errorf("Your query returned more than one result. Please use more " +
+				"specific search criteria, or set the `most_recent` attribute to true.")
+		}
+	}
+
+	log.Printf("[DEBUG] aws_elastic_beanstalk_solution_stack - Single solution stack found: %s", *solutionStack)
+	return solutionStackDescriptionAttributes(d, solutionStack)
+}
+
+// mostRecentSolutionStack returns the most recent solution stack out of a
+// slice of stacks, relying on the API listing the newest stacks first.
+func mostRecentSolutionStack(solutionStacks []*string) *string {
+	return solutionStacks[0]
+}
+
+// solutionStackDescriptionAttributes populates the fields that describe the solution stack.
+func solutionStackDescriptionAttributes(d *schema.ResourceData, solutionStack *string) error { + // Simple attributes first + d.SetId(*solutionStack) + d.Set("name", solutionStack) + return nil +} + +func validateSolutionStackNameRegex(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if _, err := regexp.Compile(value); err != nil { + errors = append(errors, fmt.Errorf( + "%q contains an invalid regular expression: %s", + k, err)) + } + return +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elasticache_cluster.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elasticache_cluster.go new file mode 100644 index 000000000..eaa539d3a --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elasticache_cluster.go @@ -0,0 +1,236 @@ +package aws + +import ( + "fmt" + "log" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsElastiCacheCluster() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsElastiCacheClusterRead, + + Schema: map[string]*schema.Schema{ + "cluster_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + StateFunc: func(v interface{}) string { + value := v.(string) + return strings.ToLower(value) + }, + }, + + "node_type": { + Type: schema.TypeString, + Computed: true, + }, + + "num_cache_nodes": { + Type: schema.TypeInt, + Computed: true, + }, + + "subnet_group_name": { + Type: schema.TypeString, + Computed: true, + }, + + "engine": { + Type: schema.TypeString, + Computed: true, + }, + + "engine_version": { + Type: schema.TypeString, + Computed: true, + }, + + "parameter_group_name": { + Type: schema.TypeString, + Computed: true, + }, + + "replication_group_id": { + Type: schema.TypeString, + Computed: true, + }, + + "security_group_names": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "security_group_ids": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "maintenance_window": { + Type: schema.TypeString, + Computed: true, + }, + + "snapshot_window": { + Type: schema.TypeString, + Computed: true, + }, + + "snapshot_retention_limit": { + Type: schema.TypeInt, + Computed: true, + }, + + "availability_zone": { + Type: schema.TypeString, + Computed: true, + }, + + "notification_topic_arn": { + Type: schema.TypeString, + Computed: true, + }, + + "port": { + Type: schema.TypeInt, + Computed: true, + }, + + "configuration_endpoint": { + Type: schema.TypeString, + Computed: true, + }, + + "cluster_address": { + Type: schema.TypeString, + Computed: true, + }, + + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "cache_nodes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "address": { + Type: schema.TypeString, + Computed: true, + }, + "port": { + Type: schema.TypeInt, + Computed: true, + }, + "availability_zone": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "tags": tagsSchemaComputed(), + }, + } +} + +func dataSourceAwsElastiCacheClusterRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticacheconn + + req := 
&elasticache.DescribeCacheClustersInput{
+		CacheClusterId:    aws.String(d.Get("cluster_id").(string)),
+		ShowCacheNodeInfo: aws.Bool(true),
+	}
+
+	resp, err := conn.DescribeCacheClusters(req)
+	if err != nil {
+		return err
+	}
+
+	if len(resp.CacheClusters) < 1 {
+		return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
+	}
+	if len(resp.CacheClusters) > 1 {
+		return fmt.Errorf("Your query returned more than one result. Please use more specific search criteria.")
+	}
+
+	cluster := resp.CacheClusters[0]
+
+	d.SetId(*cluster.CacheClusterId)
+
+	d.Set("cluster_id", cluster.CacheClusterId)
+	d.Set("node_type", cluster.CacheNodeType)
+	d.Set("num_cache_nodes", cluster.NumCacheNodes)
+	d.Set("subnet_group_name", cluster.CacheSubnetGroupName)
+	d.Set("engine", cluster.Engine)
+	d.Set("engine_version", cluster.EngineVersion)
+	d.Set("security_group_names", flattenElastiCacheSecurityGroupNames(cluster.CacheSecurityGroups))
+	d.Set("security_group_ids", flattenElastiCacheSecurityGroupIds(cluster.SecurityGroups))
+
+	if cluster.CacheParameterGroup != nil {
+		d.Set("parameter_group_name", cluster.CacheParameterGroup.CacheParameterGroupName)
+	}
+
+	if cluster.ReplicationGroupId != nil {
+		d.Set("replication_group_id", cluster.ReplicationGroupId)
+	}
+
+	d.Set("maintenance_window", cluster.PreferredMaintenanceWindow)
+	d.Set("snapshot_window", cluster.SnapshotWindow)
+	d.Set("snapshot_retention_limit", cluster.SnapshotRetentionLimit)
+	d.Set("availability_zone", cluster.PreferredAvailabilityZone)
+
+	if cluster.NotificationConfiguration != nil {
+		if *cluster.NotificationConfiguration.TopicStatus == "active" {
+			d.Set("notification_topic_arn", cluster.NotificationConfiguration.TopicArn)
+		}
+	}
+
+	if cluster.ConfigurationEndpoint != nil {
+		d.Set("port", cluster.ConfigurationEndpoint.Port)
+		d.Set("configuration_endpoint", fmt.Sprintf("%s:%d", *cluster.ConfigurationEndpoint.Address, *cluster.ConfigurationEndpoint.Port))
+		d.Set("cluster_address", *cluster.ConfigurationEndpoint.Address)
+	}
+
+	if err := setCacheNodeData(d, cluster); err != nil {
+		return err
+	}
+
+	arn, err := buildECARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region)
+	if err != nil {
+		log.Printf("[DEBUG] Error building ARN for ElastiCache Cluster %s", *cluster.CacheClusterId)
+	}
+	d.Set("arn", arn)
+
+	tagResp, err := conn.ListTagsForResource(&elasticache.ListTagsForResourceInput{
+		ResourceName: aws.String(arn),
+	})
+	if err != nil {
+		log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn)
+	}
+
+	var et []*elasticache.Tag
+	if err == nil && len(tagResp.TagList) > 0 {
+		et = tagResp.TagList
+	}
+	d.Set("tags", tagsToMapEC(et))
+
+	return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elb_hosted_zone_id.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elb_hosted_zone_id.go
new file mode 100644
index 000000000..ee75a27bf
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elb_hosted_zone_id.go
@@ -0,0 +1,56 @@
+package aws
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+// See https://github.com/fog/fog-aws/pull/332/files
+// This list isn't exposed by AWS - it's been found through
+// troubleshooting
+var elbHostedZoneIdPerRegionMap = map[string]string{
+	"ap-northeast-1": "Z14GRHDCWA56QT",
+	"ap-northeast-2": "ZWKZPGTI48KDX",
"ap-south-1": "ZP97RAFLXTNZK", + "ap-southeast-1": "Z1LMS91P8CMLE5", + "ap-southeast-2": "Z1GM3OXH4ZPM65", + "ca-central-1": "ZQSVJUPU6J1EY", + "eu-central-1": "Z215JYRZR1TBD5", + "eu-west-1": "Z32O12XQLNTSW2", + "eu-west-2": "ZHURV8PSTC4K8", + "us-east-1": "Z35SXDOTRQ7X7K", + "us-east-2": "Z3AADJGX6KTTL2", + "us-west-1": "Z368ELLRRE2KJ0", + "us-west-2": "Z1H1FL5HABSF5", + "sa-east-1": "Z2P70J7HTTTPLU", + "us-gov-west-1": "048591011584", + "cn-north-1": "638102146993", +} + +func dataSourceAwsElbHostedZoneId() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsElbHostedZoneIdRead, + + Schema: map[string]*schema.Schema{ + "region": { + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func dataSourceAwsElbHostedZoneIdRead(d *schema.ResourceData, meta interface{}) error { + region := meta.(*AWSClient).region + if v, ok := d.GetOk("region"); ok { + region = v.(string) + } + + if zoneId, ok := elbHostedZoneIdPerRegionMap[region]; ok { + d.SetId(zoneId) + return nil + } + + return fmt.Errorf("Unknown region (%q)", region) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elb_service_account.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elb_service_account.go new file mode 100644 index 000000000..a3d6cdd71 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_elb_service_account.go @@ -0,0 +1,61 @@ +package aws + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" +) + +// See http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html#attach-bucket-policy +var elbAccountIdPerRegionMap = map[string]string{ + "ap-northeast-1": "582318560864", + "ap-northeast-2": "600734575887", + "ap-south-1": "718504428378", + "ap-southeast-1": "114774131450", + "ap-southeast-2": "783225319266", + "ca-central-1": "985666609251", + "cn-north-1": "638102146993", + "eu-central-1": "054676820928", + "eu-west-1": "156460612806", + "eu-west-2": "652711504416", + "sa-east-1": "507241528517", + "us-east-1": "127311923021", + "us-east-2": "033677994240", + "us-gov-west": "048591011584", + "us-west-1": "027434742980", + "us-west-2": "797873946194", +} + +func dataSourceAwsElbServiceAccount() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsElbServiceAccountRead, + + Schema: map[string]*schema.Schema{ + "region": { + Type: schema.TypeString, + Optional: true, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAwsElbServiceAccountRead(d *schema.ResourceData, meta interface{}) error { + region := meta.(*AWSClient).region + if v, ok := d.GetOk("region"); ok { + region = v.(string) + } + + if accid, ok := elbAccountIdPerRegionMap[region]; ok { + d.SetId(accid) + + d.Set("arn", fmt.Sprintf("arn:%s:iam::%s:root", meta.(*AWSClient).partition, accid)) + + return nil + } + + return fmt.Errorf("Unknown region (%q)", region) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_account_alias.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_account_alias.go new file mode 100644 index 000000000..f93897373 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_account_alias.go @@ -0,0 +1,48 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/iam" + 
"github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsIamAccountAlias() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsIamAccountAliasRead, + + Schema: map[string]*schema.Schema{ + "account_alias": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAwsIamAccountAliasRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iamconn + + log.Printf("[DEBUG] Reading IAM Account Aliases.") + d.SetId(time.Now().UTC().String()) + + req := &iam.ListAccountAliasesInput{} + resp, err := conn.ListAccountAliases(req) + if err != nil { + return err + } + + // 'AccountAliases': [] if there is no alias. + if resp == nil || len(resp.AccountAliases) == 0 { + return fmt.Errorf("no IAM account alias found") + } + + alias := aws.StringValue(resp.AccountAliases[0]) + log.Printf("[DEBUG] Setting AWS IAM Account Alias to %s.", alias) + d.Set("account_alias", alias) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_policy_document.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_policy_document.go new file mode 100644 index 000000000..2366ae4bc --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_policy_document.go @@ -0,0 +1,232 @@ +package aws + +import ( + "fmt" + + "encoding/json" + "strings" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" + "strconv" +) + +var dataSourceAwsIamPolicyDocumentVarReplacer = strings.NewReplacer("&{", "${") + +func dataSourceAwsIamPolicyDocument() *schema.Resource { + setOfString := &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + } + + return &schema.Resource{ + Read: dataSourceAwsIamPolicyDocumentRead, + + Schema: map[string]*schema.Schema{ + "policy_id": { + Type: schema.TypeString, + Optional: true, + }, + "statement": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sid": { + Type: schema.TypeString, + Optional: true, + }, + "effect": { + Type: schema.TypeString, + Optional: true, + Default: "Allow", + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + switch v.(string) { + case "Allow", "Deny": + return + default: + es = append(es, fmt.Errorf("%q must be either \"Allow\" or \"Deny\"", k)) + return + } + }, + }, + "actions": setOfString, + "not_actions": setOfString, + "resources": setOfString, + "not_resources": setOfString, + "principals": dataSourceAwsIamPolicyPrincipalSchema(), + "not_principals": dataSourceAwsIamPolicyPrincipalSchema(), + "condition": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "test": { + Type: schema.TypeString, + Required: true, + }, + "variable": { + Type: schema.TypeString, + Required: true, + }, + "values": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + "json": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{}) error { + doc := &IAMPolicyDoc{ + Version: "2012-10-17", + } + + if policyId, hasPolicyId := d.GetOk("policy_id"); hasPolicyId { + doc.Id = policyId.(string) + } + + var cfgStmts = d.Get("statement").([]interface{}) + stmts := 
make([]*IAMPolicyStatement, len(cfgStmts)) + doc.Statements = stmts + for i, stmtI := range cfgStmts { + cfgStmt := stmtI.(map[string]interface{}) + stmt := &IAMPolicyStatement{ + Effect: cfgStmt["effect"].(string), + } + + if sid, ok := cfgStmt["sid"]; ok { + stmt.Sid = sid.(string) + } + + if actions := cfgStmt["actions"].(*schema.Set).List(); len(actions) > 0 { + stmt.Actions = iamPolicyDecodeConfigStringList(actions) + } + if actions := cfgStmt["not_actions"].(*schema.Set).List(); len(actions) > 0 { + stmt.NotActions = iamPolicyDecodeConfigStringList(actions) + } + + if resources := cfgStmt["resources"].(*schema.Set).List(); len(resources) > 0 { + stmt.Resources = dataSourceAwsIamPolicyDocumentReplaceVarsInList( + iamPolicyDecodeConfigStringList(resources), + ) + } + if resources := cfgStmt["not_resources"].(*schema.Set).List(); len(resources) > 0 { + stmt.NotResources = dataSourceAwsIamPolicyDocumentReplaceVarsInList( + iamPolicyDecodeConfigStringList(resources), + ) + } + + if principals := cfgStmt["principals"].(*schema.Set).List(); len(principals) > 0 { + stmt.Principals = dataSourceAwsIamPolicyDocumentMakePrincipals(principals) + } + + if principals := cfgStmt["not_principals"].(*schema.Set).List(); len(principals) > 0 { + stmt.NotPrincipals = dataSourceAwsIamPolicyDocumentMakePrincipals(principals) + } + + if conditions := cfgStmt["condition"].(*schema.Set).List(); len(conditions) > 0 { + stmt.Conditions = dataSourceAwsIamPolicyDocumentMakeConditions(conditions) + } + + stmts[i] = stmt + } + + jsonDoc, err := json.MarshalIndent(doc, "", " ") + if err != nil { + // should never happen if the above code is correct + return err + } + jsonString := string(jsonDoc) + + d.Set("json", jsonString) + d.SetId(strconv.Itoa(hashcode.String(jsonString))) + + return nil +} + +func dataSourceAwsIamPolicyDocumentReplaceVarsInList(in interface{}) interface{} { + switch v := in.(type) { + case string: + return dataSourceAwsIamPolicyDocumentVarReplacer.Replace(v) + case []string: + out := make([]string, len(v)) + for i, item := range v { + out[i] = dataSourceAwsIamPolicyDocumentVarReplacer.Replace(item) + } + return out + default: + panic("dataSourceAwsIamPolicyDocumentReplaceVarsInList: input not string nor []string") + } +} + +func dataSourceAwsIamPolicyDocumentMakeConditions(in []interface{}) IAMPolicyStatementConditionSet { + out := make([]IAMPolicyStatementCondition, len(in)) + for i, itemI := range in { + item := itemI.(map[string]interface{}) + out[i] = IAMPolicyStatementCondition{ + Test: item["test"].(string), + Variable: item["variable"].(string), + Values: dataSourceAwsIamPolicyDocumentReplaceVarsInList( + iamPolicyDecodeConfigStringList( + item["values"].(*schema.Set).List(), + ), + ), + } + } + return IAMPolicyStatementConditionSet(out) +} + +func dataSourceAwsIamPolicyDocumentMakePrincipals(in []interface{}) IAMPolicyStatementPrincipalSet { + out := make([]IAMPolicyStatementPrincipal, len(in)) + for i, itemI := range in { + item := itemI.(map[string]interface{}) + out[i] = IAMPolicyStatementPrincipal{ + Type: item["type"].(string), + Identifiers: dataSourceAwsIamPolicyDocumentReplaceVarsInList( + iamPolicyDecodeConfigStringList( + item["identifiers"].(*schema.Set).List(), + ), + ), + } + } + return IAMPolicyStatementPrincipalSet(out) +} + +func dataSourceAwsIamPolicyPrincipalSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + 
Required: true, + }, + "identifiers": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_role.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_role.go new file mode 100644 index 000000000..f681268b9 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_role.go @@ -0,0 +1,67 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsIAMRole() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsIAMRoleRead, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "assume_role_policy_document": { + Type: schema.TypeString, + Computed: true, + }, + "path": { + Type: schema.TypeString, + Computed: true, + }, + "role_id": { + Type: schema.TypeString, + Computed: true, + }, + "role_name": { + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func dataSourceAwsIAMRoleRead(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + roleName := d.Get("role_name").(string) + + req := &iam.GetRoleInput{ + RoleName: aws.String(roleName), + } + + resp, err := iamconn.GetRole(req) + if err != nil { + return errwrap.Wrapf("Error getting roles: {{err}}", err) + } + if resp == nil { + return fmt.Errorf("no IAM role found") + } + + role := resp.Role + + d.SetId(*role.RoleId) + d.Set("arn", role.Arn) + d.Set("assume_role_policy_document", role.AssumeRolePolicyDocument) + d.Set("path", role.Path) + d.Set("role_id", role.RoleId) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_server_certificate.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_server_certificate.go new file mode 100644 index 000000000..c4bf8bd2d --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_iam_server_certificate.go @@ -0,0 +1,140 @@ +package aws + +import ( + "fmt" + "sort" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsIAMServerCertificate() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsIAMServerCertificateRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 128 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 128 characters", k)) + } + return + }, + }, + + "name_prefix": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 30 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 30 characters, name is limited to 128", k)) + } + return + }, + }, + + "latest": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + }, + + "arn": { + Type: schema.TypeString, + 
Computed: true, + }, + + "id": { + Type: schema.TypeString, + Computed: true, + }, + + "path": { + Type: schema.TypeString, + Computed: true, + }, + + "expiration_date": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +type certificateByExpiration []*iam.ServerCertificateMetadata + +func (m certificateByExpiration) Len() int { + return len(m) +} + +func (m certificateByExpiration) Swap(i, j int) { + m[i], m[j] = m[j], m[i] +} + +func (m certificateByExpiration) Less(i, j int) bool { + return m[i].Expiration.After(*m[j].Expiration) +} + +func dataSourceAwsIAMServerCertificateRead(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + var matcher = func(cert *iam.ServerCertificateMetadata) bool { + return strings.HasPrefix(aws.StringValue(cert.ServerCertificateName), d.Get("name_prefix").(string)) + } + if v, ok := d.GetOk("name"); ok { + matcher = func(cert *iam.ServerCertificateMetadata) bool { + return aws.StringValue(cert.ServerCertificateName) == v.(string) + } + } + + var metadatas = []*iam.ServerCertificateMetadata{} + err := iamconn.ListServerCertificatesPages(&iam.ListServerCertificatesInput{}, func(p *iam.ListServerCertificatesOutput, lastPage bool) bool { + for _, cert := range p.ServerCertificateMetadataList { + if matcher(cert) { + metadatas = append(metadatas, cert) + } + } + return true + }) + if err != nil { + return errwrap.Wrapf("Error describing certificates: {{err}}", err) + } + + if len(metadatas) == 0 { + return fmt.Errorf("Search for AWS IAM server certificate returned no results") + } + if len(metadatas) > 1 { + if !d.Get("latest").(bool) { + return fmt.Errorf("Search for AWS IAM server certificate returned too many results") + } + + sort.Sort(certificateByExpiration(metadatas)) + } + + metadata := metadatas[0] + d.SetId(*metadata.ServerCertificateId) + d.Set("arn", *metadata.Arn) + d.Set("path", *metadata.Path) + d.Set("id", *metadata.ServerCertificateId) + d.Set("name", *metadata.ServerCertificateName) + if metadata.Expiration != nil { + d.Set("expiration_date", metadata.Expiration.Format("2006-01-02T15:04:05")) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_instance.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_instance.go new file mode 100644 index 000000000..617a5c257 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_instance.go @@ -0,0 +1,356 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsInstance() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsInstanceRead, + + Schema: map[string]*schema.Schema{ + "filter": dataSourceFiltersSchema(), + "tags": dataSourceTagsSchema(), + "instance_tags": tagsSchemaComputed(), + "instance_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "ami": { + Type: schema.TypeString, + Computed: true, + }, + "instance_type": { + Type: schema.TypeString, + Computed: true, + }, + "instance_state": { + Type: schema.TypeString, + Computed: true, + }, + "availability_zone": { + Type: schema.TypeString, + Computed: true, + }, + "tenancy": { + Type: schema.TypeString, + Computed: true, + }, + "key_name": { + Type: schema.TypeString, + Computed: true, + }, + "public_dns": { + Type: schema.TypeString, + Computed: true, + }, + "public_ip": { + 
Type: schema.TypeString, + Computed: true, + }, + "private_dns": { + Type: schema.TypeString, + Computed: true, + }, + "private_ip": { + Type: schema.TypeString, + Computed: true, + }, + "iam_instance_profile": { + Type: schema.TypeString, + Computed: true, + }, + "subnet_id": { + Type: schema.TypeString, + Computed: true, + }, + "network_interface_id": { + Type: schema.TypeString, + Computed: true, + }, + "associate_public_ip_address": { + Type: schema.TypeBool, + Computed: true, + }, + "ebs_optimized": { + Type: schema.TypeBool, + Computed: true, + }, + "source_dest_check": { + Type: schema.TypeBool, + Computed: true, + }, + "monitoring": { + Type: schema.TypeBool, + Computed: true, + }, + "user_data": { + Type: schema.TypeString, + Computed: true, + }, + "security_groups": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "vpc_security_group_ids": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "ephemeral_block_device": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "device_name": { + Type: schema.TypeString, + Required: true, + }, + + "virtual_name": { + Type: schema.TypeString, + Optional: true, + }, + + "no_device": { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + "ebs_block_device": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "delete_on_termination": { + Type: schema.TypeBool, + Computed: true, + }, + + "device_name": { + Type: schema.TypeString, + Computed: true, + }, + + "encrypted": { + Type: schema.TypeBool, + Computed: true, + }, + + "iops": { + Type: schema.TypeInt, + Computed: true, + }, + + "snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + + "volume_size": { + Type: schema.TypeInt, + Computed: true, + }, + + "volume_type": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "root_block_device": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "delete_on_termination": { + Type: schema.TypeBool, + Computed: true, + }, + + "iops": { + Type: schema.TypeInt, + Computed: true, + }, + + "volume_size": { + Type: schema.TypeInt, + Computed: true, + }, + + "volume_type": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +// dataSourceAwsInstanceRead performs the instanceID lookup +func dataSourceAwsInstanceRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + filters, filtersOk := d.GetOk("filter") + instanceID, instanceIDOk := d.GetOk("instance_id") + tags, tagsOk := d.GetOk("instance_tags") + + if filtersOk == false && instanceIDOk == false && tagsOk == false { + return fmt.Errorf("One of filters, instance_tags, or instance_id must be assigned") + } + + // Build up search parameters + params := &ec2.DescribeInstancesInput{} + if filtersOk { + params.Filters = buildAwsDataSourceFilters(filters.(*schema.Set)) + } + if instanceIDOk { + params.InstanceIds = []*string{aws.String(instanceID.(string))} + } + if tagsOk { + params.Filters = append(params.Filters, buildEC2TagFilterList( + tagsFromMap(tags.(map[string]interface{})), + )...) 
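+		// instance_tags are translated into EC2 "tag:<key>" filters here, so
+		// they combine with any explicit "filter" blocks rather than acting
+		// as a separate lookup path.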
+	}
+
+	// Perform the lookup
+	resp, err := conn.DescribeInstances(params)
+	if err != nil {
+		return err
+	}
+
+	// If no instances were returned, return
+	if len(resp.Reservations) == 0 {
+		return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
+	}
+
+	var filteredInstances []*ec2.Instance
+
+	// Loop through the reservations, dropping terminated instances and
+	// collecting the rest into a flat slice
+	for _, res := range resp.Reservations {
+		for _, instance := range res.Instances {
+			if instance.State != nil && *instance.State.Name != "terminated" {
+				filteredInstances = append(filteredInstances, instance)
+			}
+		}
+	}
+
+	var instance *ec2.Instance
+	if len(filteredInstances) < 1 {
+		return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
+	}
+
+	// (TODO: Support a list of instances to be returned)
+	// Possibly with a different data source that returns a list of individual instance data sources
+	if len(filteredInstances) > 1 {
+		return fmt.Errorf("Your query returned more than one result. Please try a more " +
+			"specific search criteria.")
+	}
+	instance = filteredInstances[0]
+
+	log.Printf("[DEBUG] aws_instance - Single Instance ID found: %s", *instance.InstanceId)
+	return instanceDescriptionAttributes(d, instance, conn)
+}
+
+// Populate instance attribute fields with the returned instance
+func instanceDescriptionAttributes(d *schema.ResourceData, instance *ec2.Instance, conn *ec2.EC2) error {
+	d.SetId(*instance.InstanceId)
+	// Set the easy attributes
+	d.Set("instance_state", instance.State.Name)
+	if instance.Placement != nil {
+		d.Set("availability_zone", instance.Placement.AvailabilityZone)
+		if instance.Placement.Tenancy != nil {
+			d.Set("tenancy", instance.Placement.Tenancy)
+		}
+	}
+	d.Set("ami", instance.ImageId)
+	d.Set("instance_type", instance.InstanceType)
+	d.Set("key_name", instance.KeyName)
+	d.Set("public_dns", instance.PublicDnsName)
+	d.Set("public_ip", instance.PublicIpAddress)
+	d.Set("private_dns", instance.PrivateDnsName)
+	d.Set("private_ip", instance.PrivateIpAddress)
+	d.Set("iam_instance_profile", iamInstanceProfileArnToName(instance.IamInstanceProfile))
+
+	// iterate through network interfaces, and set subnet, network_interface, public_addr
+	if len(instance.NetworkInterfaces) > 0 {
+		for _, ni := range instance.NetworkInterfaces {
+			if ni.Attachment != nil && *ni.Attachment.DeviceIndex == 0 {
+				d.Set("subnet_id", ni.SubnetId)
+				d.Set("network_interface_id", ni.NetworkInterfaceId)
+				d.Set("associate_public_ip_address", ni.Association != nil)
+			}
+		}
+	} else {
+		d.Set("subnet_id", instance.SubnetId)
+		d.Set("network_interface_id", "")
+	}
+
+	d.Set("ebs_optimized", instance.EbsOptimized)
+	if instance.SubnetId != nil && *instance.SubnetId != "" {
+		d.Set("source_dest_check", instance.SourceDestCheck)
+	}
+
+	if instance.Monitoring != nil && instance.Monitoring.State != nil {
+		monitoringState := *instance.Monitoring.State
+		d.Set("monitoring", monitoringState == "enabled" || monitoringState == "pending")
+	}
+
+	d.Set("tags", dataSourceTags(instance.Tags))
+
+	// Security Groups
+	if err := readSecurityGroups(d, instance); err != nil {
+		return err
+	}
+
+	// Block devices
+	if err := readBlockDevices(d, instance, conn); err != nil {
+		return err
+	}
+	if _, ok := d.GetOk("ephemeral_block_device"); !ok {
+		d.Set("ephemeral_block_device", []interface{}{})
+	}
+
+	// Lookup and Set Instance Attributes
+	{
+		attr, err := conn.DescribeInstanceAttribute(&ec2.DescribeInstanceAttributeInput{
+	
Attribute: aws.String("disableApiTermination"), + InstanceId: aws.String(d.Id()), + }) + if err != nil { + return err + } + d.Set("disable_api_termination", attr.DisableApiTermination.Value) + } + { + attr, err := conn.DescribeInstanceAttribute(&ec2.DescribeInstanceAttributeInput{ + Attribute: aws.String(ec2.InstanceAttributeNameUserData), + InstanceId: aws.String(d.Id()), + }) + if err != nil { + return err + } + if attr.UserData.Value != nil { + d.Set("user_data", userDataHashSum(*attr.UserData.Value)) + } + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ip_ranges.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ip_ranges.go new file mode 100644 index 000000000..32e9d8988 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ip_ranges.go @@ -0,0 +1,151 @@ +package aws + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "log" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/terraform/helper/schema" +) + +type dataSourceAwsIPRangesResult struct { + CreateDate string + Prefixes []dataSourceAwsIPRangesPrefix + SyncToken string +} + +type dataSourceAwsIPRangesPrefix struct { + IpPrefix string `json:"ip_prefix"` + Region string + Service string +} + +func dataSourceAwsIPRanges() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsIPRangesRead, + + Schema: map[string]*schema.Schema{ + "cidr_blocks": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "create_date": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "regions": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + }, + "services": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "sync_token": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + }, + } +} + +func dataSourceAwsIPRangesRead(d *schema.ResourceData, meta interface{}) error { + + conn := cleanhttp.DefaultClient() + + log.Printf("[DEBUG] Reading IP ranges") + + res, err := conn.Get("https://ip-ranges.amazonaws.com/ip-ranges.json") + + if err != nil { + return fmt.Errorf("Error listing IP ranges: %s", err) + } + + defer res.Body.Close() + + data, err := ioutil.ReadAll(res.Body) + + if err != nil { + return fmt.Errorf("Error reading response body: %s", err) + } + + result := new(dataSourceAwsIPRangesResult) + + if err := json.Unmarshal(data, result); err != nil { + return fmt.Errorf("Error parsing result: %s", err) + } + + if err := d.Set("create_date", result.CreateDate); err != nil { + return fmt.Errorf("Error setting create date: %s", err) + } + + syncToken, err := strconv.Atoi(result.SyncToken) + + if err != nil { + return fmt.Errorf("Error while converting sync token: %s", err) + } + + d.SetId(result.SyncToken) + + if err := d.Set("sync_token", syncToken); err != nil { + return fmt.Errorf("Error setting sync token: %s", err) + } + + get := func(key string) *schema.Set { + + set := d.Get(key).(*schema.Set) + + for _, e := range set.List() { + + s := e.(string) + + set.Remove(s) + set.Add(strings.ToLower(s)) + + } + + return set + + } + + var ( + regions = get("regions") + services = get("services") + noRegionFilter = regions.Len() == 0 + prefixes []string + ) + + for _, e := range result.Prefixes { + + var ( + matchRegion = noRegionFilter || 
regions.Contains(strings.ToLower(e.Region))
+			matchService = services.Contains(strings.ToLower(e.Service))
+		)
+
+		if matchRegion && matchService {
+			prefixes = append(prefixes, e.IpPrefix)
+		}
+
+	}
+
+	if len(prefixes) == 0 {
+		return fmt.Errorf("No IP ranges resulted from the specified filters")
+	}
+
+	sort.Strings(prefixes)
+
+	if err := d.Set("cidr_blocks", prefixes); err != nil {
+		return fmt.Errorf("Error setting ip ranges: %s", err)
+	}
+
+	return nil
+
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kinesis_stream.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kinesis_stream.go
new file mode 100644
index 000000000..ebc843d11
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kinesis_stream.go
@@ -0,0 +1,95 @@
+package aws
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/kinesis"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func dataSourceAwsKinesisStream() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceAwsKinesisStreamRead,
+
+		Schema: map[string]*schema.Schema{
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+			},
+
+			"arn": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"creation_timestamp": &schema.Schema{
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+
+			"status": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"retention_period": &schema.Schema{
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+
+			"open_shards": &schema.Schema{
+				Type:     schema.TypeSet,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+				Set:      schema.HashString,
+			},
+
+			"closed_shards": &schema.Schema{
+				Type:     schema.TypeSet,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+				Set:      schema.HashString,
+			},
+
+			"shard_level_metrics": &schema.Schema{
+				Type:     schema.TypeSet,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+				Set:      schema.HashString,
+			},
+
+			"tags": &schema.Schema{
+				Type:     schema.TypeMap,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func dataSourceAwsKinesisStreamRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).kinesisconn
+	sn := d.Get("name").(string)
+
+	state, err := readKinesisStreamState(conn, sn)
+	if err != nil {
+		return err
+	}
+	d.SetId(state.arn)
+	d.Set("arn", state.arn)
+	d.Set("name", sn)
+	d.Set("open_shards", state.openShards)
+	d.Set("closed_shards", state.closedShards)
+	d.Set("status", state.status)
+	d.Set("creation_timestamp", state.creationTimestamp)
+	d.Set("retention_period", state.retentionPeriod)
+	d.Set("shard_level_metrics", state.shardLevelMetrics)
+
+	tags, err := conn.ListTagsForStream(&kinesis.ListTagsForStreamInput{
+		StreamName: aws.String(sn),
+	})
+	if err != nil {
+		return err
+	}
+	d.Set("tags", tagsToMapKinesis(tags.Tags))
+
+	return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_alias.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_alias.go
new file mode 100644
index 000000000..41c33b680
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_alias.go
@@ -0,0 +1,62 @@
+package aws
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/service/kms"
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func dataSourceAwsKmsAlias() *schema.Resource {
+	
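+	// Resolves a KMS alias name (e.g. "alias/my-key") to its ARN and target
+	// key ID by paging through ListAliases and matching on the name.
+	// Illustrative usage from a configuration (hypothetical example):
+	//
+	//   data "aws_kms_alias" "s3" {
+	//     name = "alias/aws/s3"
+	//   }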
return &schema.Resource{ + Read: dataSourceAwsKmsAliasRead, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAwsKmsName, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "target_key_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAwsKmsAliasRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kmsconn + params := &kms.ListAliasesInput{} + + target := d.Get("name") + var alias *kms.AliasListEntry + err := conn.ListAliasesPages(params, func(page *kms.ListAliasesOutput, lastPage bool) bool { + for _, entity := range page.Aliases { + if *entity.AliasName == target { + alias = entity + return false + } + } + + return true + }) + if err != nil { + return errwrap.Wrapf("Error fetch KMS alias list: {{err}}", err) + } + + if alias == nil { + return fmt.Errorf("No alias with name %q found in this region.", target) + } + + d.SetId(time.Now().UTC().String()) + d.Set("arn", alias.AliasArn) + d.Set("target_key_id", alias.TargetKeyId) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_ciphertext.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_ciphertext.go new file mode 100644 index 000000000..3f15965ca --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_ciphertext.go @@ -0,0 +1,66 @@ +package aws + +import ( + "encoding/base64" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/kms" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsKmsCiphetext() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsKmsCiphetextRead, + + Schema: map[string]*schema.Schema{ + "plaintext": { + Type: schema.TypeString, + Required: true, + }, + + "key_id": { + Type: schema.TypeString, + Required: true, + }, + + "context": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "ciphertext_blob": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAwsKmsCiphetextRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kmsconn + + d.SetId(time.Now().UTC().String()) + + req := &kms.EncryptInput{ + KeyId: aws.String(d.Get("key_id").(string)), + Plaintext: []byte(d.Get("plaintext").(string)), + } + + if ec := d.Get("context"); ec != nil { + req.EncryptionContext = stringMapToPointers(ec.(map[string]interface{})) + } + + log.Printf("[DEBUG] KMS encrypt for key: %s", d.Get("key_id").(string)) + + resp, err := conn.Encrypt(req) + if err != nil { + return err + } + + d.Set("ciphertext_blob", base64.StdEncoding.EncodeToString(resp.CiphertextBlob)) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_secret.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_secret.go new file mode 100644 index 000000000..92d5134fd --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_kms_secret.go @@ -0,0 +1,99 @@ +package aws + +import ( + "encoding/base64" + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/kms" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsKmsSecret() *schema.Resource { + return &schema.Resource{ + Read: 
dataSourceAwsKmsSecretRead, + + Schema: map[string]*schema.Schema{ + "secret": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "payload": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "context": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "grant_tokens": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "__has_dynamic_attributes": { + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +// dataSourceAwsKmsSecretRead decrypts the specified secrets +func dataSourceAwsKmsSecretRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kmsconn + secrets := d.Get("secret").(*schema.Set) + + d.SetId(time.Now().UTC().String()) + + for _, v := range secrets.List() { + secret := v.(map[string]interface{}) + + // base64 decode the payload + payload, err := base64.StdEncoding.DecodeString(secret["payload"].(string)) + if err != nil { + return fmt.Errorf("Invalid base64 value for secret '%s': %v", secret["name"].(string), err) + } + + // build the kms decrypt params + params := &kms.DecryptInput{ + CiphertextBlob: []byte(payload), + } + if context, exists := secret["context"]; exists { + params.EncryptionContext = make(map[string]*string) + for k, v := range context.(map[string]interface{}) { + params.EncryptionContext[k] = aws.String(v.(string)) + } + } + if grant_tokens, exists := secret["grant_tokens"]; exists { + params.GrantTokens = make([]*string, 0) + for _, v := range grant_tokens.([]interface{}) { + params.GrantTokens = append(params.GrantTokens, aws.String(v.(string))) + } + } + + // decrypt + resp, err := conn.Decrypt(params) + if err != nil { + return fmt.Errorf("Failed to decrypt '%s': %s", secret["name"].(string), err) + } + + // Set the secret via the name + log.Printf("[DEBUG] aws_kms_secret - successfully decrypted secret: %s", secret["name"].(string)) + d.UnsafeSetFieldRaw(secret["name"].(string), string(resp.Plaintext)) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_partition.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_partition.go new file mode 100644 index 000000000..d52f7ee47 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_partition.go @@ -0,0 +1,33 @@ +package aws + +import ( + "log" + "time" + + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsPartition() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsPartitionRead, + + Schema: map[string]*schema.Schema{ + "partition": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAwsPartitionRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient) + + log.Printf("[DEBUG] Reading Partition.") + d.SetId(time.Now().UTC().String()) + + log.Printf("[DEBUG] Setting AWS Partition to %s.", client.partition) + d.Set("partition", meta.(*AWSClient).partition) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_prefix_list.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_prefix_list.go new file mode 100644 index 
000000000..8bed85506 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_prefix_list.go @@ -0,0 +1,76 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsPrefixList() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsPrefixListRead, + + Schema: map[string]*schema.Schema{ + "prefix_list_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + // Computed values. + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "cidr_blocks": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceAwsPrefixListRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + req := &ec2.DescribePrefixListsInput{} + + if prefixListID := d.Get("prefix_list_id"); prefixListID != "" { + req.PrefixListIds = aws.StringSlice([]string{prefixListID.(string)}) + } + req.Filters = buildEC2AttributeFilterList( + map[string]string{ + "prefix-list-name": d.Get("name").(string), + }, + ) + + log.Printf("[DEBUG] DescribePrefixLists %s\n", req) + resp, err := conn.DescribePrefixLists(req) + if err != nil { + return err + } + if resp == nil || len(resp.PrefixLists) == 0 { + return fmt.Errorf("no matching prefix list found; the prefix list ID or name may be invalid or not exist in the current region") + } + + pl := resp.PrefixLists[0] + + d.SetId(*pl.PrefixListId) + d.Set("id", pl.PrefixListId) + d.Set("name", pl.PrefixListName) + + cidrs := make([]string, len(pl.Cidrs)) + for i, v := range pl.Cidrs { + cidrs[i] = *v + } + d.Set("cidr_blocks", cidrs) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_redshift_service_account.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_redshift_service_account.go new file mode 100644 index 000000000..faa210fff --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_redshift_service_account.go @@ -0,0 +1,50 @@ +package aws + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" +) + +// See http://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html#db-auditing-enable-logging +var redshiftServiceAccountPerRegionMap = map[string]string{ + "us-east-1": "193672423079", + "us-east-2": "391106570357", + "us-west-1": "262260360010", + "us-west-2": "902366379725", + "ap-south-1": "865932855811", + "ap-northeast-2": "760740231472", + "ap-southeast-1": "361669875840", + "ap-southeast-2": "762762565011", + "ap-northeast-1": "404641285394", + "ca-central-1": "907379612154", + "eu-central-1": "053454850223", + "eu-west-1": "210876761215", +} + +func dataSourceAwsRedshiftServiceAccount() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsRedshiftServiceAccountRead, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func dataSourceAwsRedshiftServiceAccountRead(d *schema.ResourceData, meta interface{}) error { + region := meta.(*AWSClient).region + if v, ok := d.GetOk("region"); ok { + region = v.(string) + } + + if accid, ok := redshiftServiceAccountPerRegionMap[region]; ok { + d.SetId(accid) + 
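+		// Only the bare account ID is exposed here; unlike the ELB service
+		// account data source above, no ARN attribute is computed.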
return nil + } + + return fmt.Errorf("Unknown region (%q)", region) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_region.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_region.go new file mode 100644 index 000000000..ed75f7056 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_region.go @@ -0,0 +1,84 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsRegion() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsRegionRead, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "current": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "endpoint": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + } +} + +func dataSourceAwsRegionRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + currentRegion := meta.(*AWSClient).region + + req := &ec2.DescribeRegionsInput{} + + req.RegionNames = make([]*string, 0, 2) + if name := d.Get("name").(string); name != "" { + req.RegionNames = append(req.RegionNames, aws.String(name)) + } + + if d.Get("current").(bool) { + req.RegionNames = append(req.RegionNames, aws.String(currentRegion)) + } + + req.Filters = buildEC2AttributeFilterList( + map[string]string{ + "endpoint": d.Get("endpoint").(string), + }, + ) + if len(req.Filters) == 0 { + // Don't send an empty filters list; the EC2 API won't accept it. + req.Filters = nil + } + + log.Printf("[DEBUG] DescribeRegions %s\n", req) + resp, err := conn.DescribeRegions(req) + if err != nil { + return err + } + if resp == nil || len(resp.Regions) == 0 { + return fmt.Errorf("no matching regions found") + } + if len(resp.Regions) > 1 { + return fmt.Errorf("multiple regions matched; use additional constraints to reduce matches to a single region") + } + + region := resp.Regions[0] + + d.SetId(*region.RegionName) + d.Set("id", region.RegionName) + d.Set("name", region.RegionName) + d.Set("endpoint", region.Endpoint) + d.Set("current", *region.RegionName == currentRegion) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route53_zone.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route53_zone.go new file mode 100644 index 000000000..b3de4eed4 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route53_zone.go @@ -0,0 +1,176 @@ +package aws + +import ( + "fmt" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/route53" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsRoute53Zone() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsRoute53ZoneRead, + + Schema: map[string]*schema.Schema{ + "zone_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "private_zone": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "comment": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "caller_reference": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "vpc_id": { 
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+			},
+			"tags": tagsSchemaComputed(),
+			"resource_record_set_count": {
+				Type:     schema.TypeInt,
+				Optional: true,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func dataSourceAwsRoute53ZoneRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).r53conn
+	name, nameExists := d.GetOk("name")
+	name = hostedZoneName(name.(string))
+	id, idExists := d.GetOk("zone_id")
+	vpcId, vpcIdExists := d.GetOk("vpc_id")
+	tags := tagsFromMap(d.Get("tags").(map[string]interface{}))
+	if nameExists && idExists {
+		return fmt.Errorf("zone_id and name arguments can't be used together")
+	} else if !nameExists && !idExists {
+		return fmt.Errorf("Either name or zone_id must be set")
+	}
+
+	var nextMarker *string
+
+	var hostedZoneFound *route53.HostedZone
+	// Page through all hosted zones in the account
+	for allHostedZoneListed := false; !allHostedZoneListed; {
+		req := &route53.ListHostedZonesInput{}
+		if nextMarker != nil {
+			req.Marker = nextMarker
+		}
+		resp, err := conn.ListHostedZones(req)
+
+		if err != nil {
+			return fmt.Errorf("Error finding Route 53 Hosted Zone: %v", err)
+		}
+		for _, hostedZone := range resp.HostedZones {
+			hostedZoneId := cleanZoneID(*hostedZone.Id)
+			if idExists && hostedZoneId == id.(string) {
+				hostedZoneFound = hostedZone
+				break
+				// Otherwise match on the requested name and private_zone flag;
+				// a vpc_id lookup implies a private zone
+			} else if *hostedZone.Name == name && (*hostedZone.Config.PrivateZone == d.Get("private_zone").(bool) || (*hostedZone.Config.PrivateZone && vpcIdExists)) {
+				matchingVPC := false
+				if vpcIdExists {
+					reqHostedZone := &route53.GetHostedZoneInput{}
+					reqHostedZone.Id = aws.String(hostedZoneId)
+
+					respHostedZone, errHostedZone := conn.GetHostedZone(reqHostedZone)
+					if errHostedZone != nil {
+						return fmt.Errorf("Error finding Route 53 Hosted Zone: %v", errHostedZone)
+					}
+					// Check whether the requested VPC is associated with this zone
+					for _, vpc := range respHostedZone.VPCs {
+						if *vpc.VPCId == vpcId.(string) {
+							matchingVPC = true
+							break
+						}
+					}
+				} else {
+					matchingVPC = true
+				}
+				// Check that every requested tag is present on the zone
+				matchingTags := true
+				if len(tags) > 0 {
+					reqListTags := &route53.ListTagsForResourceInput{}
+					reqListTags.ResourceId = aws.String(hostedZoneId)
+					reqListTags.ResourceType = aws.String("hostedzone")
+					respListTags, errListTags := conn.ListTagsForResource(reqListTags)
+
+					if errListTags != nil {
+						return fmt.Errorf("Error finding Route 53 Hosted Zone: %v", errListTags)
+					}
+					for _, tag := range tags {
+						found := false
+						for _, tagRequested := range respListTags.ResourceTagSet.Tags {
+							if *tag.Key == *tagRequested.Key && *tag.Value == *tagRequested.Value {
+								found = true
+							}
+						}
+
+						if !found {
+							matchingTags = false
+							break
+						}
+					}
+
+				}
+
+				if matchingTags && matchingVPC {
+					if hostedZoneFound != nil {
+						return fmt.Errorf("multiple Route 53 Hosted Zones matched; use the vpc_id argument to filter them")
+					} else {
+						hostedZoneFound = hostedZone
+					}
+				}
+			}
+
+		}
+		if *resp.IsTruncated {
+			nextMarker = resp.NextMarker
+		} else {
+			allHostedZoneListed = true
+		}
+	}
+	if hostedZoneFound == nil {
+		return fmt.Errorf("no matching Route 53 Hosted Zone found")
+	}
+
+	idHostedZone := cleanZoneID(*hostedZoneFound.Id)
+	d.SetId(idHostedZone)
+	d.Set("zone_id", idHostedZone)
+	d.Set("name", hostedZoneFound.Name)
+	d.Set("comment", hostedZoneFound.Config.Comment)
+	d.Set("private_zone", hostedZoneFound.Config.PrivateZone)
+	d.Set("caller_reference", hostedZoneFound.CallerReference)
+	
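+	// ResourceRecordSetCount is carried on the ListHostedZones response
+	// itself, so no additional API call is needed for it.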
d.Set("resource_record_set_count", hostedZoneFound.ResourceRecordSetCount) + return nil +} + +// used to manage trailing . +func hostedZoneName(name string) string { + if strings.HasSuffix(name, ".") { + return name + } else { + return name + "." + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route_table.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route_table.go new file mode 100644 index 000000000..c332bdd91 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_route_table.go @@ -0,0 +1,233 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsRouteTable() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsRouteTableRead, + + Schema: map[string]*schema.Schema{ + "subnet_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "route_table_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "vpc_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "filter": ec2CustomFiltersSchema(), + "tags": tagsSchemaComputed(), + "routes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cidr_block": { + Type: schema.TypeString, + Computed: true, + }, + + "ipv6_cidr_block": { + Type: schema.TypeString, + Computed: true, + }, + + "egress_only_gateway_id": { + Type: schema.TypeString, + Computed: true, + }, + + "gateway_id": { + Type: schema.TypeString, + Computed: true, + }, + + "instance_id": { + Type: schema.TypeString, + Computed: true, + }, + + "nat_gateway_id": { + Type: schema.TypeString, + Computed: true, + }, + + "vpc_peering_connection_id": { + Type: schema.TypeString, + Computed: true, + }, + + "network_interface_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "associations": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "route_table_association_id": { + Type: schema.TypeString, + Computed: true, + }, + + "route_table_id": { + Type: schema.TypeString, + Computed: true, + }, + + "subnet_id": { + Type: schema.TypeString, + Computed: true, + }, + + "main": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceAwsRouteTableRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + req := &ec2.DescribeRouteTablesInput{} + vpcId, vpcIdOk := d.GetOk("vpc_id") + subnetId, subnetIdOk := d.GetOk("subnet_id") + rtbId, rtbOk := d.GetOk("route_table_id") + tags, tagsOk := d.GetOk("tags") + filter, filterOk := d.GetOk("filter") + + if !vpcIdOk && !subnetIdOk && !tagsOk && !filterOk && !rtbOk { + return fmt.Errorf("One of route_table_id, vpc_id, subnet_id, filters, or tags must be assigned") + } + req.Filters = buildEC2AttributeFilterList( + map[string]string{ + "route-table-id": rtbId.(string), + "vpc-id": vpcId.(string), + "association.subnet-id": subnetId.(string), + }, + ) + req.Filters = append(req.Filters, buildEC2TagFilterList( + tagsFromMap(tags.(map[string]interface{})), + )...) + req.Filters = append(req.Filters, buildEC2CustomFilterList( + filter.(*schema.Set), + )...) 
+
+	log.Printf("[DEBUG] Describe Route Tables %v\n", req)
+	resp, err := conn.DescribeRouteTables(req)
+	if err != nil {
+		return err
+	}
+	if resp == nil || len(resp.RouteTables) == 0 {
+		return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
+	}
+	if len(resp.RouteTables) > 1 {
+		return fmt.Errorf("Multiple Route Tables matched; use additional constraints to reduce matches to a single Route Table")
+	}
+
+	rt := resp.RouteTables[0]
+
+	d.SetId(aws.StringValue(rt.RouteTableId))
+	d.Set("route_table_id", rt.RouteTableId)
+	d.Set("vpc_id", rt.VpcId)
+	d.Set("tags", tagsToMap(rt.Tags))
+	if err := d.Set("routes", dataSourceRoutesRead(rt.Routes)); err != nil {
+		return err
+	}
+
+	if err := d.Set("associations", dataSourceAssociationsRead(rt.Associations)); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func dataSourceRoutesRead(ec2Routes []*ec2.Route) []map[string]interface{} {
+	routes := make([]map[string]interface{}, 0, len(ec2Routes))
+	// Loop through the routes and add them to the set
+	for _, r := range ec2Routes {
+		if r.GatewayId != nil && *r.GatewayId == "local" {
+			continue
+		}
+
+		if r.Origin != nil && *r.Origin == "EnableVgwRoutePropagation" {
+			continue
+		}
+
+		if r.DestinationPrefixListId != nil {
+			// Skipping because VPC endpoint routes are handled separately
+			// See aws_vpc_endpoint
+			continue
+		}
+
+		m := make(map[string]interface{})
+
+		if r.DestinationCidrBlock != nil {
+			m["cidr_block"] = *r.DestinationCidrBlock
+		}
+		if r.DestinationIpv6CidrBlock != nil {
+			m["ipv6_cidr_block"] = *r.DestinationIpv6CidrBlock
+		}
+		if r.EgressOnlyInternetGatewayId != nil {
+			m["egress_only_gateway_id"] = *r.EgressOnlyInternetGatewayId
+		}
+		if r.GatewayId != nil {
+			m["gateway_id"] = *r.GatewayId
+		}
+		if r.NatGatewayId != nil {
+			m["nat_gateway_id"] = *r.NatGatewayId
+		}
+		if r.InstanceId != nil {
+			m["instance_id"] = *r.InstanceId
+		}
+		if r.VpcPeeringConnectionId != nil {
+			m["vpc_peering_connection_id"] = *r.VpcPeeringConnectionId
+		}
+		if r.NetworkInterfaceId != nil {
+			m["network_interface_id"] = *r.NetworkInterfaceId
+		}
+
+		routes = append(routes, m)
+	}
+	return routes
+}
+
+func dataSourceAssociationsRead(ec2Associations []*ec2.RouteTableAssociation) []map[string]interface{} {
+	associations := make([]map[string]interface{}, 0, len(ec2Associations))
+	// Loop through the associations and add them to the set
+	for _, a := range ec2Associations {
+
+		m := make(map[string]interface{})
+		m["route_table_id"] = *a.RouteTableId
+		m["route_table_association_id"] = *a.RouteTableAssociationId
+		// GH[11134]
+		if a.SubnetId != nil {
+			m["subnet_id"] = *a.SubnetId
+		}
+		m["main"] = *a.Main
+		associations = append(associations, m)
+	}
+	return associations
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_s3_bucket_object.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_s3_bucket_object.go
new file mode 100644
index 000000000..2eff5e6da
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_s3_bucket_object.go
@@ -0,0 +1,239 @@
+package aws
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func dataSourceAwsS3BucketObject() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceAwsS3BucketObjectRead,
+
+		Schema: map[string]*schema.Schema{
+			"body": 
&schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "bucket": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "cache_control": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "content_disposition": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "content_encoding": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "content_language": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "content_length": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + "content_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "etag": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "expiration": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "expires": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "key": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "last_modified": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "metadata": &schema.Schema{ + Type: schema.TypeMap, + Computed: true, + }, + "range": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "server_side_encryption": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "sse_kms_key_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "storage_class": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "version_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "website_redirect_location": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "tags": tagsSchemaComputed(), + }, + } +} + +func dataSourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).s3conn + + bucket := d.Get("bucket").(string) + key := d.Get("key").(string) + + input := s3.HeadObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + } + if v, ok := d.GetOk("range"); ok { + input.Range = aws.String(v.(string)) + } + if v, ok := d.GetOk("version_id"); ok { + input.VersionId = aws.String(v.(string)) + } + + versionText := "" + uniqueId := bucket + "/" + key + if v, ok := d.GetOk("version_id"); ok { + versionText = fmt.Sprintf(" of version %q", v.(string)) + uniqueId += "@" + v.(string) + } + + log.Printf("[DEBUG] Reading S3 object: %s", input) + out, err := conn.HeadObject(&input) + if err != nil { + return fmt.Errorf("Failed getting S3 object: %s Bucket: %q Object: %q", err, bucket, key) + } + if out.DeleteMarker != nil && *out.DeleteMarker == true { + return fmt.Errorf("Requested S3 object %q%s has been deleted", + bucket+key, versionText) + } + + log.Printf("[DEBUG] Received S3 object: %s", out) + + d.SetId(uniqueId) + + d.Set("cache_control", out.CacheControl) + d.Set("content_disposition", out.ContentDisposition) + d.Set("content_encoding", out.ContentEncoding) + d.Set("content_language", out.ContentLanguage) + d.Set("content_length", out.ContentLength) + d.Set("content_type", out.ContentType) + // See https://forums.aws.amazon.com/thread.jspa?threadID=44003 + d.Set("etag", strings.Trim(*out.ETag, `"`)) + d.Set("expiration", out.Expiration) + d.Set("expires", out.Expires) + d.Set("last_modified", out.LastModified.Format(time.RFC1123)) + d.Set("metadata", pointersMapToStringList(out.Metadata)) + d.Set("server_side_encryption", out.ServerSideEncryption) + d.Set("sse_kms_key_id", out.SSEKMSKeyId) + d.Set("version_id", out.VersionId) + 
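+	// Everything above comes from the HeadObject response; the object body
+	// itself is fetched separately below, and only for textual content types.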
d.Set("website_redirect_location", out.WebsiteRedirectLocation) + + // The "STANDARD" (which is also the default) storage + // class when set would not be included in the results. + d.Set("storage_class", s3.StorageClassStandard) + if out.StorageClass != nil { + d.Set("storage_class", out.StorageClass) + } + + if isContentTypeAllowed(out.ContentType) { + input := s3.GetObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + } + if v, ok := d.GetOk("range"); ok { + input.Range = aws.String(v.(string)) + } + if out.VersionId != nil { + input.VersionId = out.VersionId + } + out, err := conn.GetObject(&input) + if err != nil { + return fmt.Errorf("Failed getting S3 object: %s", err) + } + + buf := new(bytes.Buffer) + bytesRead, err := buf.ReadFrom(out.Body) + if err != nil { + return fmt.Errorf("Failed reading content of S3 object (%s): %s", + uniqueId, err) + } + log.Printf("[INFO] Saving %d bytes from S3 object %s", bytesRead, uniqueId) + d.Set("body", buf.String()) + } else { + contentType := "" + if out.ContentType == nil { + contentType = "" + } else { + contentType = *out.ContentType + } + + log.Printf("[INFO] Ignoring body of S3 object %s with Content-Type %q", + uniqueId, contentType) + } + + tagResp, err := conn.GetObjectTagging( + &s3.GetObjectTaggingInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + }) + if err != nil { + return err + } + d.Set("tags", tagsToMapS3(tagResp.TagSet)) + + return nil +} + +// This is to prevent potential issues w/ binary files +// and generally unprintable characters +// See https://github.com/hashicorp/terraform/pull/3858#issuecomment-156856738 +func isContentTypeAllowed(contentType *string) bool { + if contentType == nil { + return false + } + + allowedContentTypes := []*regexp.Regexp{ + regexp.MustCompile("^text/.+"), + regexp.MustCompile("^application/json$"), + } + + for _, r := range allowedContentTypes { + if r.MatchString(*contentType) { + return true + } + } + + return false +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_security_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_security_group.go new file mode 100644 index 000000000..c0757d9a8 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_security_group.go @@ -0,0 +1,94 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsSecurityGroup() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsSecurityGroupRead, + + Schema: map[string]*schema.Schema{ + "vpc_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "filter": ec2CustomFiltersSchema(), + + "id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "tags": tagsSchemaComputed(), + }, + } +} + +func dataSourceAwsSecurityGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + req := &ec2.DescribeSecurityGroupsInput{} + + if id, idExists := d.GetOk("id"); idExists { + req.GroupIds = []*string{aws.String(id.(string))} + } + + req.Filters = buildEC2AttributeFilterList( + map[string]string{ + "group-name": d.Get("name").(string), + "vpc-id": d.Get("vpc_id").(string), + }, + ) + req.Filters = 
append(req.Filters, buildEC2TagFilterList( + tagsFromMap(d.Get("tags").(map[string]interface{})), + )...) + req.Filters = append(req.Filters, buildEC2CustomFilterList( + d.Get("filter").(*schema.Set), + )...) + if len(req.Filters) == 0 { + // Don't send an empty filters list; the EC2 API won't accept it. + req.Filters = nil + } + + log.Printf("[DEBUG] Describe Security Groups %v\n", req) + resp, err := conn.DescribeSecurityGroups(req) + if err != nil { + return err + } + if resp == nil || len(resp.SecurityGroups) == 0 { + return fmt.Errorf("no matching SecurityGroup found") + } + if len(resp.SecurityGroups) > 1 { + return fmt.Errorf("multiple Security Groups matched; use additional constraints to reduce matches to a single Security Group") + } + + sg := resp.SecurityGroups[0] + + d.SetId(*sg.GroupId) + d.Set("id", sg.GroupId) + d.Set("name", sg.GroupName) + d.Set("description", sg.Description) + d.Set("vpc_id", sg.VpcId) + d.Set("tags", tagsToMap(sg.Tags)) + d.Set("arn", fmt.Sprintf("arn:%s:ec2:%s:%s:security-group/%s", + meta.(*AWSClient).partition, meta.(*AWSClient).region, *sg.OwnerId, *sg.GroupId)) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_sns.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_sns.go new file mode 100644 index 000000000..c02ec328a --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_sns.go @@ -0,0 +1,71 @@ +package aws + +import ( + "fmt" + "regexp" + "time" + + "github.com/aws/aws-sdk-go/service/sns" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsSnsTopic() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsSnsTopicsRead, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + validNamePattern := "^[A-Za-z0-9_-]+$" + validName, nameMatchErr := regexp.MatchString(validNamePattern, value) + if !validName || nameMatchErr != nil { + errors = append(errors, fmt.Errorf( + "%q must match regex '%v'", k, validNamePattern)) + } + return + }, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAwsSnsTopicsRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).snsconn + params := &sns.ListTopicsInput{} + + target := d.Get("name") + var arns []string + err := conn.ListTopicsPages(params, func(page *sns.ListTopicsOutput, lastPage bool) bool { + for _, topic := range page.Topics { + topicPattern := fmt.Sprintf(".*:%v$", target) + matched, regexpErr := regexp.MatchString(topicPattern, *topic.TopicArn) + if matched && regexpErr == nil { + arns = append(arns, *topic.TopicArn) + } + } + + return true + }) + if err != nil { + return errwrap.Wrapf("Error describing topics: {{err}}", err) + } + + if len(arns) == 0 { + return fmt.Errorf("No topic with name %q found in this region.", target) + } + if len(arns) > 1 { + return fmt.Errorf("Multiple topics with name %q found in this region.", target) + } + + d.SetId(time.Now().UTC().String()) + d.Set("arn", arns[0]) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ssm_parameter.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ssm_parameter.go new file mode 100644 index 000000000..388366686 --- /dev/null +++ 
b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_ssm_parameter.go @@ -0,0 +1,63 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssm" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsSsmParameter() *schema.Resource { + return &schema.Resource{ + Read: dataAwsSsmParameterRead, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + }, + } +} + +func dataAwsSsmParameterRead(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + log.Printf("[DEBUG] Reading SSM Parameter: %s", d.Id()) + + paramInput := &ssm.GetParametersInput{ + Names: []*string{ + aws.String(d.Get("name").(string)), + }, + WithDecryption: aws.Bool(true), + } + + resp, err := ssmconn.GetParameters(paramInput) + + if err != nil { + return errwrap.Wrapf("[ERROR] Error describing SSM parameter: {{err}}", err) + } + + if len(resp.InvalidParameters) > 0 { + return fmt.Errorf("[ERROR] SSM Parameter %s is invalid", d.Get("name").(string)) + } + + param := resp.Parameters[0] + d.SetId(*param.Name) + d.Set("name", param.Name) + d.Set("type", param.Type) + d.Set("value", param.Value) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_subnet.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_subnet.go new file mode 100644 index 000000000..188a09dd2 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_subnet.go @@ -0,0 +1,160 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsSubnet() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsSubnetRead, + + Schema: map[string]*schema.Schema{ + "availability_zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "cidr_block": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "ipv6_cidr_block": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "default_for_az": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "filter": ec2CustomFiltersSchema(), + + "id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "state": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "tags": tagsSchemaComputed(), + + "vpc_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "assign_ipv6_address_on_creation": { + Type: schema.TypeBool, + Computed: true, + }, + + "map_public_ip_on_launch": { + Type: schema.TypeBool, + Computed: true, + }, + + "ipv6_cidr_block_association_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAwsSubnetRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + req := &ec2.DescribeSubnetsInput{} + + if id := d.Get("id"); id != "" { + req.SubnetIds = []*string{aws.String(id.(string))} + } + + // We specify default_for_az as boolean, but EC2 filters want + // it to be serialized as a string. 
Note that setting it to + // "false" here does not actually filter by it *not* being + // the default, because Terraform can't distinguish between + // "false" and "not set". + defaultForAzStr := "" + if d.Get("default_for_az").(bool) { + defaultForAzStr = "true" + } + + filters := map[string]string{ + "availabilityZone": d.Get("availability_zone").(string), + "defaultForAz": defaultForAzStr, + "state": d.Get("state").(string), + "vpc-id": d.Get("vpc_id").(string), + } + + if v, ok := d.GetOk("cidr_block"); ok { + filters["cidrBlock"] = v.(string) + } + + if v, ok := d.GetOk("ipv6_cidr_block"); ok { + filters["ipv6-cidr-block-association.ipv6-cidr-block"] = v.(string) + } + + req.Filters = buildEC2AttributeFilterList(filters) + req.Filters = append(req.Filters, buildEC2TagFilterList( + tagsFromMap(d.Get("tags").(map[string]interface{})), + )...) + req.Filters = append(req.Filters, buildEC2CustomFilterList( + d.Get("filter").(*schema.Set), + )...) + if len(req.Filters) == 0 { + // Don't send an empty filters list; the EC2 API won't accept it. + req.Filters = nil + } + + log.Printf("[DEBUG] DescribeSubnets %s\n", req) + resp, err := conn.DescribeSubnets(req) + if err != nil { + return err + } + if resp == nil || len(resp.Subnets) == 0 { + return fmt.Errorf("no matching subnet found") + } + if len(resp.Subnets) > 1 { + return fmt.Errorf("multiple subnets matched; use additional constraints to reduce matches to a single subnet") + } + + subnet := resp.Subnets[0] + + d.SetId(*subnet.SubnetId) + d.Set("id", subnet.SubnetId) + d.Set("vpc_id", subnet.VpcId) + d.Set("availability_zone", subnet.AvailabilityZone) + d.Set("cidr_block", subnet.CidrBlock) + d.Set("default_for_az", subnet.DefaultForAz) + d.Set("state", subnet.State) + d.Set("tags", tagsToMap(subnet.Tags)) + d.Set("assign_ipv6_address_on_creation", subnet.AssignIpv6AddressOnCreation) + d.Set("map_public_ip_on_launch", subnet.MapPublicIpOnLaunch) + + for _, a := range subnet.Ipv6CidrBlockAssociationSet { + if *a.Ipv6CidrBlockState.State == "associated" { //we can only ever have 1 IPv6 block associated at once + d.Set("ipv6_cidr_block_association_id", a.AssociationId) + d.Set("ipv6_cidr_block", a.Ipv6CidrBlock) + } + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_subnet_ids.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_subnet_ids.go new file mode 100644 index 000000000..c1a495aa1 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_subnet_ids.go @@ -0,0 +1,68 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsSubnetIDs() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsSubnetIDsRead, + Schema: map[string]*schema.Schema{ + + "tags": tagsSchemaComputed(), + + "vpc_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "ids": &schema.Schema{ + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } +} + +func dataSourceAwsSubnetIDsRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + req := &ec2.DescribeSubnetsInput{} + + req.Filters = buildEC2AttributeFilterList( + map[string]string{ + "vpc-id": d.Get("vpc_id").(string), + }, + ) + + req.Filters = append(req.Filters, buildEC2TagFilterList( + 
tagsFromMap(d.Get("tags").(map[string]interface{})), + )...) + + log.Printf("[DEBUG] DescribeSubnets %s\n", req) + resp, err := conn.DescribeSubnets(req) + if err != nil { + return err + } + + if resp == nil || len(resp.Subnets) == 0 { + return fmt.Errorf("no matching subnet found for vpc with id %s", d.Get("vpc_id").(string)) + } + + subnets := make([]string, 0) + + for _, subnet := range resp.Subnets { + subnets = append(subnets, *subnet.SubnetId) + } + + d.SetId(d.Get("vpc_id").(string)) + d.Set("ids", subnets) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc.go new file mode 100644 index 000000000..6e09e971d --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc.go @@ -0,0 +1,136 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsVpc() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsVpcRead, + + Schema: map[string]*schema.Schema{ + "cidr_block": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "dhcp_options_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "default": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "filter": ec2CustomFiltersSchema(), + + "id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "instance_tenancy": { + Type: schema.TypeString, + Computed: true, + }, + + "ipv6_cidr_block": { + Type: schema.TypeString, + Computed: true, + }, + + "ipv6_association_id": { + Type: schema.TypeString, + Computed: true, + }, + + "state": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "tags": tagsSchemaComputed(), + }, + } +} + +func dataSourceAwsVpcRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + req := &ec2.DescribeVpcsInput{} + + if id := d.Get("id"); id != "" { + req.VpcIds = []*string{aws.String(id.(string))} + } + + // We specify "default" as boolean, but EC2 filters want + // it to be serialized as a string. Note that setting it to + // "false" here does not actually filter by it *not* being + // the default, because Terraform can't distinguish between + // "false" and "not set". + isDefaultStr := "" + if d.Get("default").(bool) { + isDefaultStr = "true" + } + + req.Filters = buildEC2AttributeFilterList( + map[string]string{ + "cidr": d.Get("cidr_block").(string), + "dhcp-options-id": d.Get("dhcp_options_id").(string), + "isDefault": isDefaultStr, + "state": d.Get("state").(string), + }, + ) + req.Filters = append(req.Filters, buildEC2TagFilterList( + tagsFromMap(d.Get("tags").(map[string]interface{})), + )...) + req.Filters = append(req.Filters, buildEC2CustomFilterList( + d.Get("filter").(*schema.Set), + )...) + if len(req.Filters) == 0 { + // Don't send an empty filters list; the EC2 API won't accept it. 
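+ // An explicitly empty list would still be serialized into the request, + // whereas a nil Filters field is omitted from it entirely.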
+ req.Filters = nil + } + + log.Printf("[DEBUG] DescribeVpcs %s\n", req) + resp, err := conn.DescribeVpcs(req) + if err != nil { + return err + } + if resp == nil || len(resp.Vpcs) == 0 { + return fmt.Errorf("no matching VPC found") + } + if len(resp.Vpcs) > 1 { + return fmt.Errorf("multiple VPCs matched; use additional constraints to reduce matches to a single VPC") + } + + vpc := resp.Vpcs[0] + + d.SetId(*vpc.VpcId) + d.Set("id", vpc.VpcId) + d.Set("cidr_block", vpc.CidrBlock) + d.Set("dhcp_options_id", vpc.DhcpOptionsId) + d.Set("instance_tenancy", vpc.InstanceTenancy) + d.Set("default", vpc.IsDefault) + d.Set("state", vpc.State) + d.Set("tags", tagsToMap(vpc.Tags)) + + if vpc.Ipv6CidrBlockAssociationSet != nil { + d.Set("ipv6_association_id", vpc.Ipv6CidrBlockAssociationSet[0].AssociationId) + d.Set("ipv6_cidr_block", vpc.Ipv6CidrBlockAssociationSet[0].Ipv6CidrBlock) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_endpoint.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_endpoint.go new file mode 100644 index 000000000..c15933129 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_endpoint.go @@ -0,0 +1,103 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsVpcEndpoint() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsVpcEndpointRead, + + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "vpc_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "service_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "policy": { + Type: schema.TypeString, + Computed: true, + }, + "route_table_ids": &schema.Schema{ + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } +} + +func dataSourceAwsVpcEndpointRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + log.Printf("[DEBUG] Reading VPC Endpoints.") + + req := &ec2.DescribeVpcEndpointsInput{} + + if id, ok := d.GetOk("id"); ok { + req.VpcEndpointIds = aws.StringSlice([]string{id.(string)}) + } + + req.Filters = buildEC2AttributeFilterList( + map[string]string{ + "vpc-endpoint-state": d.Get("state").(string), + "vpc-id": d.Get("vpc_id").(string), + "service-name": d.Get("service_name").(string), + }, + ) + if len(req.Filters) == 0 { + // Don't send an empty filters list; the EC2 API won't accept it. 
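+ // Filters end up empty here when none of state, vpc_id or service_name + // were set; the request then returns every endpoint (subject to the id + // constraint above) and the single-match checks below apply.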
+ req.Filters = nil + } + + resp, err := conn.DescribeVpcEndpoints(req) + if err != nil { + return err + } + if resp == nil || len(resp.VpcEndpoints) == 0 { + return fmt.Errorf("no matching VPC endpoint found") + } + if len(resp.VpcEndpoints) > 1 { + return fmt.Errorf("multiple VPC endpoints matched; use additional constraints to reduce matches to a single VPC endpoint") + } + + vpce := resp.VpcEndpoints[0] + policy, err := normalizeJsonString(*vpce.PolicyDocument) + if err != nil { + return errwrap.Wrapf("policy contains an invalid JSON: {{err}}", err) + } + + d.SetId(aws.StringValue(vpce.VpcEndpointId)) + d.Set("id", vpce.VpcEndpointId) + d.Set("state", vpce.State) + d.Set("vpc_id", vpce.VpcId) + d.Set("service_name", vpce.ServiceName) + d.Set("policy", policy) + if err := d.Set("route_table_ids", aws.StringValueSlice(vpce.RouteTableIds)); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_endpoint_service.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_endpoint_service.go new file mode 100644 index 000000000..8860b39a7 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_endpoint_service.go @@ -0,0 +1,56 @@ +package aws + +import ( + "fmt" + "log" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsVpcEndpointService() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsVpcEndpointServiceRead, + + Schema: map[string]*schema.Schema{ + "service": { + Type: schema.TypeString, + Required: true, + }, + "service_name": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAwsVpcEndpointServiceRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + service := d.Get("service").(string) + + log.Printf("[DEBUG] Reading VPC Endpoint Services.") + + request := &ec2.DescribeVpcEndpointServicesInput{} + + resp, err := conn.DescribeVpcEndpointServices(request) + if err != nil { + return fmt.Errorf("Error fetching VPC Endpoint Services: %s", err) + } + + names := aws.StringValueSlice(resp.ServiceNames) + for _, name := range names { + if strings.HasSuffix(name, "."+service) { + d.SetId(strconv.Itoa(hashcode.String(name))) + d.Set("service_name", name) + return nil + } + } + + return fmt.Errorf("VPC Endpoint Service (%s) not found", service) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_peering_connection.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_peering_connection.go new file mode 100644 index 000000000..8d800751f --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpc_peering_connection.go @@ -0,0 +1,143 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsVpcPeeringConnection() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsVpcPeeringConnectionRead, + + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "vpc_id": 
{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "cidr_block": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "peer_vpc_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "peer_owner_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "peer_cidr_block": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "accepter": { + Type: schema.TypeMap, + Computed: true, + Elem: schema.TypeBool, + }, + "requester": { + Type: schema.TypeMap, + Computed: true, + Elem: schema.TypeBool, + }, + "filter": ec2CustomFiltersSchema(), + "tags": tagsSchemaComputed(), + }, + } +} + +func dataSourceAwsVpcPeeringConnectionRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + log.Printf("[DEBUG] Reading VPC Peering Connections.") + + req := &ec2.DescribeVpcPeeringConnectionsInput{} + + if id, ok := d.GetOk("id"); ok { + req.VpcPeeringConnectionIds = aws.StringSlice([]string{id.(string)}) + } + + req.Filters = buildEC2AttributeFilterList( + map[string]string{ + "status-code": d.Get("status").(string), + "requester-vpc-info.vpc-id": d.Get("vpc_id").(string), + "requester-vpc-info.owner-id": d.Get("owner_id").(string), + "requester-vpc-info.cidr-block": d.Get("cidr_block").(string), + "accepter-vpc-info.vpc-id": d.Get("peer_vpc_id").(string), + "accepter-vpc-info.owner-id": d.Get("peer_owner_id").(string), + "accepter-vpc-info.cidr-block": d.Get("peer_cidr_block").(string), + }, + ) + req.Filters = append(req.Filters, buildEC2TagFilterList( + tagsFromMap(d.Get("tags").(map[string]interface{})), + )...) + req.Filters = append(req.Filters, buildEC2CustomFilterList( + d.Get("filter").(*schema.Set), + )...) + if len(req.Filters) == 0 { + // Don't send an empty filters list; the EC2 API won't accept it. 
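+ // Even with no filters left, a lookup by the id attribute is still + // constrained through VpcPeeringConnectionIds above.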
+ req.Filters = nil + } + + resp, err := conn.DescribeVpcPeeringConnections(req) + if err != nil { + return err + } + if resp == nil || len(resp.VpcPeeringConnections) == 0 { + return fmt.Errorf("no matching VPC peering connection found") + } + if len(resp.VpcPeeringConnections) > 1 { + return fmt.Errorf("multiple VPC peering connections matched; use additional constraints to reduce matches to a single VPC peering connection") + } + + pcx := resp.VpcPeeringConnections[0] + + d.SetId(aws.StringValue(pcx.VpcPeeringConnectionId)) + d.Set("id", pcx.VpcPeeringConnectionId) + d.Set("status", pcx.Status.Code) + d.Set("vpc_id", pcx.RequesterVpcInfo.VpcId) + d.Set("owner_id", pcx.RequesterVpcInfo.OwnerId) + d.Set("cidr_block", pcx.RequesterVpcInfo.CidrBlock) + d.Set("peer_vpc_id", pcx.AccepterVpcInfo.VpcId) + d.Set("peer_owner_id", pcx.AccepterVpcInfo.OwnerId) + d.Set("peer_cidr_block", pcx.AccepterVpcInfo.CidrBlock) + d.Set("tags", tagsToMap(pcx.Tags)) + + if pcx.AccepterVpcInfo.PeeringOptions != nil { + if err := d.Set("accepter", flattenPeeringOptions(pcx.AccepterVpcInfo.PeeringOptions)[0]); err != nil { + return err + } + } + + if pcx.RequesterVpcInfo.PeeringOptions != nil { + if err := d.Set("requester", flattenPeeringOptions(pcx.RequesterVpcInfo.PeeringOptions)[0]); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpn_gateway.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpn_gateway.go new file mode 100644 index 000000000..5d088e548 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_vpn_gateway.go @@ -0,0 +1,105 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsVpnGateway() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsVpnGatewayRead, + + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "attached_vpc_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "availability_zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "filter": ec2CustomFiltersSchema(), + "tags": tagsSchemaComputed(), + }, + } +} + +func dataSourceAwsVpnGatewayRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + log.Printf("[DEBUG] Reading VPN Gateways.") + + req := &ec2.DescribeVpnGatewaysInput{} + + if id, ok := d.GetOk("id"); ok { + req.VpnGatewayIds = aws.StringSlice([]string{id.(string)}) + } + + req.Filters = buildEC2AttributeFilterList( + map[string]string{ + "state": d.Get("state").(string), + "availability-zone": d.Get("availability_zone").(string), + }, + ) + if id, ok := d.GetOk("attached_vpc_id"); ok { + req.Filters = append(req.Filters, buildEC2AttributeFilterList( + map[string]string{ + "attachment.state": "attached", + "attachment.vpc-id": id.(string), + }, + )...) + } + req.Filters = append(req.Filters, buildEC2TagFilterList( + tagsFromMap(d.Get("tags").(map[string]interface{})), + )...) + req.Filters = append(req.Filters, buildEC2CustomFilterList( + d.Get("filter").(*schema.Set), + )...) + if len(req.Filters) == 0 { + // Don't send an empty filters list; the EC2 API won't accept it. 
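+ // Note that attached_vpc_id alone already contributes two filters + // (attachment.state and attachment.vpc-id), so the list is only empty + // when no arguments were set at all.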
+ req.Filters = nil + } + + resp, err := conn.DescribeVpnGateways(req) + if err != nil { + return err + } + if resp == nil || len(resp.VpnGateways) == 0 { + return fmt.Errorf("no matching VPN gateway found: %#v", req) + } + if len(resp.VpnGateways) > 1 { + return fmt.Errorf("multiple VPN gateways matched; use additional constraints to reduce matches to a single VPN gateway") + } + + vgw := resp.VpnGateways[0] + + d.SetId(aws.StringValue(vgw.VpnGatewayId)) + d.Set("state", vgw.State) + d.Set("availability_zone", vgw.AvailabilityZone) + d.Set("tags", tagsToMap(vgw.Tags)) + + for _, attachment := range vgw.VpcAttachments { + if *attachment.State == "attached" { + d.Set("attached_vpc_id", attachment.VpcId) + break + } + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/diff_suppress_funcs.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/diff_suppress_funcs.go new file mode 100644 index 000000000..e8c58b813 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/diff_suppress_funcs.go @@ -0,0 +1,77 @@ +package aws + +import ( + "bytes" + "encoding/json" + "log" + "net/url" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/jen20/awspolicyequivalence" +) + +func suppressEquivalentAwsPolicyDiffs(k, old, new string, d *schema.ResourceData) bool { + equivalent, err := awspolicy.PoliciesAreEquivalent(old, new) + if err != nil { + return false + } + + return equivalent +} + +// Suppresses minor version changes to the db_instance engine_version attribute +func suppressAwsDbEngineVersionDiffs(k, old, new string, d *schema.ResourceData) bool { + // First check if the old/new values are nil. + // If both are nil, we have no state to compare the values with, so register a diff. + // This populates the attribute field during a plan/apply with fresh state, allowing + // the attribute to still be used in future resources. 
+ // See https://github.com/hashicorp/terraform/issues/11881 + if old == "" && new == "" { + return false + } + + if v, ok := d.GetOk("auto_minor_version_upgrade"); ok { + if v.(bool) { + // If we're set to auto upgrade minor versions + // ignore a minor version diff between versions + if strings.HasPrefix(old, new) { + log.Printf("[DEBUG] Ignoring minor version diff") + return true + } + } + } + + // Throw a diff by default + return false +} + +func suppressEquivalentJsonDiffs(k, old, new string, d *schema.ResourceData) bool { + ob := bytes.NewBufferString("") + if err := json.Compact(ob, []byte(old)); err != nil { + return false + } + + nb := bytes.NewBufferString("") + if err := json.Compact(nb, []byte(new)); err != nil { + return false + } + + return jsonBytesEqual(ob.Bytes(), nb.Bytes()) +} + +func suppressOpenIdURL(k, old, new string, d *schema.ResourceData) bool { + oldUrl, err := url.Parse(old) + if err != nil { + return false + } + + newUrl, err := url.Parse(new) + if err != nil { + return false + } + + oldUrl.Scheme = "https" + + return oldUrl.String() == newUrl.String() +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/ec2_filters.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/ec2_filters.go new file mode 100644 index 000000000..743d28224 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/ec2_filters.go @@ -0,0 +1,163 @@ +package aws + +import ( + "fmt" + "sort" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + + "github.com/hashicorp/terraform/helper/schema" +) + +// buildEC2AttributeFilterList takes a flat map of scalar attributes (most +// likely values extracted from a *schema.ResourceData on an EC2-querying +// data source) and produces a []*ec2.Filter representing an exact match +// for each of the given non-empty attributes. +// +// The keys of the given attributes map are the attribute names expected +// by the EC2 API, which are usually either in camelcase or with dash-separated +// words. We conventionally map these to underscore-separated identifiers +// with the same words when presenting these as data source query attributes +// in Terraform. +// +// It is the caller's responsibility to transform any non-string values into +// the appropriate string serialization required by the AWS API when +// encoding the given filter. Any attributes given with empty string values +// are ignored, assuming that the user wishes to leave that attribute +// unconstrained while filtering. +// +// The purpose of this function is to create values to pass in +// for the "Filters" attribute on most of the "Describe..." API functions in +// the EC2 API, to aid in the implementation of Terraform data sources that +// retrieve data about EC2 objects. +func buildEC2AttributeFilterList(attrs map[string]string) []*ec2.Filter { + var filters []*ec2.Filter + + // sort the filters by name to make the output deterministic + var names []string + for filterName := range attrs { + names = append(names, filterName) + } + + sort.Strings(names) + + for _, filterName := range names { + value := attrs[filterName] + if value == "" { + continue + } + + filters = append(filters, &ec2.Filter{ + Name: aws.String(filterName), + Values: []*string{aws.String(value)}, + }) + } + + return filters +} + +// buildEC2TagFilterList takes a []*ec2.Tag and produces a []*ec2.Filter that +// represents exact matches for all of the tag key/value pairs given in +// the tag set.
+// +// The purpose of this function is to create values to pass in for +// the "Filters" attribute on most of the "Describe..." API functions +// in the EC2 API, to implement filtering by tag values e.g. in Terraform +// data sources that retrieve data about EC2 objects. +// +// It is conventional for an EC2 data source to include an attribute called +// "tags" which conforms to the schema returned by the tagsSchema() function. +// The value of this can then be converted to a tags slice using tagsFromMap, +// and the result finally passed in to this function. +// +// In Terraform configuration this would then look like this, to constrain +// results by name: +// +// tags { +// Name = "my-awesome-subnet" +// } +func buildEC2TagFilterList(tags []*ec2.Tag) []*ec2.Filter { + filters := make([]*ec2.Filter, len(tags)) + + for i, tag := range tags { + filters[i] = &ec2.Filter{ + Name: aws.String(fmt.Sprintf("tag:%s", *tag.Key)), + Values: []*string{tag.Value}, + } + } + + return filters +} + +// ec2CustomFiltersSchema returns a *schema.Schema that represents +// a set of custom filtering criteria that a user can specify as input +// to a data source that wraps one of the many "Describe..." API calls +// in the EC2 API. +// +// It is conventional for an attribute of this type to be included +// as a top-level attribute called "filter". This is the "catch all" for +// filter combinations that are not possible to express using scalar +// attributes or tags. In Terraform configuration, the custom filter blocks +// then look like this: +// +// filter { +// name = "availabilityZone" +// values = ["us-west-2a", "us-west-2b"] +// } +func ec2CustomFiltersSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "values": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + } +} + +// buildEC2CustomFilterList takes the set value extracted from a schema +// attribute conforming to the schema returned by ec2CustomFiltersSchema, +// and transforms it into a []*ec2.Filter representing the same filter +// expressions, ready to pass into the "Filters" attribute on most +// of the "Describe..." functions in the EC2 API. +// +// This function is intended only to be used in conjunction with +// ec2CustomFiltersSchema. See the docs on that function for more details +// on the configuration pattern this is intended to support.
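+// +// As an illustrative sketch (the filter name and values here are +// hypothetical), a configuration block like: +// +// filter { +// name = "tag:Environment" +// values = ["production", "staging"] +// } +// +// would be transformed into the equivalent of: +// +// &ec2.Filter{ +// Name: aws.String("tag:Environment"), +// Values: []*string{aws.String("production"), aws.String("staging")}, +// }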
+func buildEC2CustomFilterList(filterSet *schema.Set) []*ec2.Filter { + if filterSet == nil { + return []*ec2.Filter{} + } + + customFilters := filterSet.List() + filters := make([]*ec2.Filter, len(customFilters)) + + for filterIdx, customFilterI := range customFilters { + customFilterMapI := customFilterI.(map[string]interface{}) + name := customFilterMapI["name"].(string) + valuesI := customFilterMapI["values"].(*schema.Set).List() + values := make([]*string, len(valuesI)) + for valueIdx, valueI := range valuesI { + values[valueIdx] = aws.String(valueI.(string)) + } + + filters[filterIdx] = &ec2.Filter{ + Name: &name, + Values: values, + } + } + + return filters +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/hosted_zones.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/hosted_zones.go new file mode 100644 index 000000000..131f03ebd --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/hosted_zones.go @@ -0,0 +1,28 @@ +package aws + +// This list is copied from +// http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints +// It currently cannot be generated from the API json. +var hostedZoneIDsMap = map[string]string{ + "us-east-1": "Z3AQBSTGFYJSTF", + "us-east-2": "Z2O1EMRO9K5GLX", + "us-west-2": "Z3BJ6K6RIION7M", + "us-west-1": "Z2F56UZL2M1ACD", + "eu-west-1": "Z1BKCTXD74EZPE", + "eu-west-2": "Z3GKZC51ZF0DB4", + "eu-central-1": "Z21DNDUVLTQW6Q", + "ap-south-1": "Z11RGJOFQNVJUP", + "ap-southeast-1": "Z3O0J2DXBE1FTB", + "ap-southeast-2": "Z1WCIGYICN2BYD", + "ap-northeast-1": "Z2M4EHUR26P7ZW", + "ap-northeast-2": "Z3W03O7B5YMIYP", + "ca-central-1": "Z1QDHH18159H29", + "sa-east-1": "Z7KQH4QJS55SO", + "us-gov-west-1": "Z31GFT0UA1I2HV", +} + +// Returns the hosted zone ID for an S3 website endpoint region. This can be +// used as input to the aws_route53_record resource's zone_id argument. 
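+// +// For example, with the map above: +// +// HostedZoneIDForRegion("us-east-1") // "Z3AQBSTGFYJSTF" +// +// A region missing from the map yields the empty string, since a missing +// key returns the map's zero value.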
+func HostedZoneIDForRegion(region string) string { + return hostedZoneIDsMap[region] +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/iam_policy_model.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/iam_policy_model.go new file mode 100644 index 000000000..81306971d --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/iam_policy_model.go @@ -0,0 +1,112 @@ +package aws + +import ( + "encoding/json" + "sort" +) + +type IAMPolicyDoc struct { + Version string `json:",omitempty"` + Id string `json:",omitempty"` + Statements []*IAMPolicyStatement `json:"Statement"` +} + +type IAMPolicyStatement struct { + Sid string + Effect string `json:",omitempty"` + Actions interface{} `json:"Action,omitempty"` + NotActions interface{} `json:"NotAction,omitempty"` + Resources interface{} `json:"Resource,omitempty"` + NotResources interface{} `json:"NotResource,omitempty"` + Principals IAMPolicyStatementPrincipalSet `json:"Principal,omitempty"` + NotPrincipals IAMPolicyStatementPrincipalSet `json:"NotPrincipal,omitempty"` + Conditions IAMPolicyStatementConditionSet `json:"Condition,omitempty"` +} + +type IAMPolicyStatementPrincipal struct { + Type string + Identifiers interface{} +} + +type IAMPolicyStatementCondition struct { + Test string + Variable string + Values interface{} +} + +type IAMPolicyStatementPrincipalSet []IAMPolicyStatementPrincipal +type IAMPolicyStatementConditionSet []IAMPolicyStatementCondition + +func (ps IAMPolicyStatementPrincipalSet) MarshalJSON() ([]byte, error) { + raw := map[string]interface{}{} + + // As a special case, IAM considers the string value "*" to be + // equivalent to "AWS": "*", and normalizes policies as such. + // We'll follow their lead and do the same normalization here. + // IAM also considers {"*": "*"} to be equivalent to this. + if len(ps) == 1 { + p := ps[0] + if p.Type == "AWS" || p.Type == "*" { + if sv, ok := p.Identifiers.(string); ok && sv == "*" { + return []byte(`"*"`), nil + } + + if av, ok := p.Identifiers.([]string); ok && len(av) == 1 && av[0] == "*" { + return []byte(`"*"`), nil + } + } + } + + for _, p := range ps { + switch i := p.Identifiers.(type) { + case []string: + if _, ok := raw[p.Type]; !ok { + raw[p.Type] = make([]string, 0, len(i)) + } + sort.Sort(sort.Reverse(sort.StringSlice(i))) + raw[p.Type] = append(raw[p.Type].([]string), i...) + case string: + raw[p.Type] = i + default: + panic("Unsupported data type for IAMPolicyStatementPrincipalSet") + } + } + + return json.Marshal(&raw) +} + +func (cs IAMPolicyStatementConditionSet) MarshalJSON() ([]byte, error) { + raw := map[string]map[string]interface{}{} + + for _, c := range cs { + if _, ok := raw[c.Test]; !ok { + raw[c.Test] = map[string]interface{}{} + } + switch i := c.Values.(type) { + case []string: + if _, ok := raw[c.Test][c.Variable]; !ok { + raw[c.Test][c.Variable] = make([]string, 0, len(i)) + } + sort.Sort(sort.Reverse(sort.StringSlice(i))) + raw[c.Test][c.Variable] = append(raw[c.Test][c.Variable].([]string), i...) 
+ case string: + raw[c.Test][c.Variable] = i + default: + panic("Unsupported data type for IAMPolicyStatementConditionSet") + } + } + + return json.Marshal(&raw) +} + +func iamPolicyDecodeConfigStringList(lI []interface{}) interface{} { + if len(lI) == 1 { + return lI[0].(string) + } + ret := make([]string, len(lI)) + for i, vI := range lI { + ret[i] = vI.(string) + } + sort.Sort(sort.Reverse(sort.StringSlice(ret))) + return ret +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_cloudfront_distribution.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_cloudfront_distribution.go new file mode 100644 index 000000000..acfc836dc --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_cloudfront_distribution.go @@ -0,0 +1,32 @@ +package aws + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudfront" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsCloudFrontDistributionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + // This is a non API attribute + // We are merely setting this to the same value as the Default setting in the schema + d.Set("retain_on_delete", false) + + conn := meta.(*AWSClient).cloudfrontconn + id := d.Id() + resp, err := conn.GetDistributionConfig(&cloudfront.GetDistributionConfigInput{ + Id: aws.String(id), + }) + + if err != nil { + return nil, err + } + + distConfig := resp.DistributionConfig + results := make([]*schema.ResourceData, 1) + err = flattenDistributionConfig(d, distConfig) + if err != nil { + return nil, err + } + results[0] = d + return results, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_db_event_subscription.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_db_event_subscription.go new file mode 100644 index 000000000..82e5317ea --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_db_event_subscription.go @@ -0,0 +1,17 @@ +package aws + +import "github.com/hashicorp/terraform/helper/schema" + +func resourceAwsDbEventSubscriptionImport( + d *schema.ResourceData, + meta interface{}) ([]*schema.ResourceData, error) { + + // The db event subscription Read function only needs the "name" of the event subscription + // in order to populate the necessary values. This takes the "id" from the supplied StateFunc + // and sets it as the "name" attribute, as described in the import documentation. 
This allows + // the Read function to actually succeed and set the ID of the resource + results := make([]*schema.ResourceData, 1, 1) + d.Set("name", d.Id()) + results[0] = d + return results, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_network_acl.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_network_acl.go new file mode 100644 index 000000000..bcc221d0e --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_network_acl.go @@ -0,0 +1,95 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +// Network ACLs import their rules and associations +func resourceAwsNetworkAclImportState( + d *schema.ResourceData, + meta interface{}) ([]*schema.ResourceData, error) { + conn := meta.(*AWSClient).ec2conn + + // First query the resource itself + resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{ + NetworkAclIds: []*string{aws.String(d.Id())}, + }) + if err != nil { + return nil, err + } + if resp == nil || len(resp.NetworkAcls) < 1 || resp.NetworkAcls[0] == nil { + return nil, fmt.Errorf("network ACL %s is not found", d.Id()) + } + acl := resp.NetworkAcls[0] + + // Start building our results + results := make([]*schema.ResourceData, 1, + 2+len(acl.Associations)+len(acl.Entries)) + results[0] = d + + /* + { + // Construct the entries + subResource := resourceAwsNetworkAclRule() + for _, entry := range acl.Entries { + // Minimal data for route + d := subResource.Data(nil) + d.SetType("aws_network_acl_rule") + d.Set("network_acl_id", acl.NetworkAclId) + d.Set("rule_number", entry.RuleNumber) + d.Set("egress", entry.Egress) + d.Set("protocol", entry.Protocol) + d.SetId(networkAclIdRuleNumberEgressHash( + d.Get("network_acl_id").(string), + d.Get("rule_number").(int), + d.Get("egress").(bool), + d.Get("protocol").(string))) + results = append(results, d) + } + } + + { + // Construct the associations + subResource := resourceAwsRouteTableAssociation() + for _, assoc := range table.Associations { + if *assoc.Main { + // Ignore + continue + } + + // Minimal data for route + d := subResource.Data(nil) + d.SetType("aws_route_table_association") + d.Set("route_table_id", assoc.RouteTableId) + d.SetId(*assoc.RouteTableAssociationId) + results = append(results, d) + } + } + + { + // Construct the main associations. We could do this above but + // I keep this as a separate section since it is a separate resource. 
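+ // (Note: this disabled block still refers to "table" and to the route + // table association resources; it reads as an adaptation of the route + // table import logic and would not compile if uncommented as-is.)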
+ subResource := resourceAwsMainRouteTableAssociation() + for _, assoc := range table.Associations { + if !*assoc.Main { + // Ignore + continue + } + + // Minimal data for route + d := subResource.Data(nil) + d.SetType("aws_main_route_table_association") + d.Set("route_table_id", id) + d.Set("vpc_id", table.VpcId) + d.SetId(*assoc.RouteTableAssociationId) + results = append(results, d) + } + } + */ + + return results, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_route_table.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_route_table.go new file mode 100644 index 000000000..185d99411 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_route_table.go @@ -0,0 +1,99 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +// Route table import also imports all the rules +func resourceAwsRouteTableImportState( + d *schema.ResourceData, + meta interface{}) ([]*schema.ResourceData, error) { + conn := meta.(*AWSClient).ec2conn + + // First query the resource itself + id := d.Id() + resp, err := conn.DescribeRouteTables(&ec2.DescribeRouteTablesInput{ + RouteTableIds: []*string{&id}, + }) + if err != nil { + return nil, err + } + if len(resp.RouteTables) < 1 || resp.RouteTables[0] == nil { + return nil, fmt.Errorf("route table %s is not found", id) + } + table := resp.RouteTables[0] + + // Start building our results + results := make([]*schema.ResourceData, 1, + 2+len(table.Associations)+len(table.Routes)) + results[0] = d + + { + // Construct the routes + subResource := resourceAwsRoute() + for _, route := range table.Routes { + // Ignore the local/default route + if route.GatewayId != nil && *route.GatewayId == "local" { + continue + } + + if route.DestinationPrefixListId != nil { + // Skipping because VPC endpoint routes are handled separately + // See aws_vpc_endpoint + continue + } + + // Minimal data for route + d := subResource.Data(nil) + d.SetType("aws_route") + d.Set("route_table_id", id) + d.Set("destination_cidr_block", route.DestinationCidrBlock) + d.Set("destination_ipv6_cidr_block", route.DestinationIpv6CidrBlock) + d.SetId(routeIDHash(d, route)) + results = append(results, d) + } + } + + { + // Construct the associations + subResource := resourceAwsRouteTableAssociation() + for _, assoc := range table.Associations { + if *assoc.Main { + // Ignore + continue + } + + // Minimal data for route + d := subResource.Data(nil) + d.SetType("aws_route_table_association") + d.Set("route_table_id", assoc.RouteTableId) + d.SetId(*assoc.RouteTableAssociationId) + results = append(results, d) + } + } + + { + // Construct the main associations. We could do this above but + // I keep this as a separate section since it is a separate resource. 
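+ // A "main" association is the implicit one that marks this as the + // VPC's main route table; it maps to the dedicated + // aws_main_route_table_association resource rather than to + // aws_route_table_association, hence the separate loop.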
+ subResource := resourceAwsMainRouteTableAssociation() + for _, assoc := range table.Associations { + if !*assoc.Main { + // Ignore + continue + } + + // Minimal data for route + d := subResource.Data(nil) + d.SetType("aws_main_route_table_association") + d.Set("route_table_id", id) + d.Set("vpc_id", table.VpcId) + d.SetId(*assoc.RouteTableAssociationId) + results = append(results, d) + } + } + + return results, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_s3_bucket.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_s3_bucket.go new file mode 100644 index 000000000..ba2129e1f --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_s3_bucket.go @@ -0,0 +1,39 @@ +package aws + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsS3BucketImportState( + d *schema.ResourceData, + meta interface{}) ([]*schema.ResourceData, error) { + + results := make([]*schema.ResourceData, 1, 1) + results[0] = d + + conn := meta.(*AWSClient).s3conn + pol, err := conn.GetBucketPolicy(&s3.GetBucketPolicyInput{ + Bucket: aws.String(d.Id()), + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoSuchBucketPolicy" { + // Bucket without policy + return results, nil + } + return nil, errwrap.Wrapf("Error importing AWS S3 bucket policy: {{err}}", err) + } + + policy := resourceAwsS3BucketPolicy() + pData := policy.Data(nil) + pData.SetId(d.Id()) + pData.SetType("aws_s3_bucket_policy") + pData.Set("bucket", d.Id()) + pData.Set("policy", pol.Policy) + results = append(results, pData) + + return results, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_security_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_security_group.go new file mode 100644 index 000000000..d1a6341f0 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/import_aws_security_group.go @@ -0,0 +1,186 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/schema" +) + +// Security group import fans out to multiple resources due to the +// security group rules. Instead of creating one resource with nested +// rules, we use the best practices approach of one resource per rule. +func resourceAwsSecurityGroupImportState( + d *schema.ResourceData, + meta interface{}) ([]*schema.ResourceData, error) { + conn := meta.(*AWSClient).ec2conn + + // First query the security group + sgRaw, _, err := SGStateRefreshFunc(conn, d.Id())() + if err != nil { + return nil, err + } + if sgRaw == nil { + return nil, fmt.Errorf("security group not found") + } + sg := sgRaw.(*ec2.SecurityGroup) + + // Start building our results + results := make([]*schema.ResourceData, 1, + 1+len(sg.IpPermissions)+len(sg.IpPermissionsEgress)) + results[0] = d + + // Construct the rules + permMap := map[string][]*ec2.IpPermission{ + "ingress": sg.IpPermissions, + "egress": sg.IpPermissionsEgress, + } + for ruleType, perms := range permMap { + for _, perm := range perms { + ds, err := resourceAwsSecurityGroupImportStatePerm(sg, ruleType, perm) + if err != nil { + return nil, err + } + results = append(results, ds...) 
+ } + } + + return results, nil +} + +func resourceAwsSecurityGroupImportStatePerm(sg *ec2.SecurityGroup, ruleType string, perm *ec2.IpPermission) ([]*schema.ResourceData, error) { + /* + Create a separate Security Group Rule for: + * The collection of IpRanges (cidr_blocks) + * The collection of Ipv6Ranges (ipv6_cidr_blocks) + * Each individual UserIdGroupPair (source_security_group_id) + + If, for example, a security group has rules for: + * 2 IpRanges + * 2 Ipv6Ranges + * 2 UserIdGroupPairs + + This would generate 4 security group rules: + * 1 for the collection of IpRanges + * 1 for the collection of Ipv6Ranges + * 1 for the first UserIdGroupPair + * 1 for the second UserIdGroupPair + */ + var result []*schema.ResourceData + + if perm.IpRanges != nil { + p := &ec2.IpPermission{ + FromPort: perm.FromPort, + IpProtocol: perm.IpProtocol, + PrefixListIds: perm.PrefixListIds, + ToPort: perm.ToPort, + IpRanges: perm.IpRanges, + } + + r, err := resourceAwsSecurityGroupImportStatePermPair(sg, ruleType, p) + if err != nil { + return nil, err + } + result = append(result, r) + } + + if perm.Ipv6Ranges != nil { + p := &ec2.IpPermission{ + FromPort: perm.FromPort, + IpProtocol: perm.IpProtocol, + PrefixListIds: perm.PrefixListIds, + ToPort: perm.ToPort, + Ipv6Ranges: perm.Ipv6Ranges, + } + + r, err := resourceAwsSecurityGroupImportStatePermPair(sg, ruleType, p) + if err != nil { + return nil, err + } + result = append(result, r) + } + + if len(perm.UserIdGroupPairs) > 0 { + for _, pair := range perm.UserIdGroupPairs { + p := &ec2.IpPermission{ + FromPort: perm.FromPort, + IpProtocol: perm.IpProtocol, + PrefixListIds: perm.PrefixListIds, + ToPort: perm.ToPort, + UserIdGroupPairs: []*ec2.UserIdGroupPair{pair}, + } + + r, err := resourceAwsSecurityGroupImportStatePermPair(sg, ruleType, p) + if err != nil { + return nil, err + } + result = append(result, r) + } + } + + if len(result) == 0 && len(perm.PrefixListIds) > 0 { + p := &ec2.IpPermission{ + FromPort: perm.FromPort, + IpProtocol: perm.IpProtocol, + PrefixListIds: perm.PrefixListIds, + ToPort: perm.ToPort, + } + + r, err := resourceAwsSecurityGroupImportStatePermPair(sg, ruleType, p) + if err != nil { + return nil, err + } + result = append(result, r) + } + + return result, nil +} + +func resourceAwsSecurityGroupImportStatePermPair(sg *ec2.SecurityGroup, ruleType string, perm *ec2.IpPermission) (*schema.ResourceData, error) { + // Construct the rule. We do this by populating the absolute + // minimum necessary for Refresh on the rule to work. This + // happens to be a lot of fields since they're almost all needed + // for de-dupping. + sgId := sg.GroupId + id := ipPermissionIDHash(*sgId, ruleType, perm) + ruleResource := resourceAwsSecurityGroupRule() + d := ruleResource.Data(nil) + d.SetId(id) + d.SetType("aws_security_group_rule") + d.Set("security_group_id", sgId) + d.Set("type", ruleType) + + // 'self' is false by default. Below, we range over the group ids and set true + // if the parent sg id is found + d.Set("self", false) + + if len(perm.UserIdGroupPairs) > 0 { + s := perm.UserIdGroupPairs[0] + + // Check for Pair that is the same as the Security Group, to denote self.
+ // Otherwise, mark the group id in source_security_group_id + isVPC := sg.VpcId != nil && *sg.VpcId != "" + if isVPC { + if *s.GroupId == *sg.GroupId { + d.Set("self", true) + // prune the self reference from the UserIdGroupPairs, so we don't + // have duplicate sg ids (both self and in source_security_group_id) + perm.UserIdGroupPairs = append(perm.UserIdGroupPairs[:0], perm.UserIdGroupPairs[0+1:]...) + } + } else { + if *s.GroupName == *sg.GroupName { + d.Set("self", true) + // prune the self reference from the UserIdGroupPairs, so we don't + // have duplicate sg ids (both self and in source_security_group_id) + perm.UserIdGroupPairs = append(perm.UserIdGroupPairs[:0], perm.UserIdGroupPairs[0+1:]...) + } + } + } + + if err := setFromIPPerm(d, sg, perm); err != nil { + return nil, errwrap.Wrapf("Error importing AWS Security Group: {{err}}", err) + } + + return d, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/network_acl_entry.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/network_acl_entry.go new file mode 100644 index 000000000..c57f82222 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/network_acl_entry.go @@ -0,0 +1,141 @@ +package aws + +import ( + "fmt" + "net" + "strconv" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" +) + +func expandNetworkAclEntries(configured []interface{}, entryType string) ([]*ec2.NetworkAclEntry, error) { + entries := make([]*ec2.NetworkAclEntry, 0, len(configured)) + for _, eRaw := range configured { + data := eRaw.(map[string]interface{}) + protocol := data["protocol"].(string) + p, err := strconv.Atoi(protocol) + if err != nil { + var ok bool + p, ok = protocolIntegers()[protocol] + if !ok { + return nil, fmt.Errorf("Invalid Protocol %s for rule %#v", protocol, data) + } + } + + e := &ec2.NetworkAclEntry{ + Protocol: aws.String(strconv.Itoa(p)), + PortRange: &ec2.PortRange{ + From: aws.Int64(int64(data["from_port"].(int))), + To: aws.Int64(int64(data["to_port"].(int))), + }, + Egress: aws.Bool(entryType == "egress"), + RuleAction: aws.String(data["action"].(string)), + RuleNumber: aws.Int64(int64(data["rule_no"].(int))), + } + + if v, ok := data["ipv6_cidr_block"]; ok { + e.Ipv6CidrBlock = aws.String(v.(string)) + } + + if v, ok := data["cidr_block"]; ok { + e.CidrBlock = aws.String(v.(string)) + } + + // Specify additional required fields for ICMP + if p == 1 { + e.IcmpTypeCode = &ec2.IcmpTypeCode{} + if v, ok := data["icmp_code"]; ok { + e.IcmpTypeCode.Code = aws.Int64(int64(v.(int))) + } + if v, ok := data["icmp_type"]; ok { + e.IcmpTypeCode.Type = aws.Int64(int64(v.(int))) + } + } + + entries = append(entries, e) + } + return entries, nil +} + +func flattenNetworkAclEntries(list []*ec2.NetworkAclEntry) []map[string]interface{} { + entries := make([]map[string]interface{}, 0, len(list)) + + for _, entry := range list { + + newEntry := map[string]interface{}{ + "from_port": *entry.PortRange.From, + "to_port": *entry.PortRange.To, + "action": *entry.RuleAction, + "rule_no": *entry.RuleNumber, + "protocol": *entry.Protocol, + } + + if entry.CidrBlock != nil { + newEntry["cidr_block"] = *entry.CidrBlock + } + + if entry.Ipv6CidrBlock != nil { + newEntry["ipv6_cidr_block"] = *entry.Ipv6CidrBlock + } + + entries = append(entries, newEntry) + } + + return entries + +} + +func protocolStrings(protocolIntegers map[string]int) map[int]string { + protocolStrings := make(map[int]string, len(protocolIntegers)) + for k, v := range 
protocolIntegers { + protocolStrings[v] = k + } + + return protocolStrings +} + +func protocolIntegers() map[string]int { + var protocolIntegers = make(map[string]int) + protocolIntegers = map[string]int{ + // defined at https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml + "ah": 51, + "esp": 50, + "udp": 17, + "tcp": 6, + "icmp": 1, + "all": -1, + "vrrp": 112, + } + return protocolIntegers +} + +// expectedPortPair stores a pair of ports we expect to see together. +type expectedPortPair struct { + to_port int64 + from_port int64 +} + +// validatePorts ensures the ports and protocol match expected +// values. +func validatePorts(to int64, from int64, expected expectedPortPair) bool { + if to != expected.to_port || from != expected.from_port { + return false + } + + return true +} + +// validateCIDRBlock ensures the passed CIDR block represents an implied +// network, and not an overly-specified IP address. +func validateCIDRBlock(cidr string) error { + _, ipnet, err := net.ParseCIDR(cidr) + if err != nil { + return err + } + if ipnet.String() != cidr { + return fmt.Errorf("%s is not a valid mask; did you mean %s?", cidr, ipnet) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/opsworks_layers.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/opsworks_layers.go new file mode 100644 index 000000000..c4bfeb6b2 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/opsworks_layers.go @@ -0,0 +1,645 @@ +package aws + +import ( + "fmt" + "log" + "strconv" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/opsworks" +) + +// OpsWorks has a single concept of "layer" which represents several different +// layer types. The differences between these are in some extra properties that +// get packed into an "Attributes" map, but in the OpsWorks UI these are presented +// as first-class options, and so Terraform prefers to expose them this way and +// hide the implementation detail that they are all packed into a single type +// in the underlying API. +// +// This file contains utilities that are shared between all of the concrete +// layer resource types, which have names matching aws_opsworks_*_layer . 
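+// +// As a rough, hypothetical sketch of the pattern (the real definitions +// live in the individual aws_opsworks_*_layer resource files, and the +// attribute shown here is illustrative only): +// +// memcached := &opsworksLayerType{ +// TypeName: "memcached", +// DefaultLayerName: "Memcached", +// Attributes: map[string]*opsworksLayerTypeAttribute{ +// "allocated_memory": &opsworksLayerTypeAttribute{ +// AttrName: "MemcachedMemory", +// Type: schema.TypeInt, +// Default: 512, +// }, +// }, +// } +// resource := memcached.SchemaResource()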
+ +type opsworksLayerTypeAttribute struct { + AttrName string + Type schema.ValueType + Default interface{} + Required bool + WriteOnly bool +} + +type opsworksLayerType struct { + TypeName string + DefaultLayerName string + Attributes map[string]*opsworksLayerTypeAttribute + CustomShortName bool +} + +var ( + opsworksTrueString = "true" + opsworksFalseString = "false" +) + +func (lt *opsworksLayerType) SchemaResource() *schema.Resource { + resourceSchema := map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "auto_assign_elastic_ips": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "auto_assign_public_ips": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "custom_instance_profile_arn": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "elastic_load_balancer": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "custom_setup_recipes": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "custom_configure_recipes": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "custom_deploy_recipes": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "custom_undeploy_recipes": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "custom_shutdown_recipes": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "custom_security_group_ids": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "custom_json": &schema.Schema{ + Type: schema.TypeString, + StateFunc: normalizeJson, + Optional: true, + }, + + "auto_healing": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + + "install_updates_on_boot": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + + "instance_shutdown_timeout": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 120, + }, + + "drain_elb_on_shutdown": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + + "system_packages": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "stack_id": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + + "use_ebs_optimized_instances": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "ebs_volume": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + + "iops": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 0, + }, + + "mount_point": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "number_of_disks": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + + "raid_level": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + }, + + "size": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "standard", + }, + }, + }, + Set: func(v interface{}) int { + m := v.(map[string]interface{}) + 
return hashcode.String(m["mount_point"].(string))
+			},
+		},
+	}
+
+	if lt.CustomShortName {
+		resourceSchema["short_name"] = &schema.Schema{
+			Type:     schema.TypeString,
+			Required: true,
+		}
+	}
+
+	if lt.DefaultLayerName != "" {
+		resourceSchema["name"] = &schema.Schema{
+			Type:     schema.TypeString,
+			Optional: true,
+			Default:  lt.DefaultLayerName,
+		}
+	} else {
+		resourceSchema["name"] = &schema.Schema{
+			Type:     schema.TypeString,
+			Required: true,
+		}
+	}
+
+	for key, def := range lt.Attributes {
+		resourceSchema[key] = &schema.Schema{
+			Type:     def.Type,
+			Default:  def.Default,
+			Required: def.Required,
+			Optional: !def.Required,
+		}
+	}
+
+	return &schema.Resource{
+		Read: func(d *schema.ResourceData, meta interface{}) error {
+			client := meta.(*AWSClient).opsworksconn
+			return lt.Read(d, client)
+		},
+		Create: func(d *schema.ResourceData, meta interface{}) error {
+			client := meta.(*AWSClient).opsworksconn
+			return lt.Create(d, client)
+		},
+		Update: func(d *schema.ResourceData, meta interface{}) error {
+			client := meta.(*AWSClient).opsworksconn
+			return lt.Update(d, client)
+		},
+		Delete: func(d *schema.ResourceData, meta interface{}) error {
+			client := meta.(*AWSClient).opsworksconn
+			return lt.Delete(d, client)
+		},
+		Importer: &schema.ResourceImporter{
+			State: schema.ImportStatePassthrough,
+		},
+
+		Schema: resourceSchema,
+	}
+}
+
+func (lt *opsworksLayerType) Read(d *schema.ResourceData, client *opsworks.OpsWorks) error {
+
+	req := &opsworks.DescribeLayersInput{
+		LayerIds: []*string{
+			aws.String(d.Id()),
+		},
+	}
+
+	log.Printf("[DEBUG] Reading OpsWorks layer: %s", d.Id())
+
+	resp, err := client.DescribeLayers(req)
+	if err != nil {
+		if awserr, ok := err.(awserr.Error); ok {
+			if awserr.Code() == "ResourceNotFoundException" {
+				d.SetId("")
+				return nil
+			}
+		}
+		return err
+	}
+
+	layer := resp.Layers[0]
+	d.Set("id", layer.LayerId)
+	d.Set("auto_assign_elastic_ips", layer.AutoAssignElasticIps)
+	d.Set("auto_assign_public_ips", layer.AutoAssignPublicIps)
+	d.Set("custom_instance_profile_arn", layer.CustomInstanceProfileArn)
+	d.Set("custom_security_group_ids", flattenStringList(layer.CustomSecurityGroupIds))
+	d.Set("auto_healing", layer.EnableAutoHealing)
+	d.Set("install_updates_on_boot", layer.InstallUpdatesOnBoot)
+	d.Set("name", layer.Name)
+	d.Set("system_packages", flattenStringList(layer.Packages))
+	d.Set("stack_id", layer.StackId)
+	d.Set("use_ebs_optimized_instances", layer.UseEbsOptimizedInstances)
+
+	if lt.CustomShortName {
+		d.Set("short_name", layer.Shortname)
+	}
+
+	if v := layer.CustomJson; v == nil {
+		if err := d.Set("custom_json", ""); err != nil {
+			return err
+		}
+	} else if err := d.Set("custom_json", normalizeJson(*v)); err != nil {
+		return err
+	}
+
+	lt.SetAttributeMap(d, layer.Attributes)
+	lt.SetLifecycleEventConfiguration(d, layer.LifecycleEventConfiguration)
+	lt.SetCustomRecipes(d, layer.CustomRecipes)
+	lt.SetVolumeConfigurations(d, layer.VolumeConfigurations)
+
+	// Get the ELB attached to this layer, if any.
+	elbRequest := &opsworks.DescribeElasticLoadBalancersInput{
+		LayerIds: []*string{
+			aws.String(d.Id()),
+		},
+	}
+	loadBalancers, err := client.DescribeElasticLoadBalancers(elbRequest)
+	if err != nil {
+		return err
+	}
+
+	if len(loadBalancers.ElasticLoadBalancers) == 0 {
+		d.Set("elastic_load_balancer", "")
+	} else {
+		loadBalancer := loadBalancers.ElasticLoadBalancers[0]
+		if loadBalancer != nil {
+			d.Set("elastic_load_balancer", loadBalancer.ElasticLoadBalancerName)
+		}
+	}
+
+	return nil
+}
+
+func (lt 
*opsworksLayerType) Create(d *schema.ResourceData, client *opsworks.OpsWorks) error {
+
+	req := &opsworks.CreateLayerInput{
+		AutoAssignElasticIps:        aws.Bool(d.Get("auto_assign_elastic_ips").(bool)),
+		AutoAssignPublicIps:         aws.Bool(d.Get("auto_assign_public_ips").(bool)),
+		CustomInstanceProfileArn:    aws.String(d.Get("custom_instance_profile_arn").(string)),
+		CustomRecipes:               lt.CustomRecipes(d),
+		CustomSecurityGroupIds:      expandStringSet(d.Get("custom_security_group_ids").(*schema.Set)),
+		EnableAutoHealing:           aws.Bool(d.Get("auto_healing").(bool)),
+		InstallUpdatesOnBoot:        aws.Bool(d.Get("install_updates_on_boot").(bool)),
+		LifecycleEventConfiguration: lt.LifecycleEventConfiguration(d),
+		Name:                        aws.String(d.Get("name").(string)),
+		Packages:                    expandStringSet(d.Get("system_packages").(*schema.Set)),
+		Type:                        aws.String(lt.TypeName),
+		StackId:                     aws.String(d.Get("stack_id").(string)),
+		UseEbsOptimizedInstances:    aws.Bool(d.Get("use_ebs_optimized_instances").(bool)),
+		Attributes:                  lt.AttributeMap(d),
+		VolumeConfigurations:        lt.VolumeConfigurations(d),
+	}
+
+	if lt.CustomShortName {
+		req.Shortname = aws.String(d.Get("short_name").(string))
+	} else {
+		req.Shortname = aws.String(lt.TypeName)
+	}
+
+	req.CustomJson = aws.String(d.Get("custom_json").(string))
+
+	// The resource ID is not assigned until after CreateLayer returns,
+	// so log the layer name rather than the (still empty) ID.
+	log.Printf("[DEBUG] Creating OpsWorks layer: %s", d.Get("name").(string))
+
+	resp, err := client.CreateLayer(req)
+	if err != nil {
+		return err
+	}
+
+	layerId := *resp.LayerId
+	d.SetId(layerId)
+	d.Set("id", layerId)
+
+	loadBalancer := d.Get("elastic_load_balancer").(string)
+	if loadBalancer != "" {
+		log.Printf("[DEBUG] Attaching load balancer: %s", loadBalancer)
+		_, err := client.AttachElasticLoadBalancer(&opsworks.AttachElasticLoadBalancerInput{
+			ElasticLoadBalancerName: aws.String(loadBalancer),
+			LayerId:                 &layerId,
+		})
+		if err != nil {
+			return err
+		}
+	}
+
+	return lt.Read(d, client)
+}
+
+func (lt *opsworksLayerType) Update(d *schema.ResourceData, client *opsworks.OpsWorks) error {
+
+	req := &opsworks.UpdateLayerInput{
+		LayerId:                     aws.String(d.Id()),
+		AutoAssignElasticIps:        aws.Bool(d.Get("auto_assign_elastic_ips").(bool)),
+		AutoAssignPublicIps:         aws.Bool(d.Get("auto_assign_public_ips").(bool)),
+		CustomInstanceProfileArn:    aws.String(d.Get("custom_instance_profile_arn").(string)),
+		CustomRecipes:               lt.CustomRecipes(d),
+		CustomSecurityGroupIds:      expandStringSet(d.Get("custom_security_group_ids").(*schema.Set)),
+		EnableAutoHealing:           aws.Bool(d.Get("auto_healing").(bool)),
+		InstallUpdatesOnBoot:        aws.Bool(d.Get("install_updates_on_boot").(bool)),
+		LifecycleEventConfiguration: lt.LifecycleEventConfiguration(d),
+		Name:                        aws.String(d.Get("name").(string)),
+		Packages:                    expandStringSet(d.Get("system_packages").(*schema.Set)),
+		UseEbsOptimizedInstances:    aws.Bool(d.Get("use_ebs_optimized_instances").(bool)),
+		Attributes:                  lt.AttributeMap(d),
+		VolumeConfigurations:        lt.VolumeConfigurations(d),
+	}
+
+	if lt.CustomShortName {
+		req.Shortname = aws.String(d.Get("short_name").(string))
+	} else {
+		req.Shortname = aws.String(lt.TypeName)
+	}
+
+	req.CustomJson = aws.String(d.Get("custom_json").(string))
+
+	log.Printf("[DEBUG] Updating OpsWorks layer: %s", d.Id())
+
+	if d.HasChange("elastic_load_balancer") {
+		lbo, lbn := d.GetChange("elastic_load_balancer")
+
+		loadBalancerOld := aws.String(lbo.(string))
+		loadBalancerNew := aws.String(lbn.(string))
+
+		if loadBalancerOld != nil && *loadBalancerOld != "" {
+			log.Printf("[DEBUG] Detaching load balancer: %s", *loadBalancerOld)
+			_, err := 
client.DetachElasticLoadBalancer(&opsworks.DetachElasticLoadBalancerInput{ + ElasticLoadBalancerName: loadBalancerOld, + LayerId: aws.String(d.Id()), + }) + if err != nil { + return err + } + } + + if loadBalancerNew != nil && *loadBalancerNew != "" { + log.Printf("[DEBUG] Attaching load balancer: %s", *loadBalancerNew) + _, err := client.AttachElasticLoadBalancer(&opsworks.AttachElasticLoadBalancerInput{ + ElasticLoadBalancerName: loadBalancerNew, + LayerId: aws.String(d.Id()), + }) + if err != nil { + return err + } + } + } + + _, err := client.UpdateLayer(req) + if err != nil { + return err + } + + return lt.Read(d, client) +} + +func (lt *opsworksLayerType) Delete(d *schema.ResourceData, client *opsworks.OpsWorks) error { + req := &opsworks.DeleteLayerInput{ + LayerId: aws.String(d.Id()), + } + + log.Printf("[DEBUG] Deleting OpsWorks layer: %s", d.Id()) + + _, err := client.DeleteLayer(req) + return err +} + +func (lt *opsworksLayerType) AttributeMap(d *schema.ResourceData) map[string]*string { + attrs := map[string]*string{} + + for key, def := range lt.Attributes { + value := d.Get(key) + switch def.Type { + case schema.TypeString: + strValue := value.(string) + attrs[def.AttrName] = &strValue + case schema.TypeInt: + intValue := value.(int) + strValue := strconv.Itoa(intValue) + attrs[def.AttrName] = &strValue + case schema.TypeBool: + boolValue := value.(bool) + if boolValue { + attrs[def.AttrName] = &opsworksTrueString + } else { + attrs[def.AttrName] = &opsworksFalseString + } + default: + // should never happen + panic(fmt.Errorf("Unsupported OpsWorks layer attribute type")) + } + } + + return attrs +} + +func (lt *opsworksLayerType) SetAttributeMap(d *schema.ResourceData, attrs map[string]*string) { + for key, def := range lt.Attributes { + // Ignore write-only attributes; we'll just keep what we already have stored. + // (The AWS API returns garbage placeholder values for these.) 
+		if def.WriteOnly {
+			continue
+		}
+
+		if strPtr, ok := attrs[def.AttrName]; ok && strPtr != nil {
+			strValue := *strPtr
+
+			switch def.Type {
+			case schema.TypeString:
+				d.Set(key, strValue)
+			case schema.TypeInt:
+				intValue, err := strconv.Atoi(strValue)
+				if err == nil {
+					d.Set(key, intValue)
+				} else {
+					// Got garbage from the AWS API
+					d.Set(key, nil)
+				}
+			case schema.TypeBool:
+				boolValue := true
+				if strValue == opsworksFalseString {
+					boolValue = false
+				}
+				d.Set(key, boolValue)
+			default:
+				// should never happen
+				panic(fmt.Errorf("Unsupported OpsWorks layer attribute type"))
+			}
+		} else {
+			d.Set(key, nil)
+		}
+	}
+}
+
+func (lt *opsworksLayerType) LifecycleEventConfiguration(d *schema.ResourceData) *opsworks.LifecycleEventConfiguration {
+	return &opsworks.LifecycleEventConfiguration{
+		Shutdown: &opsworks.ShutdownEventConfiguration{
+			DelayUntilElbConnectionsDrained: aws.Bool(d.Get("drain_elb_on_shutdown").(bool)),
+			ExecutionTimeout:                aws.Int64(int64(d.Get("instance_shutdown_timeout").(int))),
+		},
+	}
+}
+
+func (lt *opsworksLayerType) SetLifecycleEventConfiguration(d *schema.ResourceData, v *opsworks.LifecycleEventConfiguration) {
+	if v == nil || v.Shutdown == nil {
+		d.Set("drain_elb_on_shutdown", nil)
+		d.Set("instance_shutdown_timeout", nil)
+	} else {
+		d.Set("drain_elb_on_shutdown", v.Shutdown.DelayUntilElbConnectionsDrained)
+		d.Set("instance_shutdown_timeout", v.Shutdown.ExecutionTimeout)
+	}
+}
+
+func (lt *opsworksLayerType) CustomRecipes(d *schema.ResourceData) *opsworks.Recipes {
+	return &opsworks.Recipes{
+		Configure: expandStringList(d.Get("custom_configure_recipes").([]interface{})),
+		Deploy:    expandStringList(d.Get("custom_deploy_recipes").([]interface{})),
+		Setup:     expandStringList(d.Get("custom_setup_recipes").([]interface{})),
+		Shutdown:  expandStringList(d.Get("custom_shutdown_recipes").([]interface{})),
+		Undeploy:  expandStringList(d.Get("custom_undeploy_recipes").([]interface{})),
+	}
+}
+
+func (lt *opsworksLayerType) SetCustomRecipes(d *schema.ResourceData, v *opsworks.Recipes) {
+	// Null out everything first, and then we'll consider what to put back.
+ d.Set("custom_configure_recipes", nil) + d.Set("custom_deploy_recipes", nil) + d.Set("custom_setup_recipes", nil) + d.Set("custom_shutdown_recipes", nil) + d.Set("custom_undeploy_recipes", nil) + + if v == nil { + return + } + + d.Set("custom_configure_recipes", flattenStringList(v.Configure)) + d.Set("custom_deploy_recipes", flattenStringList(v.Deploy)) + d.Set("custom_setup_recipes", flattenStringList(v.Setup)) + d.Set("custom_shutdown_recipes", flattenStringList(v.Shutdown)) + d.Set("custom_undeploy_recipes", flattenStringList(v.Undeploy)) +} + +func (lt *opsworksLayerType) VolumeConfigurations(d *schema.ResourceData) []*opsworks.VolumeConfiguration { + configuredVolumes := d.Get("ebs_volume").(*schema.Set).List() + result := make([]*opsworks.VolumeConfiguration, len(configuredVolumes)) + + for i := 0; i < len(configuredVolumes); i++ { + volumeData := configuredVolumes[i].(map[string]interface{}) + + result[i] = &opsworks.VolumeConfiguration{ + MountPoint: aws.String(volumeData["mount_point"].(string)), + NumberOfDisks: aws.Int64(int64(volumeData["number_of_disks"].(int))), + Size: aws.Int64(int64(volumeData["size"].(int))), + VolumeType: aws.String(volumeData["type"].(string)), + } + iops := int64(volumeData["iops"].(int)) + if iops != 0 { + result[i].Iops = aws.Int64(iops) + } + + raidLevelStr := volumeData["raid_level"].(string) + if raidLevelStr != "" { + raidLevel, err := strconv.Atoi(raidLevelStr) + if err == nil { + result[i].RaidLevel = aws.Int64(int64(raidLevel)) + } + } + } + + return result +} + +func (lt *opsworksLayerType) SetVolumeConfigurations(d *schema.ResourceData, v []*opsworks.VolumeConfiguration) { + newValue := make([]*map[string]interface{}, len(v)) + + for i := 0; i < len(v); i++ { + config := v[i] + data := make(map[string]interface{}) + newValue[i] = &data + + if config.Iops != nil { + data["iops"] = int(*config.Iops) + } else { + data["iops"] = 0 + } + if config.MountPoint != nil { + data["mount_point"] = *config.MountPoint + } + if config.NumberOfDisks != nil { + data["number_of_disks"] = int(*config.NumberOfDisks) + } + if config.RaidLevel != nil { + data["raid_level"] = strconv.Itoa(int(*config.RaidLevel)) + } + if config.Size != nil { + data["size"] = int(*config.Size) + } + if config.VolumeType != nil { + data["type"] = *config.VolumeType + } + } + + d.Set("ebs_volume", newValue) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/provider.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/provider.go new file mode 100644 index 000000000..d5880d730 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/provider.go @@ -0,0 +1,815 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/mutexkv" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +// Provider returns a terraform.ResourceProvider. 
+func Provider() terraform.ResourceProvider { + // TODO: Move the validation to this, requires conditional schemas + // TODO: Move the configuration to this, requires validation + + // The actual provider + return &schema.Provider{ + Schema: map[string]*schema.Schema{ + "access_key": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["access_key"], + }, + + "secret_key": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["secret_key"], + }, + + "profile": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["profile"], + }, + + "assume_role": assumeRoleSchema(), + + "shared_credentials_file": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["shared_credentials_file"], + }, + + "token": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["token"], + }, + + "region": { + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "AWS_REGION", + "AWS_DEFAULT_REGION", + }, nil), + Description: descriptions["region"], + InputDefault: "us-east-1", + }, + + "max_retries": { + Type: schema.TypeInt, + Optional: true, + Default: 25, + Description: descriptions["max_retries"], + }, + + "allowed_account_ids": { + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + ConflictsWith: []string{"forbidden_account_ids"}, + Set: schema.HashString, + }, + + "forbidden_account_ids": { + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + ConflictsWith: []string{"allowed_account_ids"}, + Set: schema.HashString, + }, + + "dynamodb_endpoint": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["dynamodb_endpoint"], + Removed: "Use `dynamodb` inside `endpoints` block instead", + }, + + "kinesis_endpoint": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["kinesis_endpoint"], + Removed: "Use `kinesis` inside `endpoints` block instead", + }, + + "endpoints": endpointsSchema(), + + "insecure": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["insecure"], + }, + + "skip_credentials_validation": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["skip_credentials_validation"], + }, + + "skip_get_ec2_platforms": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["skip_get_ec2_platforms"], + }, + + "skip_region_validation": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["skip_region_validation"], + }, + + "skip_requesting_account_id": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["skip_requesting_account_id"], + }, + + "skip_metadata_api_check": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["skip_metadata_api_check"], + }, + + "s3_force_path_style": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["s3_force_path_style"], + }, + }, + + DataSourcesMap: map[string]*schema.Resource{ + "aws_acm_certificate": dataSourceAwsAcmCertificate(), + "aws_alb": dataSourceAwsAlb(), + "aws_alb_listener": dataSourceAwsAlbListener(), + "aws_ami": dataSourceAwsAmi(), + "aws_ami_ids": dataSourceAwsAmiIds(), + "aws_autoscaling_groups": dataSourceAwsAutoscalingGroups(), + 
"aws_availability_zone": dataSourceAwsAvailabilityZone(), + "aws_availability_zones": dataSourceAwsAvailabilityZones(), + "aws_billing_service_account": dataSourceAwsBillingServiceAccount(), + "aws_caller_identity": dataSourceAwsCallerIdentity(), + "aws_canonical_user_id": dataSourceAwsCanonicalUserId(), + "aws_cloudformation_stack": dataSourceAwsCloudFormationStack(), + "aws_db_instance": dataSourceAwsDbInstance(), + "aws_db_snapshot": dataSourceAwsDbSnapshot(), + "aws_ebs_snapshot": dataSourceAwsEbsSnapshot(), + "aws_ebs_snapshot_ids": dataSourceAwsEbsSnapshotIds(), + "aws_ebs_volume": dataSourceAwsEbsVolume(), + "aws_ecs_cluster": dataSourceAwsEcsCluster(), + "aws_ecs_container_definition": dataSourceAwsEcsContainerDefinition(), + "aws_ecs_task_definition": dataSourceAwsEcsTaskDefinition(), + "aws_efs_file_system": dataSourceAwsEfsFileSystem(), + "aws_eip": dataSourceAwsEip(), + "aws_elastic_beanstalk_solution_stack": dataSourceAwsElasticBeanstalkSolutionStack(), + "aws_elasticache_cluster": dataSourceAwsElastiCacheCluster(), + "aws_elb_hosted_zone_id": dataSourceAwsElbHostedZoneId(), + "aws_elb_service_account": dataSourceAwsElbServiceAccount(), + "aws_iam_account_alias": dataSourceAwsIamAccountAlias(), + "aws_iam_policy_document": dataSourceAwsIamPolicyDocument(), + "aws_iam_role": dataSourceAwsIAMRole(), + "aws_iam_server_certificate": dataSourceAwsIAMServerCertificate(), + "aws_instance": dataSourceAwsInstance(), + "aws_ip_ranges": dataSourceAwsIPRanges(), + "aws_kinesis_stream": dataSourceAwsKinesisStream(), + "aws_kms_alias": dataSourceAwsKmsAlias(), + "aws_kms_ciphertext": dataSourceAwsKmsCiphetext(), + "aws_kms_secret": dataSourceAwsKmsSecret(), + "aws_partition": dataSourceAwsPartition(), + "aws_prefix_list": dataSourceAwsPrefixList(), + "aws_redshift_service_account": dataSourceAwsRedshiftServiceAccount(), + "aws_region": dataSourceAwsRegion(), + "aws_route_table": dataSourceAwsRouteTable(), + "aws_route53_zone": dataSourceAwsRoute53Zone(), + "aws_s3_bucket_object": dataSourceAwsS3BucketObject(), + "aws_sns_topic": dataSourceAwsSnsTopic(), + "aws_ssm_parameter": dataSourceAwsSsmParameter(), + "aws_subnet": dataSourceAwsSubnet(), + "aws_subnet_ids": dataSourceAwsSubnetIDs(), + "aws_security_group": dataSourceAwsSecurityGroup(), + "aws_vpc": dataSourceAwsVpc(), + "aws_vpc_endpoint": dataSourceAwsVpcEndpoint(), + "aws_vpc_endpoint_service": dataSourceAwsVpcEndpointService(), + "aws_vpc_peering_connection": dataSourceAwsVpcPeeringConnection(), + "aws_vpn_gateway": dataSourceAwsVpnGateway(), + }, + + ResourcesMap: map[string]*schema.Resource{ + "aws_alb": resourceAwsAlb(), + "aws_alb_listener": resourceAwsAlbListener(), + "aws_alb_listener_rule": resourceAwsAlbListenerRule(), + "aws_alb_target_group": resourceAwsAlbTargetGroup(), + "aws_alb_target_group_attachment": resourceAwsAlbTargetGroupAttachment(), + "aws_ami": resourceAwsAmi(), + "aws_ami_copy": resourceAwsAmiCopy(), + "aws_ami_from_instance": resourceAwsAmiFromInstance(), + "aws_ami_launch_permission": resourceAwsAmiLaunchPermission(), + "aws_api_gateway_account": resourceAwsApiGatewayAccount(), + "aws_api_gateway_api_key": resourceAwsApiGatewayApiKey(), + "aws_api_gateway_authorizer": resourceAwsApiGatewayAuthorizer(), + "aws_api_gateway_base_path_mapping": resourceAwsApiGatewayBasePathMapping(), + "aws_api_gateway_client_certificate": resourceAwsApiGatewayClientCertificate(), + "aws_api_gateway_deployment": resourceAwsApiGatewayDeployment(), + "aws_api_gateway_domain_name": resourceAwsApiGatewayDomainName(), + 
"aws_api_gateway_integration": resourceAwsApiGatewayIntegration(), + "aws_api_gateway_integration_response": resourceAwsApiGatewayIntegrationResponse(), + "aws_api_gateway_method": resourceAwsApiGatewayMethod(), + "aws_api_gateway_method_response": resourceAwsApiGatewayMethodResponse(), + "aws_api_gateway_method_settings": resourceAwsApiGatewayMethodSettings(), + "aws_api_gateway_model": resourceAwsApiGatewayModel(), + "aws_api_gateway_resource": resourceAwsApiGatewayResource(), + "aws_api_gateway_rest_api": resourceAwsApiGatewayRestApi(), + "aws_api_gateway_stage": resourceAwsApiGatewayStage(), + "aws_api_gateway_usage_plan": resourceAwsApiGatewayUsagePlan(), + "aws_api_gateway_usage_plan_key": resourceAwsApiGatewayUsagePlanKey(), + "aws_app_cookie_stickiness_policy": resourceAwsAppCookieStickinessPolicy(), + "aws_appautoscaling_target": resourceAwsAppautoscalingTarget(), + "aws_appautoscaling_policy": resourceAwsAppautoscalingPolicy(), + "aws_autoscaling_attachment": resourceAwsAutoscalingAttachment(), + "aws_autoscaling_group": resourceAwsAutoscalingGroup(), + "aws_autoscaling_notification": resourceAwsAutoscalingNotification(), + "aws_autoscaling_policy": resourceAwsAutoscalingPolicy(), + "aws_autoscaling_schedule": resourceAwsAutoscalingSchedule(), + "aws_cloudformation_stack": resourceAwsCloudFormationStack(), + "aws_cloudfront_distribution": resourceAwsCloudFrontDistribution(), + "aws_cloudfront_origin_access_identity": resourceAwsCloudFrontOriginAccessIdentity(), + "aws_cloudtrail": resourceAwsCloudTrail(), + "aws_cloudwatch_event_rule": resourceAwsCloudWatchEventRule(), + "aws_cloudwatch_event_target": resourceAwsCloudWatchEventTarget(), + "aws_cloudwatch_log_destination": resourceAwsCloudWatchLogDestination(), + "aws_cloudwatch_log_destination_policy": resourceAwsCloudWatchLogDestinationPolicy(), + "aws_cloudwatch_log_group": resourceAwsCloudWatchLogGroup(), + "aws_cloudwatch_log_metric_filter": resourceAwsCloudWatchLogMetricFilter(), + "aws_cloudwatch_log_stream": resourceAwsCloudWatchLogStream(), + "aws_cloudwatch_log_subscription_filter": resourceAwsCloudwatchLogSubscriptionFilter(), + "aws_config_config_rule": resourceAwsConfigConfigRule(), + "aws_config_configuration_recorder": resourceAwsConfigConfigurationRecorder(), + "aws_config_configuration_recorder_status": resourceAwsConfigConfigurationRecorderStatus(), + "aws_config_delivery_channel": resourceAwsConfigDeliveryChannel(), + "aws_cognito_identity_pool": resourceAwsCognitoIdentityPool(), + "aws_autoscaling_lifecycle_hook": resourceAwsAutoscalingLifecycleHook(), + "aws_cloudwatch_metric_alarm": resourceAwsCloudWatchMetricAlarm(), + "aws_codedeploy_app": resourceAwsCodeDeployApp(), + "aws_codedeploy_deployment_config": resourceAwsCodeDeployDeploymentConfig(), + "aws_codedeploy_deployment_group": resourceAwsCodeDeployDeploymentGroup(), + "aws_codecommit_repository": resourceAwsCodeCommitRepository(), + "aws_codecommit_trigger": resourceAwsCodeCommitTrigger(), + "aws_codebuild_project": resourceAwsCodeBuildProject(), + "aws_codepipeline": resourceAwsCodePipeline(), + "aws_customer_gateway": resourceAwsCustomerGateway(), + "aws_db_event_subscription": resourceAwsDbEventSubscription(), + "aws_db_instance": resourceAwsDbInstance(), + "aws_db_option_group": resourceAwsDbOptionGroup(), + "aws_db_parameter_group": resourceAwsDbParameterGroup(), + "aws_db_security_group": resourceAwsDbSecurityGroup(), + "aws_db_snapshot": resourceAwsDbSnapshot(), + "aws_db_subnet_group": resourceAwsDbSubnetGroup(), + "aws_devicefarm_project": 
resourceAwsDevicefarmProject(), + "aws_directory_service_directory": resourceAwsDirectoryServiceDirectory(), + "aws_dms_certificate": resourceAwsDmsCertificate(), + "aws_dms_endpoint": resourceAwsDmsEndpoint(), + "aws_dms_replication_instance": resourceAwsDmsReplicationInstance(), + "aws_dms_replication_subnet_group": resourceAwsDmsReplicationSubnetGroup(), + "aws_dms_replication_task": resourceAwsDmsReplicationTask(), + "aws_dynamodb_table": resourceAwsDynamoDbTable(), + "aws_ebs_snapshot": resourceAwsEbsSnapshot(), + "aws_ebs_volume": resourceAwsEbsVolume(), + "aws_ecr_repository": resourceAwsEcrRepository(), + "aws_ecr_repository_policy": resourceAwsEcrRepositoryPolicy(), + "aws_ecs_cluster": resourceAwsEcsCluster(), + "aws_ecs_service": resourceAwsEcsService(), + "aws_ecs_task_definition": resourceAwsEcsTaskDefinition(), + "aws_efs_file_system": resourceAwsEfsFileSystem(), + "aws_efs_mount_target": resourceAwsEfsMountTarget(), + "aws_egress_only_internet_gateway": resourceAwsEgressOnlyInternetGateway(), + "aws_eip": resourceAwsEip(), + "aws_eip_association": resourceAwsEipAssociation(), + "aws_elasticache_cluster": resourceAwsElasticacheCluster(), + "aws_elasticache_parameter_group": resourceAwsElasticacheParameterGroup(), + "aws_elasticache_replication_group": resourceAwsElasticacheReplicationGroup(), + "aws_elasticache_security_group": resourceAwsElasticacheSecurityGroup(), + "aws_elasticache_subnet_group": resourceAwsElasticacheSubnetGroup(), + "aws_elastic_beanstalk_application": resourceAwsElasticBeanstalkApplication(), + "aws_elastic_beanstalk_application_version": resourceAwsElasticBeanstalkApplicationVersion(), + "aws_elastic_beanstalk_configuration_template": resourceAwsElasticBeanstalkConfigurationTemplate(), + "aws_elastic_beanstalk_environment": resourceAwsElasticBeanstalkEnvironment(), + "aws_elasticsearch_domain": resourceAwsElasticSearchDomain(), + "aws_elasticsearch_domain_policy": resourceAwsElasticSearchDomainPolicy(), + "aws_elastictranscoder_pipeline": resourceAwsElasticTranscoderPipeline(), + "aws_elastictranscoder_preset": resourceAwsElasticTranscoderPreset(), + "aws_elb": resourceAwsElb(), + "aws_elb_attachment": resourceAwsElbAttachment(), + "aws_emr_cluster": resourceAwsEMRCluster(), + "aws_emr_instance_group": resourceAwsEMRInstanceGroup(), + "aws_emr_security_configuration": resourceAwsEMRSecurityConfiguration(), + "aws_flow_log": resourceAwsFlowLog(), + "aws_glacier_vault": resourceAwsGlacierVault(), + "aws_iam_access_key": resourceAwsIamAccessKey(), + "aws_iam_account_alias": resourceAwsIamAccountAlias(), + "aws_iam_account_password_policy": resourceAwsIamAccountPasswordPolicy(), + "aws_iam_group_policy": resourceAwsIamGroupPolicy(), + "aws_iam_group": resourceAwsIamGroup(), + "aws_iam_group_membership": resourceAwsIamGroupMembership(), + "aws_iam_group_policy_attachment": resourceAwsIamGroupPolicyAttachment(), + "aws_iam_instance_profile": resourceAwsIamInstanceProfile(), + "aws_iam_openid_connect_provider": resourceAwsIamOpenIDConnectProvider(), + "aws_iam_policy": resourceAwsIamPolicy(), + "aws_iam_policy_attachment": resourceAwsIamPolicyAttachment(), + "aws_iam_role_policy_attachment": resourceAwsIamRolePolicyAttachment(), + "aws_iam_role_policy": resourceAwsIamRolePolicy(), + "aws_iam_role": resourceAwsIamRole(), + "aws_iam_saml_provider": resourceAwsIamSamlProvider(), + "aws_iam_server_certificate": resourceAwsIAMServerCertificate(), + "aws_iam_user_policy_attachment": resourceAwsIamUserPolicyAttachment(), + "aws_iam_user_policy": 
resourceAwsIamUserPolicy(), + "aws_iam_user_ssh_key": resourceAwsIamUserSshKey(), + "aws_iam_user": resourceAwsIamUser(), + "aws_iam_user_login_profile": resourceAwsIamUserLoginProfile(), + "aws_inspector_assessment_target": resourceAWSInspectorAssessmentTarget(), + "aws_inspector_assessment_template": resourceAWSInspectorAssessmentTemplate(), + "aws_inspector_resource_group": resourceAWSInspectorResourceGroup(), + "aws_instance": resourceAwsInstance(), + "aws_internet_gateway": resourceAwsInternetGateway(), + "aws_key_pair": resourceAwsKeyPair(), + "aws_kinesis_firehose_delivery_stream": resourceAwsKinesisFirehoseDeliveryStream(), + "aws_kinesis_stream": resourceAwsKinesisStream(), + "aws_kms_alias": resourceAwsKmsAlias(), + "aws_kms_key": resourceAwsKmsKey(), + "aws_lambda_function": resourceAwsLambdaFunction(), + "aws_lambda_event_source_mapping": resourceAwsLambdaEventSourceMapping(), + "aws_lambda_alias": resourceAwsLambdaAlias(), + "aws_lambda_permission": resourceAwsLambdaPermission(), + "aws_launch_configuration": resourceAwsLaunchConfiguration(), + "aws_lightsail_domain": resourceAwsLightsailDomain(), + "aws_lightsail_instance": resourceAwsLightsailInstance(), + "aws_lightsail_key_pair": resourceAwsLightsailKeyPair(), + "aws_lightsail_static_ip": resourceAwsLightsailStaticIp(), + "aws_lightsail_static_ip_attachment": resourceAwsLightsailStaticIpAttachment(), + "aws_lb_cookie_stickiness_policy": resourceAwsLBCookieStickinessPolicy(), + "aws_load_balancer_policy": resourceAwsLoadBalancerPolicy(), + "aws_load_balancer_backend_server_policy": resourceAwsLoadBalancerBackendServerPolicies(), + "aws_load_balancer_listener_policy": resourceAwsLoadBalancerListenerPolicies(), + "aws_lb_ssl_negotiation_policy": resourceAwsLBSSLNegotiationPolicy(), + "aws_main_route_table_association": resourceAwsMainRouteTableAssociation(), + "aws_nat_gateway": resourceAwsNatGateway(), + "aws_network_acl": resourceAwsNetworkAcl(), + "aws_default_network_acl": resourceAwsDefaultNetworkAcl(), + "aws_network_acl_rule": resourceAwsNetworkAclRule(), + "aws_network_interface": resourceAwsNetworkInterface(), + "aws_network_interface_attachment": resourceAwsNetworkInterfaceAttachment(), + "aws_opsworks_application": resourceAwsOpsworksApplication(), + "aws_opsworks_stack": resourceAwsOpsworksStack(), + "aws_opsworks_java_app_layer": resourceAwsOpsworksJavaAppLayer(), + "aws_opsworks_haproxy_layer": resourceAwsOpsworksHaproxyLayer(), + "aws_opsworks_static_web_layer": resourceAwsOpsworksStaticWebLayer(), + "aws_opsworks_php_app_layer": resourceAwsOpsworksPhpAppLayer(), + "aws_opsworks_rails_app_layer": resourceAwsOpsworksRailsAppLayer(), + "aws_opsworks_nodejs_app_layer": resourceAwsOpsworksNodejsAppLayer(), + "aws_opsworks_memcached_layer": resourceAwsOpsworksMemcachedLayer(), + "aws_opsworks_mysql_layer": resourceAwsOpsworksMysqlLayer(), + "aws_opsworks_ganglia_layer": resourceAwsOpsworksGangliaLayer(), + "aws_opsworks_custom_layer": resourceAwsOpsworksCustomLayer(), + "aws_opsworks_instance": resourceAwsOpsworksInstance(), + "aws_opsworks_user_profile": resourceAwsOpsworksUserProfile(), + "aws_opsworks_permission": resourceAwsOpsworksPermission(), + "aws_opsworks_rds_db_instance": resourceAwsOpsworksRdsDbInstance(), + "aws_placement_group": resourceAwsPlacementGroup(), + "aws_proxy_protocol_policy": resourceAwsProxyProtocolPolicy(), + "aws_rds_cluster": resourceAwsRDSCluster(), + "aws_rds_cluster_instance": resourceAwsRDSClusterInstance(), + "aws_rds_cluster_parameter_group": resourceAwsRDSClusterParameterGroup(), 
+ "aws_redshift_cluster": resourceAwsRedshiftCluster(), + "aws_redshift_security_group": resourceAwsRedshiftSecurityGroup(), + "aws_redshift_parameter_group": resourceAwsRedshiftParameterGroup(), + "aws_redshift_subnet_group": resourceAwsRedshiftSubnetGroup(), + "aws_route53_delegation_set": resourceAwsRoute53DelegationSet(), + "aws_route53_record": resourceAwsRoute53Record(), + "aws_route53_zone_association": resourceAwsRoute53ZoneAssociation(), + "aws_route53_zone": resourceAwsRoute53Zone(), + "aws_route53_health_check": resourceAwsRoute53HealthCheck(), + "aws_route": resourceAwsRoute(), + "aws_route_table": resourceAwsRouteTable(), + "aws_default_route_table": resourceAwsDefaultRouteTable(), + "aws_route_table_association": resourceAwsRouteTableAssociation(), + "aws_ses_active_receipt_rule_set": resourceAwsSesActiveReceiptRuleSet(), + "aws_ses_domain_identity": resourceAwsSesDomainIdentity(), + "aws_ses_receipt_filter": resourceAwsSesReceiptFilter(), + "aws_ses_receipt_rule": resourceAwsSesReceiptRule(), + "aws_ses_receipt_rule_set": resourceAwsSesReceiptRuleSet(), + "aws_ses_configuration_set": resourceAwsSesConfigurationSet(), + "aws_ses_event_destination": resourceAwsSesEventDestination(), + "aws_s3_bucket": resourceAwsS3Bucket(), + "aws_s3_bucket_policy": resourceAwsS3BucketPolicy(), + "aws_s3_bucket_object": resourceAwsS3BucketObject(), + "aws_s3_bucket_notification": resourceAwsS3BucketNotification(), + "aws_security_group": resourceAwsSecurityGroup(), + "aws_default_security_group": resourceAwsDefaultSecurityGroup(), + "aws_security_group_rule": resourceAwsSecurityGroupRule(), + "aws_simpledb_domain": resourceAwsSimpleDBDomain(), + "aws_ssm_activation": resourceAwsSsmActivation(), + "aws_ssm_association": resourceAwsSsmAssociation(), + "aws_ssm_document": resourceAwsSsmDocument(), + "aws_ssm_maintenance_window": resourceAwsSsmMaintenanceWindow(), + "aws_ssm_maintenance_window_target": resourceAwsSsmMaintenanceWindowTarget(), + "aws_ssm_maintenance_window_task": resourceAwsSsmMaintenanceWindowTask(), + "aws_ssm_patch_baseline": resourceAwsSsmPatchBaseline(), + "aws_ssm_patch_group": resourceAwsSsmPatchGroup(), + "aws_ssm_parameter": resourceAwsSsmParameter(), + "aws_spot_datafeed_subscription": resourceAwsSpotDataFeedSubscription(), + "aws_spot_instance_request": resourceAwsSpotInstanceRequest(), + "aws_spot_fleet_request": resourceAwsSpotFleetRequest(), + "aws_sqs_queue": resourceAwsSqsQueue(), + "aws_sqs_queue_policy": resourceAwsSqsQueuePolicy(), + "aws_snapshot_create_volume_permission": resourceAwsSnapshotCreateVolumePermission(), + "aws_sns_topic": resourceAwsSnsTopic(), + "aws_sns_topic_policy": resourceAwsSnsTopicPolicy(), + "aws_sns_topic_subscription": resourceAwsSnsTopicSubscription(), + "aws_sfn_activity": resourceAwsSfnActivity(), + "aws_sfn_state_machine": resourceAwsSfnStateMachine(), + "aws_default_subnet": resourceAwsDefaultSubnet(), + "aws_subnet": resourceAwsSubnet(), + "aws_volume_attachment": resourceAwsVolumeAttachment(), + "aws_vpc_dhcp_options_association": resourceAwsVpcDhcpOptionsAssociation(), + "aws_default_vpc_dhcp_options": resourceAwsDefaultVpcDhcpOptions(), + "aws_vpc_dhcp_options": resourceAwsVpcDhcpOptions(), + "aws_vpc_peering_connection": resourceAwsVpcPeeringConnection(), + "aws_vpc_peering_connection_accepter": resourceAwsVpcPeeringConnectionAccepter(), + "aws_default_vpc": resourceAwsDefaultVpc(), + "aws_vpc": resourceAwsVpc(), + "aws_vpc_endpoint": resourceAwsVpcEndpoint(), + "aws_vpc_endpoint_route_table_association": 
resourceAwsVpcEndpointRouteTableAssociation(),
+			"aws_vpn_connection":                       resourceAwsVpnConnection(),
+			"aws_vpn_connection_route":                 resourceAwsVpnConnectionRoute(),
+			"aws_vpn_gateway":                          resourceAwsVpnGateway(),
+			"aws_vpn_gateway_attachment":               resourceAwsVpnGatewayAttachment(),
+			"aws_vpn_gateway_route_propagation":        resourceAwsVpnGatewayRoutePropagation(),
+			"aws_waf_byte_match_set":                   resourceAwsWafByteMatchSet(),
+			"aws_waf_ipset":                            resourceAwsWafIPSet(),
+			"aws_waf_rule":                             resourceAwsWafRule(),
+			"aws_waf_size_constraint_set":              resourceAwsWafSizeConstraintSet(),
+			"aws_waf_web_acl":                          resourceAwsWafWebAcl(),
+			"aws_waf_xss_match_set":                    resourceAwsWafXssMatchSet(),
+			"aws_waf_sql_injection_match_set":          resourceAwsWafSqlInjectionMatchSet(),
+			"aws_wafregional_byte_match_set":           resourceAwsWafRegionalByteMatchSet(),
+			"aws_wafregional_ipset":                    resourceAwsWafRegionalIPSet(),
+		},
+		ConfigureFunc: providerConfigure,
+	}
+}
+
+var descriptions map[string]string
+
+func init() {
+	descriptions = map[string]string{
+		"region": "The region where AWS operations will take place. Examples\n" +
+			"are us-east-1, us-west-2, etc.",
+
+		"access_key": "The access key for API operations. You can retrieve this\n" +
+			"from the 'Security & Credentials' section of the AWS console.",
+
+		"secret_key": "The secret key for API operations. You can retrieve this\n" +
+			"from the 'Security & Credentials' section of the AWS console.",
+
+		"profile": "The profile for API operations. If not set, the default profile\n" +
+			"created with `aws configure` will be used.",
+
+		"shared_credentials_file": "The path to the shared credentials file. If not set\n" +
+			"this defaults to ~/.aws/credentials.",
+
+		"token": "Session token. A session token is only required if you are\n" +
+			"using temporary security credentials.",
+
+		"max_retries": "The maximum number of times an AWS API request is\n" +
+			"retried on recoverable failures. If the request still fails, an error is\n" +
+			"thrown.",
+
+		"cloudformation_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
+
+		"cloudwatch_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
+
+		"cloudwatchevents_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
+
+		"cloudwatchlogs_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
+
+		"devicefarm_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
+
+		"dynamodb_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n" +
+			"It's typically used to connect to dynamodb-local.",
+
+		"kinesis_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n" +
+			"It's typically used to connect to kinesalite.",
+
+		"kms_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
+
+		"iam_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
+
+		"ec2_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
+
+		"elb_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
+
+		"rds_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
+
+		"s3_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
+
+		"sns_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
+
+		"sqs_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n",
+
+		"insecure": "Explicitly allow the provider to perform \"insecure\" SSL requests. If omitted," +
+			" the default value is `false`.",
+
+		"skip_credentials_validation": "Skip the credentials validation via STS API. " +
+			"Used for AWS API implementations that do not have STS available/implemented.",
+
+		"skip_get_ec2_platforms": "Skip getting the supported EC2 platforms. " +
+			"Used by users that don't have ec2:DescribeAccountAttributes permissions.",
+
+		"skip_region_validation": "Skip static validation of region name. " +
+			"Used by users of alternative AWS-like APIs or users w/ access to regions that are not public (yet).",
+
+		"skip_requesting_account_id": "Skip requesting the account ID. " +
+			"Used for AWS API implementations that do not have IAM/STS API and/or metadata API.",
+
+		"skip_metadata_api_check": "Skip the AWS Metadata API check. " +
+			"Used for AWS API implementations that do not have a metadata api endpoint.",
+
+		"s3_force_path_style": "Set this to true to force the request to use path-style addressing,\n" +
+			"i.e., http://s3.amazonaws.com/BUCKET/KEY. By default, the S3 client will\n" +
+			"use virtual hosted bucket addressing when possible\n" +
+			"(http://BUCKET.s3.amazonaws.com/KEY). Specific to the Amazon S3 service.",
+
+		"assume_role_role_arn": "The ARN of an IAM role to assume prior to making API calls.",
+
+		"assume_role_session_name": "The session name to use when assuming the role. If omitted," +
+			" no session name is passed to the AssumeRole call.",
+
+		"assume_role_external_id": "The external ID to use when assuming the role. If omitted," +
+			" no external ID is passed to the AssumeRole call.",
+
+		"assume_role_policy": "The permissions applied when assuming a role. You cannot use" +
+			" this policy to grant further permissions that are in excess of those of the" +
+			" role that is being assumed.",
+	}
+}
+
+func providerConfigure(d *schema.ResourceData) (interface{}, error) {
+	config := Config{
+		AccessKey:               d.Get("access_key").(string),
+		SecretKey:               d.Get("secret_key").(string),
+		Profile:                 d.Get("profile").(string),
+		CredsFilename:           d.Get("shared_credentials_file").(string),
+		Token:                   d.Get("token").(string),
+		Region:                  d.Get("region").(string),
+		MaxRetries:              d.Get("max_retries").(int),
+		Insecure:                d.Get("insecure").(bool),
+		SkipCredsValidation:     d.Get("skip_credentials_validation").(bool),
+		SkipGetEC2Platforms:     d.Get("skip_get_ec2_platforms").(bool),
+		SkipRegionValidation:    d.Get("skip_region_validation").(bool),
+		SkipRequestingAccountId: d.Get("skip_requesting_account_id").(bool),
+		SkipMetadataApiCheck:    d.Get("skip_metadata_api_check").(bool),
+		S3ForcePathStyle:        d.Get("s3_force_path_style").(bool),
+	}
+
+	assumeRoleList := d.Get("assume_role").(*schema.Set).List()
+	if len(assumeRoleList) == 1 {
+		assumeRole := assumeRoleList[0].(map[string]interface{})
+		config.AssumeRoleARN = assumeRole["role_arn"].(string)
+		config.AssumeRoleSessionName = assumeRole["session_name"].(string)
+		config.AssumeRoleExternalID = assumeRole["external_id"].(string)
+
+		if v := assumeRole["policy"].(string); v != "" {
+			config.AssumeRolePolicy = v
+		}
+
+		log.Printf("[INFO] assume_role configuration set: (ARN: %q, SessionID: %q, ExternalID: %q, Policy: %q)",
+			config.AssumeRoleARN, config.AssumeRoleSessionName, config.AssumeRoleExternalID, config.AssumeRolePolicy)
+	} else {
+		log.Printf("[INFO] No assume_role block read from configuration")
+	}
+
+	endpointsSet := d.Get("endpoints").(*schema.Set)
+
+	for _, endpointsSetI := range endpointsSet.List() {
+		endpoints := endpointsSetI.(map[string]interface{})
+		config.CloudFormationEndpoint = endpoints["cloudformation"].(string)
+		config.CloudWatchEndpoint = endpoints["cloudwatch"].(string)
+		config.CloudWatchEventsEndpoint = endpoints["cloudwatchevents"].(string)
+		config.CloudWatchLogsEndpoint = endpoints["cloudwatchlogs"].(string)
+		config.DeviceFarmEndpoint = endpoints["devicefarm"].(string)
+		config.DynamoDBEndpoint = endpoints["dynamodb"].(string)
+		config.Ec2Endpoint = endpoints["ec2"].(string)
+		config.ElbEndpoint = endpoints["elb"].(string)
+		config.IamEndpoint = endpoints["iam"].(string)
+		config.KinesisEndpoint = endpoints["kinesis"].(string)
+		config.KmsEndpoint = endpoints["kms"].(string)
+		config.RdsEndpoint = endpoints["rds"].(string)
+		config.S3Endpoint = endpoints["s3"].(string)
+		config.SnsEndpoint = endpoints["sns"].(string)
+		config.SqsEndpoint = endpoints["sqs"].(string)
+	}
+
+	if v, ok := d.GetOk("allowed_account_ids"); ok {
+		config.AllowedAccountIds = v.(*schema.Set).List()
+	}
+
+	if v, ok := d.GetOk("forbidden_account_ids"); ok {
+		config.ForbiddenAccountIds = v.(*schema.Set).List()
+	}
+
+	return config.Client()
+}
+
+// awsMutexKV is a global MutexKV for use within this plugin.
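+//
+// Resources that must serialize access to a shared remote object lock and
+// unlock on a common key, e.g. (a sketch; the key shown is illustrative):
+//
+//	awsMutexKV.Lock("some-shared-resource")
+//	defer awsMutexKV.Unlock("some-shared-resource")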
+var awsMutexKV = mutexkv.NewMutexKV() + +func assumeRoleSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "role_arn": { + Type: schema.TypeString, + Optional: true, + Description: descriptions["assume_role_role_arn"], + }, + + "session_name": { + Type: schema.TypeString, + Optional: true, + Description: descriptions["assume_role_session_name"], + }, + + "external_id": { + Type: schema.TypeString, + Optional: true, + Description: descriptions["assume_role_external_id"], + }, + + "policy": { + Type: schema.TypeString, + Optional: true, + Description: descriptions["assume_role_policy"], + }, + }, + }, + Set: assumeRoleToHash, + } +} + +func assumeRoleToHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["role_arn"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["session_name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["external_id"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["policy"].(string))) + return hashcode.String(buf.String()) +} + +func endpointsSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cloudwatch": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["cloudwatch_endpoint"], + }, + "cloudwatchevents": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["cloudwatchevents_endpoint"], + }, + "cloudwatchlogs": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["cloudwatchlogs_endpoint"], + }, + "cloudformation": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["cloudformation_endpoint"], + }, + "devicefarm": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["devicefarm_endpoint"], + }, + "dynamodb": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["dynamodb_endpoint"], + }, + "iam": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["iam_endpoint"], + }, + + "ec2": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["ec2_endpoint"], + }, + + "elb": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["elb_endpoint"], + }, + "kinesis": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["kinesis_endpoint"], + }, + "kms": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["kms_endpoint"], + }, + "rds": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["rds_endpoint"], + }, + "s3": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["s3_endpoint"], + }, + "sns": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["sns_endpoint"], + }, + "sqs": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["sqs_endpoint"], + }, + }, + }, + Set: endpointsToHash, + } +} + +func endpointsToHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["cloudwatch"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["cloudwatchevents"].(string))) + 
buf.WriteString(fmt.Sprintf("%s-", m["cloudwatchlogs"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["cloudformation"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["devicefarm"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["dynamodb"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["iam"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["ec2"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["elb"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["kinesis"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["kms"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["rds"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["s3"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["sns"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["sqs"].(string))) + + return hashcode.String(buf.String()) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_alb.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_alb.go new file mode 100644 index 000000000..d1652c680 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_alb.go @@ -0,0 +1,497 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/elbv2" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsAlb() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsAlbCreate, + Read: resourceAwsAlbRead, + Update: resourceAwsAlbUpdate, + Delete: resourceAwsAlbDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "arn_suffix": { + Type: schema.TypeString, + Computed: true, + }, + + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: validateElbName, + }, + + "name_prefix": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateElbNamePrefix, + }, + + "internal": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "security_groups": { + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + Optional: true, + Set: schema.HashString, + }, + + "subnets": { + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Required: true, + Set: schema.HashString, + }, + + "access_logs": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + }, + "prefix": { + Type: schema.TypeString, + Optional: true, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + }, + }, + }, + + "enable_deletion_protection": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "idle_timeout": { + Type: schema.TypeInt, + Optional: true, + Default: 60, + }, + + "ip_address_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + + "vpc_id": { + Type: schema.TypeString, + Computed: true, + }, + + "zone_id": { + Type: 
schema.TypeString, + Computed: true, + }, + + "dns_name": { + Type: schema.TypeString, + Computed: true, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsAlbCreate(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbv2conn + + var name string + if v, ok := d.GetOk("name"); ok { + name = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + name = resource.PrefixedUniqueId(v.(string)) + } else { + name = resource.PrefixedUniqueId("tf-lb-") + } + d.Set("name", name) + + elbOpts := &elbv2.CreateLoadBalancerInput{ + Name: aws.String(name), + Tags: tagsFromMapELBv2(d.Get("tags").(map[string]interface{})), + } + + if scheme, ok := d.GetOk("internal"); ok && scheme.(bool) { + elbOpts.Scheme = aws.String("internal") + } + + if v, ok := d.GetOk("security_groups"); ok { + elbOpts.SecurityGroups = expandStringList(v.(*schema.Set).List()) + } + + if v, ok := d.GetOk("subnets"); ok { + elbOpts.Subnets = expandStringList(v.(*schema.Set).List()) + } + + if v, ok := d.GetOk("ip_address_type"); ok { + elbOpts.IpAddressType = aws.String(v.(string)) + } + + log.Printf("[DEBUG] ALB create configuration: %#v", elbOpts) + + resp, err := elbconn.CreateLoadBalancer(elbOpts) + if err != nil { + return errwrap.Wrapf("Error creating Application Load Balancer: {{err}}", err) + } + + if len(resp.LoadBalancers) != 1 { + return fmt.Errorf("No load balancers returned following creation of %s", d.Get("name").(string)) + } + + lb := resp.LoadBalancers[0] + d.SetId(*lb.LoadBalancerArn) + log.Printf("[INFO] ALB ID: %s", d.Id()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"provisioning", "failed"}, + Target: []string{"active"}, + Refresh: func() (interface{}, string, error) { + describeResp, err := elbconn.DescribeLoadBalancers(&elbv2.DescribeLoadBalancersInput{ + LoadBalancerArns: []*string{lb.LoadBalancerArn}, + }) + if err != nil { + return nil, "", err + } + + if len(describeResp.LoadBalancers) != 1 { + return nil, "", fmt.Errorf("No load balancers returned for %s", *lb.LoadBalancerArn) + } + dLb := describeResp.LoadBalancers[0] + + log.Printf("[INFO] ALB state: %s", *dLb.State.Code) + + return describeResp, *dLb.State.Code, nil + }, + Timeout: d.Timeout(schema.TimeoutCreate), + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, // Wait 30 secs before starting + } + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + return resourceAwsAlbUpdate(d, meta) +} + +func resourceAwsAlbRead(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbv2conn + albArn := d.Id() + + describeAlbOpts := &elbv2.DescribeLoadBalancersInput{ + LoadBalancerArns: []*string{aws.String(albArn)}, + } + + describeResp, err := elbconn.DescribeLoadBalancers(describeAlbOpts) + if err != nil { + if isLoadBalancerNotFound(err) { + // The ALB is gone now, so just remove it from the state + log.Printf("[WARN] ALB %s not found in AWS, removing from state", d.Id()) + d.SetId("") + return nil + } + + return errwrap.Wrapf("Error retrieving ALB: {{err}}", err) + } + if len(describeResp.LoadBalancers) != 1 { + return fmt.Errorf("Unable to find ALB: %#v", describeResp.LoadBalancers) + } + + return flattenAwsAlbResource(d, meta, describeResp.LoadBalancers[0]) +} + +func resourceAwsAlbUpdate(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbv2conn + + if !d.IsNewResource() { + if err := setElbV2Tags(elbconn, d); err != nil { + return errwrap.Wrapf("Error Modifying Tags on ALB: {{err}}", err) + } + } + + 
attributes := make([]*elbv2.LoadBalancerAttribute, 0) + + if d.HasChange("access_logs") { + logs := d.Get("access_logs").([]interface{}) + if len(logs) == 1 { + log := logs[0].(map[string]interface{}) + + attributes = append(attributes, + &elbv2.LoadBalancerAttribute{ + Key: aws.String("access_logs.s3.enabled"), + Value: aws.String(strconv.FormatBool(log["enabled"].(bool))), + }, + &elbv2.LoadBalancerAttribute{ + Key: aws.String("access_logs.s3.bucket"), + Value: aws.String(log["bucket"].(string)), + }) + + if prefix, ok := log["prefix"]; ok { + attributes = append(attributes, &elbv2.LoadBalancerAttribute{ + Key: aws.String("access_logs.s3.prefix"), + Value: aws.String(prefix.(string)), + }) + } + } else if len(logs) == 0 { + attributes = append(attributes, &elbv2.LoadBalancerAttribute{ + Key: aws.String("access_logs.s3.enabled"), + Value: aws.String("false"), + }) + } + } + + if d.HasChange("enable_deletion_protection") { + attributes = append(attributes, &elbv2.LoadBalancerAttribute{ + Key: aws.String("deletion_protection.enabled"), + Value: aws.String(fmt.Sprintf("%t", d.Get("enable_deletion_protection").(bool))), + }) + } + + if d.HasChange("idle_timeout") { + attributes = append(attributes, &elbv2.LoadBalancerAttribute{ + Key: aws.String("idle_timeout.timeout_seconds"), + Value: aws.String(fmt.Sprintf("%d", d.Get("idle_timeout").(int))), + }) + } + + if len(attributes) != 0 { + input := &elbv2.ModifyLoadBalancerAttributesInput{ + LoadBalancerArn: aws.String(d.Id()), + Attributes: attributes, + } + + log.Printf("[DEBUG] ALB Modify Load Balancer Attributes Request: %#v", input) + _, err := elbconn.ModifyLoadBalancerAttributes(input) + if err != nil { + return fmt.Errorf("Failure configuring ALB attributes: %s", err) + } + } + + if d.HasChange("security_groups") { + sgs := expandStringList(d.Get("security_groups").(*schema.Set).List()) + + params := &elbv2.SetSecurityGroupsInput{ + LoadBalancerArn: aws.String(d.Id()), + SecurityGroups: sgs, + } + _, err := elbconn.SetSecurityGroups(params) + if err != nil { + return fmt.Errorf("Failure Setting ALB Security Groups: %s", err) + } + + } + + if d.HasChange("subnets") { + subnets := expandStringList(d.Get("subnets").(*schema.Set).List()) + + params := &elbv2.SetSubnetsInput{ + LoadBalancerArn: aws.String(d.Id()), + Subnets: subnets, + } + + _, err := elbconn.SetSubnets(params) + if err != nil { + return fmt.Errorf("Failure Setting ALB Subnets: %s", err) + } + } + + if d.HasChange("ip_address_type") { + + params := &elbv2.SetIpAddressTypeInput{ + LoadBalancerArn: aws.String(d.Id()), + IpAddressType: aws.String(d.Get("ip_address_type").(string)), + } + + _, err := elbconn.SetIpAddressType(params) + if err != nil { + return fmt.Errorf("Failure Setting ALB IP Address Type: %s", err) + } + + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"active", "provisioning", "failed"}, + Target: []string{"active"}, + Refresh: func() (interface{}, string, error) { + describeResp, err := elbconn.DescribeLoadBalancers(&elbv2.DescribeLoadBalancersInput{ + LoadBalancerArns: []*string{aws.String(d.Id())}, + }) + if err != nil { + return nil, "", err + } + + if len(describeResp.LoadBalancers) != 1 { + return nil, "", fmt.Errorf("No load balancers returned for %s", d.Id()) + } + dLb := describeResp.LoadBalancers[0] + + log.Printf("[INFO] ALB state: %s", *dLb.State.Code) + + return describeResp, *dLb.State.Code, nil + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, // Wait 30 secs before 
starting + } + _, err := stateConf.WaitForState() + if err != nil { + return err + } + + return resourceAwsAlbRead(d, meta) +} + +func resourceAwsAlbDelete(d *schema.ResourceData, meta interface{}) error { + albconn := meta.(*AWSClient).elbv2conn + + log.Printf("[INFO] Deleting ALB: %s", d.Id()) + + // Destroy the load balancer + deleteElbOpts := elbv2.DeleteLoadBalancerInput{ + LoadBalancerArn: aws.String(d.Id()), + } + if _, err := albconn.DeleteLoadBalancer(&deleteElbOpts); err != nil { + return fmt.Errorf("Error deleting ALB: %s", err) + } + + return nil +} + +// flattenSubnetsFromAvailabilityZones creates a slice of strings containing the subnet IDs +// for the ALB based on the AvailabilityZones structure returned by the API. +func flattenSubnetsFromAvailabilityZones(availabilityZones []*elbv2.AvailabilityZone) []string { + var result []string + for _, az := range availabilityZones { + result = append(result, *az.SubnetId) + } + return result +} + +func albSuffixFromARN(arn *string) string { + if arn == nil { + return "" + } + + if arnComponents := regexp.MustCompile(`arn:.*:loadbalancer/(.*)`).FindAllStringSubmatch(*arn, -1); len(arnComponents) == 1 { + if len(arnComponents[0]) == 2 { + return arnComponents[0][1] + } + } + + return "" +} + +// flattenAwsAlbResource takes a *elbv2.LoadBalancer and populates all respective resource fields. +func flattenAwsAlbResource(d *schema.ResourceData, meta interface{}, alb *elbv2.LoadBalancer) error { + elbconn := meta.(*AWSClient).elbv2conn + + d.Set("arn", alb.LoadBalancerArn) + d.Set("arn_suffix", albSuffixFromARN(alb.LoadBalancerArn)) + d.Set("name", alb.LoadBalancerName) + d.Set("internal", (alb.Scheme != nil && *alb.Scheme == "internal")) + d.Set("security_groups", flattenStringList(alb.SecurityGroups)) + d.Set("subnets", flattenSubnetsFromAvailabilityZones(alb.AvailabilityZones)) + d.Set("vpc_id", alb.VpcId) + d.Set("zone_id", alb.CanonicalHostedZoneId) + d.Set("dns_name", alb.DNSName) + d.Set("ip_address_type", alb.IpAddressType) + + respTags, err := elbconn.DescribeTags(&elbv2.DescribeTagsInput{ + ResourceArns: []*string{alb.LoadBalancerArn}, + }) + if err != nil { + return errwrap.Wrapf("Error retrieving ALB Tags: {{err}}", err) + } + + var et []*elbv2.Tag + if len(respTags.TagDescriptions) > 0 { + et = respTags.TagDescriptions[0].Tags + } + d.Set("tags", tagsToMapELBv2(et)) + + attributesResp, err := elbconn.DescribeLoadBalancerAttributes(&elbv2.DescribeLoadBalancerAttributesInput{ + LoadBalancerArn: aws.String(d.Id()), + }) + if err != nil { + return errwrap.Wrapf("Error retrieving ALB Attributes: {{err}}", err) + } + + accessLogMap := map[string]interface{}{} + for _, attr := range attributesResp.Attributes { + switch *attr.Key { + case "access_logs.s3.enabled": + accessLogMap["enabled"] = *attr.Value + case "access_logs.s3.bucket": + accessLogMap["bucket"] = *attr.Value + case "access_logs.s3.prefix": + accessLogMap["prefix"] = *attr.Value + case "idle_timeout.timeout_seconds": + timeout, err := strconv.Atoi(*attr.Value) + if err != nil { + return errwrap.Wrapf("Error parsing ALB timeout: {{err}}", err) + } + log.Printf("[DEBUG] Setting ALB Timeout Seconds: %d", timeout) + d.Set("idle_timeout", timeout) + case "deletion_protection.enabled": + protectionEnabled := (*attr.Value) == "true" + log.Printf("[DEBUG] Setting ALB Deletion Protection Enabled: %t", protectionEnabled) + d.Set("enable_deletion_protection", protectionEnabled) + } + } + + log.Printf("[DEBUG] Setting ALB Access Logs: %#v", accessLogMap) + if accessLogMap["bucket"] 
!= "" || accessLogMap["prefix"] != "" { + d.Set("access_logs", []interface{}{accessLogMap}) + } else { + d.Set("access_logs", []interface{}{}) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_alb_listener.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_alb_listener.go new file mode 100644 index 000000000..f94e3b1a1 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_alb_listener.go @@ -0,0 +1,284 @@ +package aws + +import ( + "errors" + "fmt" + "log" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elbv2" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsAlbListener() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsAlbListenerCreate, + Read: resourceAwsAlbListenerRead, + Update: resourceAwsAlbListenerUpdate, + Delete: resourceAwsAlbListenerDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "load_balancer_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "port": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validateAwsAlbListenerPort, + }, + + "protocol": { + Type: schema.TypeString, + Optional: true, + Default: "HTTP", + StateFunc: func(v interface{}) string { + return strings.ToUpper(v.(string)) + }, + ValidateFunc: validateAwsAlbListenerProtocol, + }, + + "ssl_policy": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "certificate_arn": { + Type: schema.TypeString, + Optional: true, + }, + + "default_action": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "target_group_arn": { + Type: schema.TypeString, + Required: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAwsAlbListenerActionType, + }, + }, + }, + }, + }, + } +} + +func resourceAwsAlbListenerCreate(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbv2conn + + albArn := d.Get("load_balancer_arn").(string) + + params := &elbv2.CreateListenerInput{ + LoadBalancerArn: aws.String(albArn), + Port: aws.Int64(int64(d.Get("port").(int))), + Protocol: aws.String(d.Get("protocol").(string)), + } + + if sslPolicy, ok := d.GetOk("ssl_policy"); ok { + params.SslPolicy = aws.String(sslPolicy.(string)) + } + + if certificateArn, ok := d.GetOk("certificate_arn"); ok { + params.Certificates = make([]*elbv2.Certificate, 1) + params.Certificates[0] = &elbv2.Certificate{ + CertificateArn: aws.String(certificateArn.(string)), + } + } + + if defaultActions := d.Get("default_action").([]interface{}); len(defaultActions) == 1 { + params.DefaultActions = make([]*elbv2.Action, len(defaultActions)) + + for i, defaultAction := range defaultActions { + defaultActionMap := defaultAction.(map[string]interface{}) + + params.DefaultActions[i] = &elbv2.Action{ + TargetGroupArn: aws.String(defaultActionMap["target_group_arn"].(string)), + Type: aws.String(defaultActionMap["type"].(string)), + } + } + } + + var resp *elbv2.CreateListenerOutput + + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + var err error + log.Printf("[DEBUG] Creating ALB listener 
for ARN: %s", d.Get("load_balancer_arn").(string)) + resp, err = elbconn.CreateListener(params) + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "CertificateNotFound" { + log.Printf("[WARN] Got an error while trying to create ALB listener for ARN: %s: %s", albArn, err) + return resource.RetryableError(err) + } + } + if err != nil { + return resource.NonRetryableError(err) + } + + return nil + }) + + if err != nil { + return errwrap.Wrapf("Error creating ALB Listener: {{err}}", err) + } + + if len(resp.Listeners) == 0 { + return errors.New("Error creating ALB Listener: no listeners returned in response") + } + + d.SetId(*resp.Listeners[0].ListenerArn) + + return resourceAwsAlbListenerRead(d, meta) +} + +func resourceAwsAlbListenerRead(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbv2conn + + resp, err := elbconn.DescribeListeners(&elbv2.DescribeListenersInput{ + ListenerArns: []*string{aws.String(d.Id())}, + }) + if err != nil { + if isListenerNotFound(err) { + log.Printf("[WARN] DescribeListeners - removing %s from state", d.Id()) + d.SetId("") + return nil + } + return errwrap.Wrapf("Error retrieving Listener: {{err}}", err) + } + + if len(resp.Listeners) != 1 { + return fmt.Errorf("Error retrieving Listener %q", d.Id()) + } + + listener := resp.Listeners[0] + + d.Set("arn", listener.ListenerArn) + d.Set("load_balancer_arn", listener.LoadBalancerArn) + d.Set("port", listener.Port) + d.Set("protocol", listener.Protocol) + d.Set("ssl_policy", listener.SslPolicy) + + if listener.Certificates != nil && len(listener.Certificates) == 1 { + d.Set("certificate_arn", listener.Certificates[0].CertificateArn) + } + + defaultActions := make([]map[string]interface{}, 0) + if listener.DefaultActions != nil && len(listener.DefaultActions) > 0 { + for _, defaultAction := range listener.DefaultActions { + action := map[string]interface{}{ + "target_group_arn": *defaultAction.TargetGroupArn, + "type": *defaultAction.Type, + } + defaultActions = append(defaultActions, action) + } + } + d.Set("default_action", defaultActions) + + return nil +} + +func resourceAwsAlbListenerUpdate(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbv2conn + + params := &elbv2.ModifyListenerInput{ + ListenerArn: aws.String(d.Id()), + Port: aws.Int64(int64(d.Get("port").(int))), + Protocol: aws.String(d.Get("protocol").(string)), + } + + if sslPolicy, ok := d.GetOk("ssl_policy"); ok { + params.SslPolicy = aws.String(sslPolicy.(string)) + } + + if certificateArn, ok := d.GetOk("certificate_arn"); ok { + params.Certificates = make([]*elbv2.Certificate, 1) + params.Certificates[0] = &elbv2.Certificate{ + CertificateArn: aws.String(certificateArn.(string)), + } + } + + if defaultActions := d.Get("default_action").([]interface{}); len(defaultActions) == 1 { + params.DefaultActions = make([]*elbv2.Action, len(defaultActions)) + + for i, defaultAction := range defaultActions { + defaultActionMap := defaultAction.(map[string]interface{}) + + params.DefaultActions[i] = &elbv2.Action{ + TargetGroupArn: aws.String(defaultActionMap["target_group_arn"].(string)), + Type: aws.String(defaultActionMap["type"].(string)), + } + } + } + + _, err := elbconn.ModifyListener(params) + if err != nil { + return errwrap.Wrapf("Error modifying ALB Listener: {{err}}", err) + } + + return resourceAwsAlbListenerRead(d, meta) +} + +func resourceAwsAlbListenerDelete(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbv2conn + + _, err := 
elbconn.DeleteListener(&elbv2.DeleteListenerInput{
+		ListenerArn: aws.String(d.Id()),
+	})
+	if err != nil {
+		return errwrap.Wrapf("Error deleting Listener: {{err}}", err)
+	}
+
+	return nil
+}
+
+func validateAwsAlbListenerPort(v interface{}, k string) (ws []string, errors []error) {
+	port := v.(int)
+	if port < 1 || port > 65535 {
+		errors = append(errors, fmt.Errorf("%q must be a valid port number (1-65535)", k))
+	}
+	return
+}
+
+func validateAwsAlbListenerProtocol(v interface{}, k string) (ws []string, errors []error) {
+	value := strings.ToLower(v.(string))
+	if value == "http" || value == "https" {
+		return
+	}
+
+	errors = append(errors, fmt.Errorf("%q must be either %q or %q", k, "HTTP", "HTTPS"))
+	return
+}
+
+func validateAwsAlbListenerActionType(v interface{}, k string) (ws []string, errors []error) {
+	value := strings.ToLower(v.(string))
+	if value != "forward" {
+		errors = append(errors, fmt.Errorf("%q must have the value %q", k, "forward"))
+	}
+	return
+}
+
+func isListenerNotFound(err error) bool {
+	elberr, ok := err.(awserr.Error)
+	return ok && elberr.Code() == "ListenerNotFound"
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_alb_listener_rule.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_alb_listener_rule.go
new file mode 100644
index 000000000..21292753c
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_alb_listener_rule.go
@@ -0,0 +1,293 @@
+package aws
+
+import (
+	"errors"
+	"fmt"
+	"log"
+	"strconv"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/elbv2"
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsAlbListenerRule() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsAlbListenerRuleCreate,
+		Read:   resourceAwsAlbListenerRuleRead,
+		Update: resourceAwsAlbListenerRuleUpdate,
+		Delete: resourceAwsAlbListenerRuleDelete,
+		Importer: &schema.ResourceImporter{
+			State: schema.ImportStatePassthrough,
+		},
+
+		Schema: map[string]*schema.Schema{
+			"arn": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"listener_arn": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"priority": {
+				Type:         schema.TypeInt,
+				Required:     true,
+				ForceNew:     true,
+				ValidateFunc: validateAwsAlbListenerRulePriority,
+			},
+			"action": {
+				Type:     schema.TypeList,
+				Required: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"target_group_arn": {
+							Type:     schema.TypeString,
+							Required: true,
+						},
+						"type": {
+							Type:         schema.TypeString,
+							Required:     true,
+							ValidateFunc: validateAwsAlbListenerActionType,
+						},
+					},
+				},
+			},
+			"condition": {
+				Type:     schema.TypeList,
+				Required: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"field": {
+							Type:         schema.TypeString,
+							Optional:     true,
+							ValidateFunc: validateAwsListenerRuleField,
+						},
+						"values": {
+							Type:     schema.TypeList,
+							MaxItems: 1,
+							Elem:     &schema.Schema{Type: schema.TypeString},
+							Optional: true,
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func resourceAwsAlbListenerRuleCreate(d *schema.ResourceData, meta interface{}) error {
+	elbconn := meta.(*AWSClient).elbv2conn
+
+	params := &elbv2.CreateRuleInput{
+		ListenerArn: aws.String(d.Get("listener_arn").(string)),
+		Priority:    aws.Int64(int64(d.Get("priority").(int))),
+	}
+
+	actions := d.Get("action").([]interface{})
+	params.Actions = make([]*elbv2.Action, len(actions))
+	for 
i, action := range actions { + actionMap := action.(map[string]interface{}) + params.Actions[i] = &elbv2.Action{ + TargetGroupArn: aws.String(actionMap["target_group_arn"].(string)), + Type: aws.String(actionMap["type"].(string)), + } + } + + conditions := d.Get("condition").([]interface{}) + params.Conditions = make([]*elbv2.RuleCondition, len(conditions)) + for i, condition := range conditions { + conditionMap := condition.(map[string]interface{}) + values := conditionMap["values"].([]interface{}) + params.Conditions[i] = &elbv2.RuleCondition{ + Field: aws.String(conditionMap["field"].(string)), + Values: make([]*string, len(values)), + } + for j, value := range values { + params.Conditions[i].Values[j] = aws.String(value.(string)) + } + } + + resp, err := elbconn.CreateRule(params) + if err != nil { + return errwrap.Wrapf("Error creating ALB Listener Rule: {{err}}", err) + } + + if len(resp.Rules) == 0 { + return errors.New("Error creating ALB Listener Rule: no rules returned in response") + } + + d.SetId(*resp.Rules[0].RuleArn) + + return resourceAwsAlbListenerRuleRead(d, meta) +} + +func resourceAwsAlbListenerRuleRead(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbv2conn + + resp, err := elbconn.DescribeRules(&elbv2.DescribeRulesInput{ + RuleArns: []*string{aws.String(d.Id())}, + }) + if err != nil { + if isRuleNotFound(err) { + log.Printf("[WARN] DescribeRules - removing %s from state", d.Id()) + d.SetId("") + return nil + } + return errwrap.Wrapf(fmt.Sprintf("Error retrieving Rules for listener %s: {{err}}", d.Id()), err) + } + + if len(resp.Rules) != 1 { + return fmt.Errorf("Error retrieving Rule %q", d.Id()) + } + + rule := resp.Rules[0] + + d.Set("arn", rule.RuleArn) + // Rules are evaluated in priority order, from the lowest value to the highest value. The default rule has the lowest priority. 
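+	// Illustrative example: with rules at priorities 1, 5 and 10, a request is
+	// checked against priority 1 first, and only falls through to the listener's
+	// default rule when no other rule matches. The API reports the default
+	// rule's priority as the literal string "default" rather than a number,
+	// which is why it is special-cased below.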
+	if *rule.Priority == "default" {
+		d.Set("priority", 99999)
+	} else {
+		if priority, err := strconv.Atoi(*rule.Priority); err != nil {
+			return errwrap.Wrapf(fmt.Sprintf("Cannot convert rule priority %q to int: {{err}}", *rule.Priority), err)
+		} else {
+			d.Set("priority", priority)
+		}
+	}
+
+	actions := make([]interface{}, len(rule.Actions))
+	for i, action := range rule.Actions {
+		actionMap := make(map[string]interface{})
+		actionMap["target_group_arn"] = *action.TargetGroupArn
+		actionMap["type"] = *action.Type
+		actions[i] = actionMap
+	}
+	d.Set("action", actions)
+
+	conditions := make([]interface{}, len(rule.Conditions))
+	for i, condition := range rule.Conditions {
+		conditionMap := make(map[string]interface{})
+		conditionMap["field"] = *condition.Field
+		conditionValues := make([]string, len(condition.Values))
+		for k, value := range condition.Values {
+			conditionValues[k] = *value
+		}
+		conditionMap["values"] = conditionValues
+		conditions[i] = conditionMap
+	}
+	d.Set("condition", conditions)
+
+	return nil
+}
+
+func resourceAwsAlbListenerRuleUpdate(d *schema.ResourceData, meta interface{}) error {
+	elbconn := meta.(*AWSClient).elbv2conn
+
+	d.Partial(true)
+
+	if d.HasChange("priority") {
+		params := &elbv2.SetRulePrioritiesInput{
+			RulePriorities: []*elbv2.RulePriorityPair{
+				{
+					RuleArn:  aws.String(d.Id()),
+					Priority: aws.Int64(int64(d.Get("priority").(int))),
+				},
+			},
+		}
+
+		_, err := elbconn.SetRulePriorities(params)
+		if err != nil {
+			return err
+		}
+
+		d.SetPartial("priority")
+	}
+
+	requestUpdate := false
+	params := &elbv2.ModifyRuleInput{
+		RuleArn: aws.String(d.Id()),
+	}
+
+	if d.HasChange("action") {
+		actions := d.Get("action").([]interface{})
+		params.Actions = make([]*elbv2.Action, len(actions))
+		for i, action := range actions {
+			actionMap := action.(map[string]interface{})
+			params.Actions[i] = &elbv2.Action{
+				TargetGroupArn: aws.String(actionMap["target_group_arn"].(string)),
+				Type:           aws.String(actionMap["type"].(string)),
+			}
+		}
+		requestUpdate = true
+		d.SetPartial("action")
+	}
+
+	if d.HasChange("condition") {
+		conditions := d.Get("condition").([]interface{})
+		params.Conditions = make([]*elbv2.RuleCondition, len(conditions))
+		for i, condition := range conditions {
+			conditionMap := condition.(map[string]interface{})
+			values := conditionMap["values"].([]interface{})
+			params.Conditions[i] = &elbv2.RuleCondition{
+				Field:  aws.String(conditionMap["field"].(string)),
+				Values: make([]*string, len(values)),
+			}
+			for j, value := range values {
+				params.Conditions[i].Values[j] = aws.String(value.(string))
+			}
+		}
+		requestUpdate = true
+		d.SetPartial("condition")
+	}
+
+	if requestUpdate {
+		resp, err := elbconn.ModifyRule(params)
+		if err != nil {
+			return errwrap.Wrapf("Error modifying ALB Listener Rule: {{err}}", err)
+		}
+
+		if len(resp.Rules) == 0 {
+			return errors.New("Error modifying ALB Listener Rule: no rules returned in response")
+		}
+	}
+
+	d.Partial(false)
+
+	return resourceAwsAlbListenerRuleRead(d, meta)
+}
+
+func resourceAwsAlbListenerRuleDelete(d *schema.ResourceData, meta interface{}) error {
+	elbconn := meta.(*AWSClient).elbv2conn
+
+	_, err := elbconn.DeleteRule(&elbv2.DeleteRuleInput{
+		RuleArn: aws.String(d.Id()),
+	})
+	if err != nil && !isRuleNotFound(err) {
+		return errwrap.Wrapf("Error deleting ALB Listener Rule: {{err}}", err)
+	}
+	return nil
+}
+
+func validateAwsAlbListenerRulePriority(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(int)
+	if value < 1 || value > 99999 {
+		errors = append(errors, 
fmt.Errorf("%q must be in the range 1-99999", k)) + } + return +} + +func validateAwsListenerRuleField(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 64 { + errors = append(errors, fmt.Errorf("%q must be a maximum of 64 characters", k)) + } + return +} + +func isRuleNotFound(err error) bool { + elberr, ok := err.(awserr.Error) + return ok && elberr.Code() == "RuleNotFound" +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_alb_target_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_alb_target_group.go new file mode 100644 index 000000000..cd4256839 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_alb_target_group.go @@ -0,0 +1,538 @@ +package aws + +import ( + "errors" + "fmt" + "log" + "regexp" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elbv2" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsAlbTargetGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsAlbTargetGroupCreate, + Read: resourceAwsAlbTargetGroupRead, + Update: resourceAwsAlbTargetGroupUpdate, + Delete: resourceAwsAlbTargetGroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "arn_suffix": { + Type: schema.TypeString, + Computed: true, + }, + + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: validateAwsAlbTargetGroupName, + }, + "name_prefix": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateAwsAlbTargetGroupNamePrefix, + }, + + "port": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validateAwsAlbTargetGroupPort, + }, + + "protocol": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateAwsAlbTargetGroupProtocol, + }, + + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "deregistration_delay": { + Type: schema.TypeInt, + Optional: true, + Default: 300, + ValidateFunc: validateAwsAlbTargetGroupDeregistrationDelay, + }, + + "stickiness": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAwsAlbTargetGroupStickinessType, + }, + "cookie_duration": { + Type: schema.TypeInt, + Optional: true, + Default: 86400, + ValidateFunc: validateAwsAlbTargetGroupStickinessCookieDuration, + }, + }, + }, + }, + + "health_check": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "interval": { + Type: schema.TypeInt, + Optional: true, + Default: 30, + }, + + "path": { + Type: schema.TypeString, + Optional: true, + Default: "/", + ValidateFunc: validateAwsAlbTargetGroupHealthCheckPath, + }, + + "port": { + Type: schema.TypeString, + Optional: true, + Default: "traffic-port", + ValidateFunc: validateAwsAlbTargetGroupHealthCheckPort, + 
}, + + "protocol": { + Type: schema.TypeString, + Optional: true, + Default: "HTTP", + StateFunc: func(v interface{}) string { + return strings.ToUpper(v.(string)) + }, + ValidateFunc: validateAwsAlbTargetGroupHealthCheckProtocol, + }, + + "timeout": { + Type: schema.TypeInt, + Optional: true, + Default: 5, + ValidateFunc: validateAwsAlbTargetGroupHealthCheckTimeout, + }, + + "healthy_threshold": { + Type: schema.TypeInt, + Optional: true, + Default: 5, + ValidateFunc: validateAwsAlbTargetGroupHealthCheckHealthyThreshold, + }, + + "matcher": { + Type: schema.TypeString, + Optional: true, + Default: "200", + }, + + "unhealthy_threshold": { + Type: schema.TypeInt, + Optional: true, + Default: 2, + ValidateFunc: validateAwsAlbTargetGroupHealthCheckHealthyThreshold, + }, + }, + }, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsAlbTargetGroupCreate(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbv2conn + + var groupName string + if v, ok := d.GetOk("name"); ok { + groupName = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + groupName = resource.PrefixedUniqueId(v.(string)) + } else { + groupName = resource.PrefixedUniqueId("tf-") + } + + params := &elbv2.CreateTargetGroupInput{ + Name: aws.String(groupName), + Port: aws.Int64(int64(d.Get("port").(int))), + Protocol: aws.String(d.Get("protocol").(string)), + VpcId: aws.String(d.Get("vpc_id").(string)), + } + + if healthChecks := d.Get("health_check").([]interface{}); len(healthChecks) == 1 { + healthCheck := healthChecks[0].(map[string]interface{}) + + params.HealthCheckIntervalSeconds = aws.Int64(int64(healthCheck["interval"].(int))) + params.HealthCheckPath = aws.String(healthCheck["path"].(string)) + params.HealthCheckPort = aws.String(healthCheck["port"].(string)) + params.HealthCheckProtocol = aws.String(healthCheck["protocol"].(string)) + params.HealthCheckTimeoutSeconds = aws.Int64(int64(healthCheck["timeout"].(int))) + params.HealthyThresholdCount = aws.Int64(int64(healthCheck["healthy_threshold"].(int))) + params.UnhealthyThresholdCount = aws.Int64(int64(healthCheck["unhealthy_threshold"].(int))) + params.Matcher = &elbv2.Matcher{ + HttpCode: aws.String(healthCheck["matcher"].(string)), + } + } + + resp, err := elbconn.CreateTargetGroup(params) + if err != nil { + return errwrap.Wrapf("Error creating ALB Target Group: {{err}}", err) + } + + if len(resp.TargetGroups) == 0 { + return errors.New("Error creating ALB Target Group: no groups returned in response") + } + + targetGroupArn := resp.TargetGroups[0].TargetGroupArn + d.SetId(*targetGroupArn) + + return resourceAwsAlbTargetGroupUpdate(d, meta) +} + +func resourceAwsAlbTargetGroupRead(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbv2conn + + resp, err := elbconn.DescribeTargetGroups(&elbv2.DescribeTargetGroupsInput{ + TargetGroupArns: []*string{aws.String(d.Id())}, + }) + if err != nil { + if isTargetGroupNotFound(err) { + log.Printf("[DEBUG] DescribeTargetGroups - removing %s from state", d.Id()) + d.SetId("") + return nil + } + return errwrap.Wrapf("Error retrieving Target Group: {{err}}", err) + } + + if len(resp.TargetGroups) != 1 { + return fmt.Errorf("Error retrieving Target Group %q", d.Id()) + } + + targetGroup := resp.TargetGroups[0] + + d.Set("arn", targetGroup.TargetGroupArn) + d.Set("arn_suffix", albTargetGroupSuffixFromARN(targetGroup.TargetGroupArn)) + d.Set("name", targetGroup.TargetGroupName) + d.Set("port", targetGroup.Port) + d.Set("protocol", targetGroup.Protocol) + 
d.Set("vpc_id", targetGroup.VpcId) + + healthCheck := make(map[string]interface{}) + healthCheck["interval"] = *targetGroup.HealthCheckIntervalSeconds + healthCheck["path"] = *targetGroup.HealthCheckPath + healthCheck["port"] = *targetGroup.HealthCheckPort + healthCheck["protocol"] = *targetGroup.HealthCheckProtocol + healthCheck["timeout"] = *targetGroup.HealthCheckTimeoutSeconds + healthCheck["healthy_threshold"] = *targetGroup.HealthyThresholdCount + healthCheck["unhealthy_threshold"] = *targetGroup.UnhealthyThresholdCount + healthCheck["matcher"] = *targetGroup.Matcher.HttpCode + d.Set("health_check", []interface{}{healthCheck}) + + attrResp, err := elbconn.DescribeTargetGroupAttributes(&elbv2.DescribeTargetGroupAttributesInput{ + TargetGroupArn: aws.String(d.Id()), + }) + if err != nil { + return errwrap.Wrapf("Error retrieving Target Group Attributes: {{err}}", err) + } + + stickinessMap := map[string]interface{}{} + for _, attr := range attrResp.Attributes { + switch *attr.Key { + case "stickiness.enabled": + enabled, err := strconv.ParseBool(*attr.Value) + if err != nil { + return fmt.Errorf("Error converting stickiness.enabled to bool: %s", *attr.Value) + } + stickinessMap["enabled"] = enabled + case "stickiness.type": + stickinessMap["type"] = *attr.Value + case "stickiness.lb_cookie.duration_seconds": + duration, err := strconv.Atoi(*attr.Value) + if err != nil { + return fmt.Errorf("Error converting stickiness.lb_cookie.duration_seconds to int: %s", *attr.Value) + } + stickinessMap["cookie_duration"] = duration + case "deregistration_delay.timeout_seconds": + timeout, err := strconv.Atoi(*attr.Value) + if err != nil { + return fmt.Errorf("Error converting deregistration_delay.timeout_seconds to int: %s", *attr.Value) + } + d.Set("deregistration_delay", timeout) + } + } + + if err := d.Set("stickiness", []interface{}{stickinessMap}); err != nil { + return err + } + + tagsResp, err := elbconn.DescribeTags(&elbv2.DescribeTagsInput{ + ResourceArns: []*string{aws.String(d.Id())}, + }) + if err != nil { + return errwrap.Wrapf("Error retrieving Target Group Tags: {{err}}", err) + } + for _, t := range tagsResp.TagDescriptions { + if *t.ResourceArn == d.Id() { + if err := d.Set("tags", tagsToMapELBv2(t.Tags)); err != nil { + return err + } + } + } + + return nil +} + +func resourceAwsAlbTargetGroupUpdate(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbv2conn + + if err := setElbV2Tags(elbconn, d); err != nil { + return errwrap.Wrapf("Error Modifying Tags on ALB Target Group: {{err}}", err) + } + + if d.HasChange("health_check") { + healthChecks := d.Get("health_check").([]interface{}) + + var params *elbv2.ModifyTargetGroupInput + if len(healthChecks) == 1 { + healthCheck := healthChecks[0].(map[string]interface{}) + + params = &elbv2.ModifyTargetGroupInput{ + TargetGroupArn: aws.String(d.Id()), + HealthCheckIntervalSeconds: aws.Int64(int64(healthCheck["interval"].(int))), + HealthCheckPath: aws.String(healthCheck["path"].(string)), + HealthCheckPort: aws.String(healthCheck["port"].(string)), + HealthCheckProtocol: aws.String(healthCheck["protocol"].(string)), + HealthCheckTimeoutSeconds: aws.Int64(int64(healthCheck["timeout"].(int))), + HealthyThresholdCount: aws.Int64(int64(healthCheck["healthy_threshold"].(int))), + UnhealthyThresholdCount: aws.Int64(int64(healthCheck["unhealthy_threshold"].(int))), + Matcher: &elbv2.Matcher{ + HttpCode: aws.String(healthCheck["matcher"].(string)), + }, + } + } else { + params = &elbv2.ModifyTargetGroupInput{ + 
TargetGroupArn: aws.String(d.Id()),
+			}
+		}
+
+		_, err := elbconn.ModifyTargetGroup(params)
+		if err != nil {
+			return errwrap.Wrapf("Error modifying Target Group: {{err}}", err)
+		}
+	}
+
+	var attrs []*elbv2.TargetGroupAttribute
+
+	if d.HasChange("deregistration_delay") {
+		attrs = append(attrs, &elbv2.TargetGroupAttribute{
+			Key:   aws.String("deregistration_delay.timeout_seconds"),
+			Value: aws.String(fmt.Sprintf("%d", d.Get("deregistration_delay").(int))),
+		})
+	}
+
+	if d.HasChange("stickiness") {
+		stickinessBlocks := d.Get("stickiness").([]interface{})
+		if len(stickinessBlocks) == 1 {
+			stickiness := stickinessBlocks[0].(map[string]interface{})
+
+			attrs = append(attrs,
+				&elbv2.TargetGroupAttribute{
+					Key:   aws.String("stickiness.enabled"),
+					Value: aws.String(strconv.FormatBool(stickiness["enabled"].(bool))),
+				},
+				&elbv2.TargetGroupAttribute{
+					Key:   aws.String("stickiness.type"),
+					Value: aws.String(stickiness["type"].(string)),
+				},
+				&elbv2.TargetGroupAttribute{
+					Key:   aws.String("stickiness.lb_cookie.duration_seconds"),
+					Value: aws.String(fmt.Sprintf("%d", stickiness["cookie_duration"].(int))),
+				})
+		} else if len(stickinessBlocks) == 0 {
+			attrs = append(attrs, &elbv2.TargetGroupAttribute{
+				Key:   aws.String("stickiness.enabled"),
+				Value: aws.String("false"),
+			})
+		}
+	}
+
+	if len(attrs) > 0 {
+		params := &elbv2.ModifyTargetGroupAttributesInput{
+			TargetGroupArn: aws.String(d.Id()),
+			Attributes:     attrs,
+		}
+
+		_, err := elbconn.ModifyTargetGroupAttributes(params)
+		if err != nil {
+			return errwrap.Wrapf("Error modifying Target Group Attributes: {{err}}", err)
+		}
+	}
+
+	return resourceAwsAlbTargetGroupRead(d, meta)
+}
+
+func resourceAwsAlbTargetGroupDelete(d *schema.ResourceData, meta interface{}) error {
+	elbconn := meta.(*AWSClient).elbv2conn
+
+	_, err := elbconn.DeleteTargetGroup(&elbv2.DeleteTargetGroupInput{
+		TargetGroupArn: aws.String(d.Id()),
+	})
+	if err != nil {
+		return errwrap.Wrapf("Error deleting Target Group: {{err}}", err)
+	}
+
+	return nil
+}
+
+func isTargetGroupNotFound(err error) bool {
+	elberr, ok := err.(awserr.Error)
+	return ok && elberr.Code() == "TargetGroupNotFound"
+}
+
+func validateAwsAlbTargetGroupHealthCheckPath(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if len(value) > 1024 {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot be longer than 1024 characters: %q", k, value))
+	}
+	return
+}
+
+func validateAwsAlbTargetGroupHealthCheckPort(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+
+	if value == "traffic-port" {
+		return
+	}
+
+	port, err := strconv.Atoi(value)
+	if err != nil {
+		errors = append(errors, fmt.Errorf("%q must be a valid port number (1-65535) or %q", k, "traffic-port"))
+		return
+	}
+
+	if port < 1 || port > 65535 {
+		errors = append(errors, fmt.Errorf("%q must be a valid port number (1-65535) or %q", k, "traffic-port"))
+	}
+
+	return
+}
+
+func validateAwsAlbTargetGroupHealthCheckHealthyThreshold(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(int)
+	if value < 2 || value > 10 {
+		errors = append(errors, fmt.Errorf("%q must be an integer between 2 and 10", k))
+	}
+	return
+}
+
+func validateAwsAlbTargetGroupHealthCheckTimeout(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(int)
+	if value < 2 || value > 60 {
+		errors = append(errors, fmt.Errorf("%q must be an integer between 2 and 60", k))
+	}
+	return
+}
+
+func validateAwsAlbTargetGroupHealthCheckProtocol(v interface{}, k string) (ws []string, errors 
[]error) {
+	value := strings.ToLower(v.(string))
+	if value == "http" || value == "https" {
+		return
+	}
+
+	errors = append(errors, fmt.Errorf("%q must be either %q or %q", k, "HTTP", "HTTPS"))
+	return
+}
+
+func validateAwsAlbTargetGroupPort(v interface{}, k string) (ws []string, errors []error) {
+	port := v.(int)
+	if port < 1 || port > 65535 {
+		errors = append(errors, fmt.Errorf("%q must be a valid port number (1-65535)", k))
+	}
+	return
+}
+
+func validateAwsAlbTargetGroupProtocol(v interface{}, k string) (ws []string, errors []error) {
+	protocol := strings.ToLower(v.(string))
+	if protocol == "http" || protocol == "https" {
+		return
+	}
+
+	errors = append(errors, fmt.Errorf("%q must be either %q or %q", k, "HTTP", "HTTPS"))
+	return
+}
+
+func validateAwsAlbTargetGroupDeregistrationDelay(v interface{}, k string) (ws []string, errors []error) {
+	delay := v.(int)
+	if delay < 0 || delay > 3600 {
+		errors = append(errors, fmt.Errorf("%q must be in the range 0-3600 seconds", k))
+	}
+	return
+}
+
+func validateAwsAlbTargetGroupStickinessType(v interface{}, k string) (ws []string, errors []error) {
+	stickinessType := v.(string)
+	if stickinessType != "lb_cookie" {
+		errors = append(errors, fmt.Errorf("%q must have the value %q", k, "lb_cookie"))
+	}
+	return
+}
+
+func validateAwsAlbTargetGroupStickinessCookieDuration(v interface{}, k string) (ws []string, errors []error) {
+	duration := v.(int)
+	if duration < 1 || duration > 604800 {
+		errors = append(errors, fmt.Errorf("%q must be between 1 second and 1 week (1-604800 seconds)", k))
+	}
+	return
+}
+
+func albTargetGroupSuffixFromARN(arn *string) string {
+	if arn == nil {
+		return ""
+	}
+
+	if arnComponents := regexp.MustCompile(`arn:.*:targetgroup/(.*)`).FindAllStringSubmatch(*arn, -1); len(arnComponents) == 1 {
+		if len(arnComponents[0]) == 2 {
+			return fmt.Sprintf("targetgroup/%s", arnComponents[0][1])
+		}
+	}
+
+	return ""
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_alb_target_group_attachment.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_alb_target_group_attachment.go
new file mode 100644
index 000000000..55a3b7392
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_alb_target_group_attachment.go
@@ -0,0 +1,141 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/elbv2"
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsAlbTargetGroupAttachment() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsAlbAttachmentCreate,
+		Read:   resourceAwsAlbAttachmentRead,
+		Delete: resourceAwsAlbAttachmentDelete,
+
+		Schema: map[string]*schema.Schema{
+			"target_group_arn": {
+				Type:     schema.TypeString,
+				ForceNew: true,
+				Required: true,
+			},
+
+			"target_id": {
+				Type:     schema.TypeString,
+				ForceNew: true,
+				Required: true,
+			},
+
+			"port": {
+				Type:     schema.TypeInt,
+				ForceNew: true,
+				Optional: true,
+			},
+		},
+	}
+}
+
+func resourceAwsAlbAttachmentCreate(d *schema.ResourceData, meta interface{}) error {
+	elbconn := meta.(*AWSClient).elbv2conn
+
+	target := &elbv2.TargetDescription{
+		Id: aws.String(d.Get("target_id").(string)),
+	}
+
+	if v, ok := d.GetOk("port"); ok {
+		target.Port = aws.Int64(int64(v.(int)))
+	}
+
+	params := &elbv2.RegisterTargetsInput{
+		TargetGroupArn: 
aws.String(d.Get("target_group_arn").(string)), + Targets: []*elbv2.TargetDescription{target}, + } + + log.Printf("[INFO] Registering Target %s with Target Group %s", d.Get("target_id").(string), + d.Get("target_group_arn").(string)) + + _, err := elbconn.RegisterTargets(params) + if err != nil { + return errwrap.Wrapf("Error registering targets with target group: {{err}}", err) + } + + d.SetId(resource.PrefixedUniqueId(fmt.Sprintf("%s-", d.Get("target_group_arn")))) + + return nil +} + +func resourceAwsAlbAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbv2conn + + target := &elbv2.TargetDescription{ + Id: aws.String(d.Get("target_id").(string)), + } + + if v, ok := d.GetOk("port"); ok { + target.Port = aws.Int64(int64(v.(int))) + } + + params := &elbv2.DeregisterTargetsInput{ + TargetGroupArn: aws.String(d.Get("target_group_arn").(string)), + Targets: []*elbv2.TargetDescription{target}, + } + + _, err := elbconn.DeregisterTargets(params) + if err != nil && !isTargetGroupNotFound(err) { + return errwrap.Wrapf("Error deregistering Targets: {{err}}", err) + } + + d.SetId("") + + return nil +} + +// resourceAwsAlbAttachmentRead requires all of the fields in order to describe the correct +// target, so there is no work to do beyond ensuring that the target and group still exist. +func resourceAwsAlbAttachmentRead(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbv2conn + + target := &elbv2.TargetDescription{ + Id: aws.String(d.Get("target_id").(string)), + } + + if v, ok := d.GetOk("port"); ok { + target.Port = aws.Int64(int64(v.(int))) + } + + resp, err := elbconn.DescribeTargetHealth(&elbv2.DescribeTargetHealthInput{ + TargetGroupArn: aws.String(d.Get("target_group_arn").(string)), + Targets: []*elbv2.TargetDescription{target}, + }) + if err != nil { + if isTargetGroupNotFound(err) { + log.Printf("[WARN] Target group does not exist, removing target attachment %s", d.Id()) + d.SetId("") + return nil + } + if isInvalidTarget(err) { + log.Printf("[WARN] Target does not exist, removing target attachment %s", d.Id()) + d.SetId("") + return nil + } + return errwrap.Wrapf("Error reading Target Health: {{err}}", err) + } + + if len(resp.TargetHealthDescriptions) != 1 { + log.Printf("[WARN] Target does not exist, removing target attachment %s", d.Id()) + d.SetId("") + return nil + } + + return nil +} + +func isInvalidTarget(err error) bool { + elberr, ok := err.(awserr.Error) + return ok && elberr.Code() == "InvalidTarget" +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami.go new file mode 100644 index 000000000..d01c402ed --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami.go @@ -0,0 +1,562 @@ +package aws + +import ( + "bytes" + "errors" + "fmt" + "log" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +const ( + AWSAMIRetryTimeout = 40 * time.Minute + AWSAMIDeleteRetryTimeout = 90 * time.Minute + AWSAMIRetryDelay = 5 * time.Second + AWSAMIRetryMinTimeout = 3 * time.Second +) + +func resourceAwsAmi() *schema.Resource { + // Our schema is shared also with aws_ami_copy and 
aws_ami_from_instance
+	resourceSchema := resourceAwsAmiCommonSchema(false)
+
+	return &schema.Resource{
+		Create: resourceAwsAmiCreate,
+
+		Schema: resourceSchema,
+
+		// The Read, Update and Delete operations are shared with aws_ami_copy
+		// and aws_ami_from_instance, since they differ only in how the image
+		// is created.
+		Read:   resourceAwsAmiRead,
+		Update: resourceAwsAmiUpdate,
+		Delete: resourceAwsAmiDelete,
+	}
+}
+
+func resourceAwsAmiCreate(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*AWSClient).ec2conn
+
+	req := &ec2.RegisterImageInput{
+		Name:               aws.String(d.Get("name").(string)),
+		Description:        aws.String(d.Get("description").(string)),
+		Architecture:       aws.String(d.Get("architecture").(string)),
+		ImageLocation:      aws.String(d.Get("image_location").(string)),
+		RootDeviceName:     aws.String(d.Get("root_device_name").(string)),
+		SriovNetSupport:    aws.String(d.Get("sriov_net_support").(string)),
+		VirtualizationType: aws.String(d.Get("virtualization_type").(string)),
+	}
+
+	if kernelId := d.Get("kernel_id").(string); kernelId != "" {
+		req.KernelId = aws.String(kernelId)
+	}
+	if ramdiskId := d.Get("ramdisk_id").(string); ramdiskId != "" {
+		req.RamdiskId = aws.String(ramdiskId)
+	}
+
+	ebsBlockDevsSet := d.Get("ebs_block_device").(*schema.Set)
+	ephemeralBlockDevsSet := d.Get("ephemeral_block_device").(*schema.Set)
+	for _, ebsBlockDevI := range ebsBlockDevsSet.List() {
+		ebsBlockDev := ebsBlockDevI.(map[string]interface{})
+		blockDev := &ec2.BlockDeviceMapping{
+			DeviceName: aws.String(ebsBlockDev["device_name"].(string)),
+			Ebs: &ec2.EbsBlockDevice{
+				DeleteOnTermination: aws.Bool(ebsBlockDev["delete_on_termination"].(bool)),
+				VolumeType:          aws.String(ebsBlockDev["volume_type"].(string)),
+			},
+		}
+		if iops, ok := ebsBlockDev["iops"]; ok {
+			if iop := iops.(int); iop != 0 {
+				blockDev.Ebs.Iops = aws.Int64(int64(iop))
+			}
+		}
+		if size, ok := ebsBlockDev["volume_size"]; ok {
+			if s := size.(int); s != 0 {
+				blockDev.Ebs.VolumeSize = aws.Int64(int64(s))
+			}
+		}
+		encrypted := ebsBlockDev["encrypted"].(bool)
+		if snapshotId := ebsBlockDev["snapshot_id"].(string); snapshotId != "" {
+			blockDev.Ebs.SnapshotId = aws.String(snapshotId)
+			if encrypted {
+				return errors.New("can't set both 'snapshot_id' and 'encrypted'")
+			}
+		} else if encrypted {
+			blockDev.Ebs.Encrypted = aws.Bool(true)
+		}
+		req.BlockDeviceMappings = append(req.BlockDeviceMappings, blockDev)
+	}
+	for _, ephemeralBlockDevI := range ephemeralBlockDevsSet.List() {
+		ephemeralBlockDev := ephemeralBlockDevI.(map[string]interface{})
+		blockDev := &ec2.BlockDeviceMapping{
+			DeviceName:  aws.String(ephemeralBlockDev["device_name"].(string)),
+			VirtualName: aws.String(ephemeralBlockDev["virtual_name"].(string)),
+		}
+		req.BlockDeviceMappings = append(req.BlockDeviceMappings, blockDev)
+	}
+
+	res, err := client.RegisterImage(req)
+	if err != nil {
+		return err
+	}
+
+	id := *res.ImageId
+	d.SetId(id)
+	d.Partial(true) // make sure we record the id even if the rest of this gets interrupted
+	d.Set("id", id)
+	d.Set("manage_ebs_snapshots", false)
+	d.SetPartial("id")
+	d.SetPartial("manage_ebs_snapshots")
+	d.Partial(false)
+
+	_, err = resourceAwsAmiWaitForAvailable(id, client)
+	if err != nil {
+		return err
+	}
+
+	return resourceAwsAmiUpdate(d, meta)
+}
+
+func resourceAwsAmiRead(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*AWSClient).ec2conn
+	id := d.Id()
+
+	req := &ec2.DescribeImagesInput{
+		ImageIds: []*string{aws.String(id)},
+	}
+
+	res, err := 
client.DescribeImages(req) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidAMIID.NotFound" { + log.Printf("[DEBUG] %s no longer exists, so we'll drop it from the state", id) + d.SetId("") + return nil + } + + return err + } + + if len(res.Images) != 1 { + d.SetId("") + return nil + } + + image := res.Images[0] + state := *image.State + + if state == "pending" { + // This could happen if a user manually adds an image we didn't create + // to the state. We'll wait for the image to become available + // before we continue. We should never take this branch in normal + // circumstances since we would've waited for availability during + // the "Create" step. + image, err = resourceAwsAmiWaitForAvailable(id, client) + if err != nil { + return err + } + state = *image.State + } + + if state == "deregistered" { + d.SetId("") + return nil + } + + if state != "available" { + return fmt.Errorf("AMI has become %s", state) + } + + d.Set("name", image.Name) + d.Set("description", image.Description) + d.Set("image_location", image.ImageLocation) + d.Set("architecture", image.Architecture) + d.Set("kernel_id", image.KernelId) + d.Set("ramdisk_id", image.RamdiskId) + d.Set("root_device_name", image.RootDeviceName) + d.Set("sriov_net_support", image.SriovNetSupport) + d.Set("virtualization_type", image.VirtualizationType) + + var ebsBlockDevs []map[string]interface{} + var ephemeralBlockDevs []map[string]interface{} + + for _, blockDev := range image.BlockDeviceMappings { + if blockDev.Ebs != nil { + ebsBlockDev := map[string]interface{}{ + "device_name": *blockDev.DeviceName, + "delete_on_termination": *blockDev.Ebs.DeleteOnTermination, + "encrypted": *blockDev.Ebs.Encrypted, + "iops": 0, + "volume_size": int(*blockDev.Ebs.VolumeSize), + "volume_type": *blockDev.Ebs.VolumeType, + } + if blockDev.Ebs.Iops != nil { + ebsBlockDev["iops"] = int(*blockDev.Ebs.Iops) + } + // The snapshot ID might not be set. + if blockDev.Ebs.SnapshotId != nil { + ebsBlockDev["snapshot_id"] = *blockDev.Ebs.SnapshotId + } + ebsBlockDevs = append(ebsBlockDevs, ebsBlockDev) + } else { + ephemeralBlockDevs = append(ephemeralBlockDevs, map[string]interface{}{ + "device_name": *blockDev.DeviceName, + "virtual_name": *blockDev.VirtualName, + }) + } + } + + d.Set("ebs_block_device", ebsBlockDevs) + d.Set("ephemeral_block_device", ephemeralBlockDevs) + + d.Set("tags", tagsToMap(image.Tags)) + + return nil +} + +func resourceAwsAmiUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).ec2conn + + d.Partial(true) + + if err := setTags(client, d); err != nil { + return err + } else { + d.SetPartial("tags") + } + + if d.Get("description").(string) != "" { + _, err := client.ModifyImageAttribute(&ec2.ModifyImageAttributeInput{ + ImageId: aws.String(d.Id()), + Description: &ec2.AttributeValue{ + Value: aws.String(d.Get("description").(string)), + }, + }) + if err != nil { + return err + } + d.SetPartial("description") + } + + d.Partial(false) + + return resourceAwsAmiRead(d, meta) +} + +func resourceAwsAmiDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).ec2conn + + req := &ec2.DeregisterImageInput{ + ImageId: aws.String(d.Id()), + } + + _, err := client.DeregisterImage(req) + if err != nil { + return err + } + + // If we're managing the EBS snapshots then we need to delete those too. 
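+	// Deletion failures are collected per snapshot ID instead of aborting on
+	// the first error, so a single undeletable snapshot (for example, one still
+	// referenced elsewhere) does not prevent the remaining ones from being
+	// attempted.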
+ if d.Get("manage_ebs_snapshots").(bool) { + errs := map[string]error{} + ebsBlockDevsSet := d.Get("ebs_block_device").(*schema.Set) + req := &ec2.DeleteSnapshotInput{} + for _, ebsBlockDevI := range ebsBlockDevsSet.List() { + ebsBlockDev := ebsBlockDevI.(map[string]interface{}) + snapshotId := ebsBlockDev["snapshot_id"].(string) + if snapshotId != "" { + req.SnapshotId = aws.String(snapshotId) + _, err := client.DeleteSnapshot(req) + if err != nil { + errs[snapshotId] = err + } + } + } + + if len(errs) > 0 { + errParts := []string{"Errors while deleting associated EBS snapshots:"} + for snapshotId, err := range errs { + errParts = append(errParts, fmt.Sprintf("%s: %s", snapshotId, err)) + } + errParts = append(errParts, "These are no longer managed by Terraform and must be deleted manually.") + return errors.New(strings.Join(errParts, "\n")) + } + } + + // Verify that the image is actually removed, if not we need to wait for it to be removed + if err := resourceAwsAmiWaitForDestroy(d.Id(), client); err != nil { + return err + } + + // No error, ami was deleted successfully + d.SetId("") + return nil +} + +func AMIStateRefreshFunc(client *ec2.EC2, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + emptyResp := &ec2.DescribeImagesOutput{} + + resp, err := client.DescribeImages(&ec2.DescribeImagesInput{ImageIds: []*string{aws.String(id)}}) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidAMIID.NotFound" { + return emptyResp, "destroyed", nil + } else if resp != nil && len(resp.Images) == 0 { + return emptyResp, "destroyed", nil + } else { + return emptyResp, "", fmt.Errorf("Error on refresh: %+v", err) + } + } + + if resp == nil || resp.Images == nil || len(resp.Images) == 0 { + return emptyResp, "destroyed", nil + } + + // AMI is valid, so return it's state + return resp.Images[0], *resp.Images[0].State, nil + } +} + +func resourceAwsAmiWaitForDestroy(id string, client *ec2.EC2) error { + log.Printf("Waiting for AMI %s to be deleted...", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"available", "pending", "failed"}, + Target: []string{"destroyed"}, + Refresh: AMIStateRefreshFunc(client, id), + Timeout: AWSAMIDeleteRetryTimeout, + Delay: AWSAMIRetryDelay, + MinTimeout: AWSAMIRetryTimeout, + } + + _, err := stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for AMI (%s) to be deleted: %v", id, err) + } + + return nil +} + +func resourceAwsAmiWaitForAvailable(id string, client *ec2.EC2) (*ec2.Image, error) { + log.Printf("Waiting for AMI %s to become available...", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"available"}, + Refresh: AMIStateRefreshFunc(client, id), + Timeout: AWSAMIRetryTimeout, + Delay: AWSAMIRetryDelay, + MinTimeout: AWSAMIRetryMinTimeout, + } + + info, err := stateConf.WaitForState() + if err != nil { + return nil, fmt.Errorf("Error waiting for AMI (%s) to be ready: %v", id, err) + } + return info.(*ec2.Image), nil +} + +func resourceAwsAmiCommonSchema(computed bool) map[string]*schema.Schema { + // The "computed" parameter controls whether we're making + // a schema for an AMI that's been implicitly registered (aws_ami_copy, aws_ami_from_instance) + // or whether we're making a schema for an explicit registration (aws_ami). + // When set, almost every attribute is marked as "computed". + // When not set, only the "id" attribute is computed. 
+ // "name" and "description" are never computed, since they must always + // be provided by the user. + + var virtualizationTypeDefault interface{} + var deleteEbsOnTerminationDefault interface{} + var sriovNetSupportDefault interface{} + var architectureDefault interface{} + var volumeTypeDefault interface{} + if !computed { + virtualizationTypeDefault = "paravirtual" + deleteEbsOnTerminationDefault = true + sriovNetSupportDefault = "simple" + architectureDefault = "x86_64" + volumeTypeDefault = "standard" + } + + return map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "image_location": { + Type: schema.TypeString, + Optional: !computed, + Computed: true, + ForceNew: !computed, + }, + "architecture": { + Type: schema.TypeString, + Optional: !computed, + Computed: computed, + ForceNew: !computed, + Default: architectureDefault, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "kernel_id": { + Type: schema.TypeString, + Optional: !computed, + Computed: computed, + ForceNew: !computed, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "ramdisk_id": { + Type: schema.TypeString, + Optional: !computed, + Computed: computed, + ForceNew: !computed, + }, + "root_device_name": { + Type: schema.TypeString, + Optional: !computed, + Computed: computed, + ForceNew: !computed, + }, + "sriov_net_support": { + Type: schema.TypeString, + Optional: !computed, + Computed: computed, + ForceNew: !computed, + Default: sriovNetSupportDefault, + }, + "virtualization_type": { + Type: schema.TypeString, + Optional: !computed, + Computed: computed, + ForceNew: !computed, + Default: virtualizationTypeDefault, + }, + + // The following block device attributes intentionally mimick the + // corresponding attributes on aws_instance, since they have the + // same meaning. + // However, we don't use root_block_device here because the constraint + // on which root device attributes can be overridden for an instance to + // not apply when registering an AMI. 
+ + "ebs_block_device": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "delete_on_termination": { + Type: schema.TypeBool, + Optional: !computed, + Default: deleteEbsOnTerminationDefault, + ForceNew: !computed, + Computed: computed, + }, + + "device_name": { + Type: schema.TypeString, + Required: !computed, + ForceNew: !computed, + Computed: computed, + }, + + "encrypted": { + Type: schema.TypeBool, + Optional: !computed, + Computed: computed, + ForceNew: !computed, + }, + + "iops": { + Type: schema.TypeInt, + Optional: !computed, + Computed: computed, + ForceNew: !computed, + }, + + "snapshot_id": { + Type: schema.TypeString, + Optional: !computed, + Computed: computed, + ForceNew: !computed, + }, + + "volume_size": { + Type: schema.TypeInt, + Optional: !computed, + Computed: true, + ForceNew: !computed, + }, + + "volume_type": { + Type: schema.TypeString, + Optional: !computed, + Computed: computed, + ForceNew: !computed, + Default: volumeTypeDefault, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["snapshot_id"].(string))) + return hashcode.String(buf.String()) + }, + }, + + "ephemeral_block_device": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "device_name": { + Type: schema.TypeString, + Required: !computed, + Computed: computed, + }, + + "virtual_name": { + Type: schema.TypeString, + Required: !computed, + Computed: computed, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["virtual_name"].(string))) + return hashcode.String(buf.String()) + }, + }, + + "tags": tagsSchema(), + + // Not a public attribute; used to let the aws_ami_copy and aws_ami_from_instance + // resources record that they implicitly created new EBS snapshots that we should + // now manage. Not set by aws_ami, since the snapshots used there are presumed to + // be independently managed. + "manage_ebs_snapshots": { + Type: schema.TypeBool, + Computed: true, + ForceNew: true, + }, + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami_copy.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami_copy.go new file mode 100644 index 000000000..3452d5b52 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami_copy.go @@ -0,0 +1,90 @@ +package aws + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsAmiCopy() *schema.Resource { + // Inherit all of the common AMI attributes from aws_ami, since we're + // implicitly creating an aws_ami resource. + resourceSchema := resourceAwsAmiCommonSchema(true) + + // Additional attributes unique to the copy operation. 
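+	// source_ami_id and source_ami_region identify the image to copy, while
+	// encrypted and kms_key_id request that the snapshots backing the new
+	// image be encrypted during the copy.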
+ resourceSchema["source_ami_id"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + } + resourceSchema["source_ami_region"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + } + + resourceSchema["encrypted"] = &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + } + + resourceSchema["kms_key_id"] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateArn, + } + + return &schema.Resource{ + Create: resourceAwsAmiCopyCreate, + + Schema: resourceSchema, + + // The remaining operations are shared with the generic aws_ami resource, + // since the aws_ami_copy resource only differs in how it's created. + Read: resourceAwsAmiRead, + Update: resourceAwsAmiUpdate, + Delete: resourceAwsAmiDelete, + } +} + +func resourceAwsAmiCopyCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).ec2conn + + req := &ec2.CopyImageInput{ + Name: aws.String(d.Get("name").(string)), + Description: aws.String(d.Get("description").(string)), + SourceImageId: aws.String(d.Get("source_ami_id").(string)), + SourceRegion: aws.String(d.Get("source_ami_region").(string)), + Encrypted: aws.Bool(d.Get("encrypted").(bool)), + } + + if v, ok := d.GetOk("kms_key_id"); ok { + req.KmsKeyId = aws.String(v.(string)) + } + + res, err := client.CopyImage(req) + if err != nil { + return err + } + + id := *res.ImageId + d.SetId(id) + d.Partial(true) // make sure we record the id even if the rest of this gets interrupted + d.Set("id", id) + d.Set("manage_ebs_snapshots", true) + d.SetPartial("id") + d.SetPartial("manage_ebs_snapshots") + d.Partial(false) + + _, err = resourceAwsAmiWaitForAvailable(id, client) + if err != nil { + return err + } + + return resourceAwsAmiUpdate(d, meta) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami_from_instance.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami_from_instance.go new file mode 100644 index 000000000..cc272d3c1 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami_from_instance.go @@ -0,0 +1,70 @@ +package aws + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsAmiFromInstance() *schema.Resource { + // Inherit all of the common AMI attributes from aws_ami, since we're + // implicitly creating an aws_ami resource. + resourceSchema := resourceAwsAmiCommonSchema(true) + + // Additional attributes unique to the copy operation. + resourceSchema["source_instance_id"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + } + resourceSchema["snapshot_without_reboot"] = &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + } + + return &schema.Resource{ + Create: resourceAwsAmiFromInstanceCreate, + + Schema: resourceSchema, + + // The remaining operations are shared with the generic aws_ami resource, + // since the aws_ami_copy resource only differs in how it's created. 
+ Read: resourceAwsAmiRead, + Update: resourceAwsAmiUpdate, + Delete: resourceAwsAmiDelete, + } +} + +func resourceAwsAmiFromInstanceCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).ec2conn + + req := &ec2.CreateImageInput{ + Name: aws.String(d.Get("name").(string)), + Description: aws.String(d.Get("description").(string)), + InstanceId: aws.String(d.Get("source_instance_id").(string)), + NoReboot: aws.Bool(d.Get("snapshot_without_reboot").(bool)), + } + + res, err := client.CreateImage(req) + if err != nil { + return err + } + + id := *res.ImageId + d.SetId(id) + d.Partial(true) // make sure we record the id even if the rest of this gets interrupted + d.Set("id", id) + d.Set("manage_ebs_snapshots", true) + d.SetPartial("id") + d.SetPartial("manage_ebs_snapshots") + d.Partial(false) + + _, err = resourceAwsAmiWaitForAvailable(id, client) + if err != nil { + return err + } + + return resourceAwsAmiUpdate(d, meta) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami_launch_permission.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami_launch_permission.go new file mode 100644 index 000000000..278e9d9ab --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ami_launch_permission.go @@ -0,0 +1,114 @@ +package aws + +import ( + "fmt" + "log" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsAmiLaunchPermission() *schema.Resource { + return &schema.Resource{ + Exists: resourceAwsAmiLaunchPermissionExists, + Create: resourceAwsAmiLaunchPermissionCreate, + Read: resourceAwsAmiLaunchPermissionRead, + Delete: resourceAwsAmiLaunchPermissionDelete, + + Schema: map[string]*schema.Schema{ + "image_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "account_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsAmiLaunchPermissionExists(d *schema.ResourceData, meta interface{}) (bool, error) { + conn := meta.(*AWSClient).ec2conn + + image_id := d.Get("image_id").(string) + account_id := d.Get("account_id").(string) + return hasLaunchPermission(conn, image_id, account_id) +} + +func resourceAwsAmiLaunchPermissionCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + image_id := d.Get("image_id").(string) + account_id := d.Get("account_id").(string) + + _, err := conn.ModifyImageAttribute(&ec2.ModifyImageAttributeInput{ + ImageId: aws.String(image_id), + Attribute: aws.String("launchPermission"), + LaunchPermission: &ec2.LaunchPermissionModifications{ + Add: []*ec2.LaunchPermission{ + &ec2.LaunchPermission{UserId: aws.String(account_id)}, + }, + }, + }) + if err != nil { + return fmt.Errorf("error creating ami launch permission: %s", err) + } + + d.SetId(fmt.Sprintf("%s-%s", image_id, account_id)) + return nil +} + +func resourceAwsAmiLaunchPermissionRead(d *schema.ResourceData, meta interface{}) error { + return nil +} + +func resourceAwsAmiLaunchPermissionDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + image_id := d.Get("image_id").(string) + account_id := d.Get("account_id").(string) + + _, err := conn.ModifyImageAttribute(&ec2.ModifyImageAttributeInput{ + ImageId: aws.String(image_id), + Attribute: 
aws.String("launchPermission"), + LaunchPermission: &ec2.LaunchPermissionModifications{ + Remove: []*ec2.LaunchPermission{ + &ec2.LaunchPermission{UserId: aws.String(account_id)}, + }, + }, + }) + if err != nil { + return fmt.Errorf("error removing ami launch permission: %s", err) + } + + return nil +} + +func hasLaunchPermission(conn *ec2.EC2, image_id string, account_id string) (bool, error) { + attrs, err := conn.DescribeImageAttribute(&ec2.DescribeImageAttributeInput{ + ImageId: aws.String(image_id), + Attribute: aws.String("launchPermission"), + }) + if err != nil { + // When an AMI disappears out from under a launch permission resource, we will + // see either InvalidAMIID.NotFound or InvalidAMIID.Unavailable. + if ec2err, ok := err.(awserr.Error); ok && strings.HasPrefix(ec2err.Code(), "InvalidAMIID") { + log.Printf("[DEBUG] %s no longer exists, so we'll drop launch permission for %s from the state", image_id, account_id) + return false, nil + } + return false, err + } + + for _, lp := range attrs.LaunchPermissions { + if *lp.UserId == account_id { + return true, nil + } + } + return false, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_account.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_account.go new file mode 100644 index 000000000..7b786270a --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_account.go @@ -0,0 +1,127 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/apigateway" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsApiGatewayAccount() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsApiGatewayAccountUpdate, + Read: resourceAwsApiGatewayAccountRead, + Update: resourceAwsApiGatewayAccountUpdate, + Delete: resourceAwsApiGatewayAccountDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "cloudwatch_role_arn": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "throttle_settings": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "burst_limit": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + "rate_limit": &schema.Schema{ + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func resourceAwsApiGatewayAccountRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + log.Printf("[INFO] Reading API Gateway Account %s", d.Id()) + account, err := conn.GetAccount(&apigateway.GetAccountInput{}) + if err != nil { + return err + } + + log.Printf("[DEBUG] Received API Gateway Account: %s", account) + + if _, ok := d.GetOk("cloudwatch_role_arn"); ok { + // CloudwatchRoleArn cannot be empty nor made empty via API + // This resource can however be useful w/out defining cloudwatch_role_arn + // (e.g. 
for referencing throttle_settings) + d.Set("cloudwatch_role_arn", account.CloudwatchRoleArn) + } + d.Set("throttle_settings", flattenApiGatewayThrottleSettings(account.ThrottleSettings)) + + return nil +} + +func resourceAwsApiGatewayAccountUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + input := apigateway.UpdateAccountInput{} + operations := make([]*apigateway.PatchOperation, 0) + + if d.HasChange("cloudwatch_role_arn") { + arn := d.Get("cloudwatch_role_arn").(string) + if len(arn) > 0 { + // Unfortunately AWS API doesn't allow empty ARNs, + // even though that's default settings for new AWS accounts + // BadRequestException: The role ARN is not well formed + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/cloudwatchRoleArn"), + Value: aws.String(arn), + }) + } + } + input.PatchOperations = operations + + log.Printf("[INFO] Updating API Gateway Account: %s", input) + + // Retry due to eventual consistency of IAM + expectedErrMsg := "The role ARN does not have required permissions set to API Gateway" + otherErrMsg := "API Gateway could not successfully write to CloudWatch Logs using the ARN specified" + var out *apigateway.Account + var err error + err = resource.Retry(2*time.Minute, func() *resource.RetryError { + out, err = conn.UpdateAccount(&input) + + if err != nil { + if isAWSErr(err, "BadRequestException", expectedErrMsg) || + isAWSErr(err, "BadRequestException", otherErrMsg) { + log.Printf("[DEBUG] Retrying API Gateway Account update: %s", err) + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + + return nil + }) + if err != nil { + return fmt.Errorf("Updating API Gateway Account failed: %s", err) + } + log.Printf("[DEBUG] API Gateway Account updated: %s", out) + + d.SetId("api-gateway-account") + return resourceAwsApiGatewayAccountRead(d, meta) +} + +func resourceAwsApiGatewayAccountDelete(d *schema.ResourceData, meta interface{}) error { + // There is no API for "deleting" account or resetting it to "default" settings + d.SetId("") + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_api_key.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_api_key.go new file mode 100644 index 000000000..66a7154de --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_api_key.go @@ -0,0 +1,202 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/apigateway" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsApiGatewayApiKey() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsApiGatewayApiKeyCreate, + Read: resourceAwsApiGatewayApiKeyRead, + Update: resourceAwsApiGatewayApiKeyUpdate, + Delete: resourceAwsApiGatewayApiKeyDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Default: "Managed by Terraform", + }, + + "enabled": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + + "stage_key": { + Type: schema.TypeSet, + Optional: true, + Deprecated: 
"Since the API Gateway usage plans feature was launched on August 11, 2016, usage plans are now required to associate an API key with an API stage", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rest_api_id": { + Type: schema.TypeString, + Required: true, + }, + + "stage_name": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + + "created_date": { + Type: schema.TypeString, + Computed: true, + }, + + "last_updated_date": { + Type: schema.TypeString, + Computed: true, + }, + + "value": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Sensitive: true, + ValidateFunc: validateApiGatewayApiKeyValue, + }, + }, + } +} + +func resourceAwsApiGatewayApiKeyCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + log.Printf("[DEBUG] Creating API Gateway API Key") + + apiKey, err := conn.CreateApiKey(&apigateway.CreateApiKeyInput{ + Name: aws.String(d.Get("name").(string)), + Description: aws.String(d.Get("description").(string)), + Enabled: aws.Bool(d.Get("enabled").(bool)), + Value: aws.String(d.Get("value").(string)), + StageKeys: expandApiGatewayStageKeys(d), + }) + if err != nil { + return fmt.Errorf("Error creating API Gateway: %s", err) + } + + d.SetId(*apiKey.Id) + + return resourceAwsApiGatewayApiKeyRead(d, meta) +} + +func resourceAwsApiGatewayApiKeyRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + log.Printf("[DEBUG] Reading API Gateway API Key: %s", d.Id()) + + apiKey, err := conn.GetApiKey(&apigateway.GetApiKeyInput{ + ApiKey: aws.String(d.Id()), + IncludeValue: aws.Bool(true), + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { + d.SetId("") + return nil + } + + return err + } + + d.Set("name", apiKey.Name) + d.Set("description", apiKey.Description) + d.Set("enabled", apiKey.Enabled) + d.Set("stage_key", flattenApiGatewayStageKeys(apiKey.StageKeys)) + d.Set("value", apiKey.Value) + + if err := d.Set("created_date", apiKey.CreatedDate.Format(time.RFC3339)); err != nil { + log.Printf("[DEBUG] Error setting created_date: %s", err) + } + + if err := d.Set("last_updated_date", apiKey.LastUpdatedDate.Format(time.RFC3339)); err != nil { + log.Printf("[DEBUG] Error setting last_updated_date: %s", err) + } + + return nil +} + +func resourceAwsApiGatewayApiKeyUpdateOperations(d *schema.ResourceData) []*apigateway.PatchOperation { + operations := make([]*apigateway.PatchOperation, 0) + if d.HasChange("enabled") { + isEnabled := "false" + if d.Get("enabled").(bool) { + isEnabled = "true" + } + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/enabled"), + Value: aws.String(isEnabled), + }) + } + + if d.HasChange("description") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/description"), + Value: aws.String(d.Get("description").(string)), + }) + } + + if d.HasChange("stage_key") { + operations = append(operations, expandApiGatewayStageKeyOperations(d)...) 
+ } + return operations +} + +func resourceAwsApiGatewayApiKeyUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + log.Printf("[DEBUG] Updating API Gateway API Key: %s", d.Id()) + + _, err := conn.UpdateApiKey(&apigateway.UpdateApiKeyInput{ + ApiKey: aws.String(d.Id()), + PatchOperations: resourceAwsApiGatewayApiKeyUpdateOperations(d), + }) + if err != nil { + return err + } + + return resourceAwsApiGatewayApiKeyRead(d, meta) +} + +func resourceAwsApiGatewayApiKeyDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + log.Printf("[DEBUG] Deleting API Gateway API Key: %s", d.Id()) + + return resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteApiKey(&apigateway.DeleteApiKeyInput{ + ApiKey: aws.String(d.Id()), + }) + + if err == nil { + return nil + } + + if apigatewayErr, ok := err.(awserr.Error); ok && apigatewayErr.Code() == "NotFoundException" { + return nil + } + + return resource.NonRetryableError(err) + }) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_authorizer.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_authorizer.go new file mode 100644 index 000000000..8f881e185 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_authorizer.go @@ -0,0 +1,212 @@ +package aws + +import ( + "fmt" + "log" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/apigateway" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsApiGatewayAuthorizer() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsApiGatewayAuthorizerCreate, + Read: resourceAwsApiGatewayAuthorizerRead, + Update: resourceAwsApiGatewayAuthorizerUpdate, + Delete: resourceAwsApiGatewayAuthorizerDelete, + + Schema: map[string]*schema.Schema{ + "authorizer_uri": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "identity_source": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "method.request.header.Authorization", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "rest_api_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "TOKEN", + }, + "authorizer_credentials": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "authorizer_result_ttl_in_seconds": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validateIntegerInRange(0, 3600), + }, + "identity_validation_expression": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func resourceAwsApiGatewayAuthorizerCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + input := apigateway.CreateAuthorizerInput{ + AuthorizerUri: aws.String(d.Get("authorizer_uri").(string)), + IdentitySource: aws.String(d.Get("identity_source").(string)), + Name: aws.String(d.Get("name").(string)), + RestApiId: aws.String(d.Get("rest_api_id").(string)), + Type: aws.String(d.Get("type").(string)), + } + + if v, ok := d.GetOk("authorizer_credentials"); ok { + input.AuthorizerCredentials = aws.String(v.(string)) + } + if v, ok := d.GetOk("authorizer_result_ttl_in_seconds"); ok { + input.AuthorizerResultTtlInSeconds = 
aws.Int64(int64(v.(int))) + } + if v, ok := d.GetOk("identity_validation_expression"); ok { + input.IdentityValidationExpression = aws.String(v.(string)) + } + + log.Printf("[INFO] Creating API Gateway Authorizer: %s", input) + out, err := conn.CreateAuthorizer(&input) + if err != nil { + return fmt.Errorf("Error creating API Gateway Authorizer: %s", err) + } + + d.SetId(*out.Id) + + return resourceAwsApiGatewayAuthorizerRead(d, meta) +} + +func resourceAwsApiGatewayAuthorizerRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + log.Printf("[INFO] Reading API Gateway Authorizer %s", d.Id()) + input := apigateway.GetAuthorizerInput{ + AuthorizerId: aws.String(d.Id()), + RestApiId: aws.String(d.Get("rest_api_id").(string)), + } + + authorizer, err := conn.GetAuthorizer(&input) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { + log.Printf("[WARN] No API Gateway Authorizer found: %s", input) + d.SetId("") + return nil + } + return err + } + log.Printf("[DEBUG] Received API Gateway Authorizer: %s", authorizer) + + d.Set("authorizer_credentials", authorizer.AuthorizerCredentials) + d.Set("authorizer_result_ttl_in_seconds", authorizer.AuthorizerResultTtlInSeconds) + d.Set("authorizer_uri", authorizer.AuthorizerUri) + d.Set("identity_source", authorizer.IdentitySource) + d.Set("identity_validation_expression", authorizer.IdentityValidationExpression) + d.Set("name", authorizer.Name) + d.Set("type", authorizer.Type) + + return nil +} + +func resourceAwsApiGatewayAuthorizerUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + input := apigateway.UpdateAuthorizerInput{ + AuthorizerId: aws.String(d.Id()), + RestApiId: aws.String(d.Get("rest_api_id").(string)), + } + + operations := make([]*apigateway.PatchOperation, 0) + + if d.HasChange("authorizer_uri") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/authorizerUri"), + Value: aws.String(d.Get("authorizer_uri").(string)), + }) + } + if d.HasChange("identity_source") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/identitySource"), + Value: aws.String(d.Get("identity_source").(string)), + }) + } + if d.HasChange("name") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/name"), + Value: aws.String(d.Get("name").(string)), + }) + } + if d.HasChange("type") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/type"), + Value: aws.String(d.Get("type").(string)), + }) + } + if d.HasChange("authorizer_credentials") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/authorizerCredentials"), + Value: aws.String(d.Get("authorizer_credentials").(string)), + }) + } + if d.HasChange("authorizer_result_ttl_in_seconds") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/authorizerResultTtlInSeconds"), + Value: aws.String(fmt.Sprintf("%d", d.Get("authorizer_result_ttl_in_seconds").(int))), + }) + } + if d.HasChange("identity_validation_expression") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/identityValidationExpression"), + Value: 
aws.String(d.Get("identity_validation_expression").(string)), + }) + } + input.PatchOperations = operations + + log.Printf("[INFO] Updating API Gateway Authorizer: %s", input) + _, err := conn.UpdateAuthorizer(&input) + if err != nil { + return fmt.Errorf("Updating API Gateway Authorizer failed: %s", err) + } + + return resourceAwsApiGatewayAuthorizerRead(d, meta) +} + +func resourceAwsApiGatewayAuthorizerDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + input := apigateway.DeleteAuthorizerInput{ + AuthorizerId: aws.String(d.Id()), + RestApiId: aws.String(d.Get("rest_api_id").(string)), + } + log.Printf("[INFO] Deleting API Gateway Authorizer: %s", input) + _, err := conn.DeleteAuthorizer(&input) + if err != nil { + // XXX: Figure out a way to delete the method that depends on the authorizer first + // otherwise the authorizer will be dangling until the API is deleted + if !strings.Contains(err.Error(), "ConflictException") { + return fmt.Errorf("Deleting API Gateway Authorizer failed: %s", err) + } + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_base_path_mapping.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_base_path_mapping.go new file mode 100644 index 000000000..a04171916 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_base_path_mapping.go @@ -0,0 +1,146 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/apigateway" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +const emptyBasePathMappingValue = "(none)" + +func resourceAwsApiGatewayBasePathMapping() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsApiGatewayBasePathMappingCreate, + Read: resourceAwsApiGatewayBasePathMappingRead, + Delete: resourceAwsApiGatewayBasePathMappingDelete, + + Schema: map[string]*schema.Schema{ + "api_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "base_path": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "stage_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "domain_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsApiGatewayBasePathMappingCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + err := resource.Retry(30*time.Second, func() *resource.RetryError { + _, err := conn.CreateBasePathMapping(&apigateway.CreateBasePathMappingInput{ + RestApiId: aws.String(d.Get("api_id").(string)), + DomainName: aws.String(d.Get("domain_name").(string)), + BasePath: aws.String(d.Get("base_path").(string)), + Stage: aws.String(d.Get("stage_name").(string)), + }) + + if err != nil { + if err, ok := err.(awserr.Error); ok && err.Code() != "BadRequestException" { + return resource.NonRetryableError(err) + } + + return resource.RetryableError( + fmt.Errorf("Error creating Gateway base path mapping: %s", err), + ) + } + + return nil + }) + + if err != nil { + return fmt.Errorf("Error creating Gateway base path mapping: %s", err) + } + + id := fmt.Sprintf("%s/%s", d.Get("domain_name").(string), d.Get("base_path").(string)) + d.SetId(id) + + return resourceAwsApiGatewayBasePathMappingRead(d, meta) +} + +func 
resourceAwsApiGatewayBasePathMappingRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + domainName := d.Get("domain_name").(string) + basePath := d.Get("base_path").(string) + + if domainName == "" { + return nil + } + + if basePath == "" { + basePath = emptyBasePathMappingValue + } + + mapping, err := conn.GetBasePathMapping(&apigateway.GetBasePathMappingInput{ + DomainName: aws.String(domainName), + BasePath: aws.String(basePath), + }) + if err != nil { + if err, ok := err.(awserr.Error); ok && err.Code() == "NotFoundException" { + log.Printf("[WARN] API gateway base path mapping %s has vanished\n", d.Id()) + d.SetId("") + return nil + } + + return fmt.Errorf("Error reading Gateway base path mapping: %s", err) + } + + mappingBasePath := *mapping.BasePath + + if mappingBasePath == emptyBasePathMappingValue { + mappingBasePath = "" + } + + d.Set("base_path", mappingBasePath) + d.Set("api_id", mapping.RestApiId) + d.Set("stage_name", mapping.Stage) + + return nil +} + +func resourceAwsApiGatewayBasePathMappingDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + basePath := d.Get("base_path").(string) + + if basePath == "" { + basePath = emptyBasePathMappingValue + } + + _, err := conn.DeleteBasePathMapping(&apigateway.DeleteBasePathMappingInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + BasePath: aws.String(basePath), + }) + + if err != nil { + if err, ok := err.(awserr.Error); ok && err.Code() == "NotFoundException" { + return nil + } + + return err + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_client_certificate.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_client_certificate.go new file mode 100644 index 000000000..77bb7f600 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_client_certificate.go @@ -0,0 +1,125 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/apigateway" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsApiGatewayClientCertificate() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsApiGatewayClientCertificateCreate, + Read: resourceAwsApiGatewayClientCertificateRead, + Update: resourceAwsApiGatewayClientCertificateUpdate, + Delete: resourceAwsApiGatewayClientCertificateDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "description": { + Type: schema.TypeString, + Optional: true, + }, + "created_date": { + Type: schema.TypeString, + Computed: true, + }, + "expiration_date": { + Type: schema.TypeString, + Computed: true, + }, + "pem_encoded_certificate": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsApiGatewayClientCertificateCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + input := apigateway.GenerateClientCertificateInput{} + if v, ok := d.GetOk("description"); ok { + input.Description = aws.String(v.(string)) + } + log.Printf("[DEBUG] Generating API Gateway Client Certificate: %s", input) + out, err := conn.GenerateClientCertificate(&input) + if err != nil { + return fmt.Errorf("Failed to generate client certificate: %s", err) + } + + d.SetId(*out.ClientCertificateId) 
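+	// The remaining attributes (the PEM body plus creation and expiration
+	// dates) are computed server-side, so the Read call that follows fills them in.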
+ + return resourceAwsApiGatewayClientCertificateRead(d, meta) +} + +func resourceAwsApiGatewayClientCertificateRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + input := apigateway.GetClientCertificateInput{ + ClientCertificateId: aws.String(d.Id()), + } + out, err := conn.GetClientCertificate(&input) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { + log.Printf("[WARN] API Gateway Client Certificate %s not found, removing", d.Id()) + d.SetId("") + return nil + } + return err + } + log.Printf("[DEBUG] Received API Gateway Client Certificate: %s", out) + + d.Set("description", out.Description) + d.Set("created_date", out.CreatedDate) + d.Set("expiration_date", out.ExpirationDate) + d.Set("pem_encoded_certificate", out.PemEncodedCertificate) + + return nil +} + +func resourceAwsApiGatewayClientCertificateUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + operations := make([]*apigateway.PatchOperation, 0) + if d.HasChange("description") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/description"), + Value: aws.String(d.Get("description").(string)), + }) + } + + input := apigateway.UpdateClientCertificateInput{ + ClientCertificateId: aws.String(d.Id()), + PatchOperations: operations, + } + + log.Printf("[DEBUG] Updating API Gateway Client Certificate: %s", input) + _, err := conn.UpdateClientCertificate(&input) + if err != nil { + return fmt.Errorf("Updating API Gateway Client Certificate failed: %s", err) + } + + return resourceAwsApiGatewayClientCertificateRead(d, meta) +} + +func resourceAwsApiGatewayClientCertificateDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + log.Printf("[DEBUG] Deleting API Gateway Client Certificate: %s", d.Id()) + input := apigateway.DeleteClientCertificateInput{ + ClientCertificateId: aws.String(d.Id()), + } + _, err := conn.DeleteClientCertificate(&input) + if err != nil { + return fmt.Errorf("Deleting API Gateway Client Certificate failed: %s", err) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_deployment.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_deployment.go new file mode 100644 index 000000000..f4c1daf20 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_deployment.go @@ -0,0 +1,200 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/apigateway" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsApiGatewayDeployment() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsApiGatewayDeploymentCreate, + Read: resourceAwsApiGatewayDeploymentRead, + Update: resourceAwsApiGatewayDeploymentUpdate, + Delete: resourceAwsApiGatewayDeploymentDelete, + + Schema: map[string]*schema.Schema{ + "rest_api_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "stage_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + }, + + "stage_description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + 
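+			// Stage variables applied to the stage named above. This resource
+			// only supplies them at deployment time, so changing them forces
+			// a new deployment (ForceNew below).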
+
+			"variables": {
+				Type:     schema.TypeMap,
+				Optional: true,
+				ForceNew: true,
+				Elem:     schema.TypeString,
+			},
+
+			"created_date": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"invoke_url": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"execution_arn": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceAwsApiGatewayDeploymentCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).apigateway
+	// Create the gateway
+	log.Printf("[DEBUG] Creating API Gateway Deployment")
+
+	variables := make(map[string]string)
+	for k, v := range d.Get("variables").(map[string]interface{}) {
+		variables[k] = v.(string)
+	}
+
+	var err error
+	deployment, err := conn.CreateDeployment(&apigateway.CreateDeploymentInput{
+		RestApiId:        aws.String(d.Get("rest_api_id").(string)),
+		StageName:        aws.String(d.Get("stage_name").(string)),
+		Description:      aws.String(d.Get("description").(string)),
+		StageDescription: aws.String(d.Get("stage_description").(string)),
+		Variables:        aws.StringMap(variables),
+	})
+	if err != nil {
+		return fmt.Errorf("Error creating API Gateway Deployment: %s", err)
+	}
+
+	d.SetId(*deployment.Id)
+	log.Printf("[DEBUG] API Gateway Deployment ID: %s", d.Id())
+
+	return resourceAwsApiGatewayDeploymentRead(d, meta)
+}
+
+func resourceAwsApiGatewayDeploymentRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).apigateway
+
+	log.Printf("[DEBUG] Reading API Gateway Deployment %s", d.Id())
+	restApiId := d.Get("rest_api_id").(string)
+	out, err := conn.GetDeployment(&apigateway.GetDeploymentInput{
+		RestApiId:    aws.String(restApiId),
+		DeploymentId: aws.String(d.Id()),
+	})
+	if err != nil {
+		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
+			d.SetId("")
+			return nil
+		}
+		return err
+	}
+	log.Printf("[DEBUG] Received API Gateway Deployment: %s", out)
+	d.Set("description", out.Description)
+
+	region := meta.(*AWSClient).region
+	stageName := d.Get("stage_name").(string)
+
+	d.Set("invoke_url", buildApiGatewayInvokeURL(restApiId, region, stageName))
+
+	accountId := meta.(*AWSClient).accountid
+	arn, err := buildApiGatewayExecutionARN(restApiId, region, accountId)
+	if err != nil {
+		return err
+	}
+	d.Set("execution_arn", arn+"/"+stageName)
+
+	if err := d.Set("created_date", out.CreatedDate.Format(time.RFC3339)); err != nil {
+		log.Printf("[DEBUG] Error setting created_date: %s", err)
+	}
+
+	return nil
+}
+
+func resourceAwsApiGatewayDeploymentUpdateOperations(d *schema.ResourceData) []*apigateway.PatchOperation {
+	operations := make([]*apigateway.PatchOperation, 0)
+
+	if d.HasChange("description") {
+		operations = append(operations, &apigateway.PatchOperation{
+			Op:    aws.String("replace"),
+			Path:  aws.String("/description"),
+			Value: aws.String(d.Get("description").(string)),
+		})
+	}
+
+	return operations
+}
+
+func resourceAwsApiGatewayDeploymentUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).apigateway
+
+	log.Printf("[DEBUG] Updating API Gateway Deployment: %s", d.Id())
+
+	_, err := conn.UpdateDeployment(&apigateway.UpdateDeploymentInput{
+		DeploymentId:    aws.String(d.Id()),
+		RestApiId:       aws.String(d.Get("rest_api_id").(string)),
+		PatchOperations: resourceAwsApiGatewayDeploymentUpdateOperations(d),
+	})
+	if err != nil {
+		return err
+	}
+
+	return resourceAwsApiGatewayDeploymentRead(d, meta)
+}
+
+func resourceAwsApiGatewayDeploymentDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).apigateway
+	log.Printf("[DEBUG] Deleting API Gateway Deployment: %s", d.Id())
+
+	return resource.Retry(5*time.Minute, func() *resource.RetryError {
+		log.Printf("[DEBUG] schema is %#v", d)
+		if _, err := conn.DeleteStage(&apigateway.DeleteStageInput{
+			StageName: aws.String(d.Get("stage_name").(string)),
+			RestApiId: aws.String(d.Get("rest_api_id").(string)),
+		}); err == nil {
+			return nil
+		}
+
+		_, err := conn.DeleteDeployment(&apigateway.DeleteDeploymentInput{
+			DeploymentId: aws.String(d.Id()),
+			RestApiId:    aws.String(d.Get("rest_api_id").(string)),
+		})
+		if err == nil {
+			return nil
+		}
+
+		// Check the type assertion before calling Code() so a non-AWS error
+		// cannot cause a nil pointer dereference.
+		if apigatewayErr, ok := err.(awserr.Error); ok && apigatewayErr.Code() == "NotFoundException" {
+			return nil
+		}
+
+		return resource.NonRetryableError(err)
+	})
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_domain_name.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_domain_name.go
new file mode 100644
index 000000000..be90c40ec
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_domain_name.go
@@ -0,0 +1,210 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/apigateway"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
)

+func resourceAwsApiGatewayDomainName() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsApiGatewayDomainNameCreate,
+		Read:   resourceAwsApiGatewayDomainNameRead,
+		Update: resourceAwsApiGatewayDomainNameUpdate,
+		Delete: resourceAwsApiGatewayDomainNameDelete,
+
+		Schema: map[string]*schema.Schema{
+
+			// According to AWS documentation, ACM will eventually be the only way to add
+			// certificates to API Gateway domain names. When that happens, we will deprecate
+			// all certificate methods except certificate_arn; we are not sure when that will be.
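+			// Until then, the two ways of supplying a certificate are mutually
+			// exclusive: certificate_arn for ACM, or the certificate_body,
+			// certificate_chain, certificate_name, and certificate_private_key
+			// upload fields, as enforced by ConflictsWith below.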
+ "certificate_body": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + ConflictsWith: []string{"certificate_arn"}, + }, + + "certificate_chain": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + ConflictsWith: []string{"certificate_arn"}, + }, + + "certificate_name": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"certificate_arn"}, + }, + + "certificate_private_key": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Sensitive: true, + ConflictsWith: []string{"certificate_arn"}, + }, + + "domain_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "certificate_arn": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"certificate_body", "certificate_chain", "certificate_name", "certificate_private_key"}, + }, + + "cloudfront_domain_name": { + Type: schema.TypeString, + Computed: true, + }, + + "certificate_upload_date": { + Type: schema.TypeString, + Computed: true, + }, + + "cloudfront_zone_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsApiGatewayDomainNameCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + log.Printf("[DEBUG] Creating API Gateway Domain Name") + + params := &apigateway.CreateDomainNameInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + } + + if v, ok := d.GetOk("certificate_arn"); ok { + params.CertificateArn = aws.String(v.(string)) + } + + if v, ok := d.GetOk("certificate_name"); ok { + params.CertificateName = aws.String(v.(string)) + } + + if v, ok := d.GetOk("certificate_body"); ok { + params.CertificateBody = aws.String(v.(string)) + } + + if v, ok := d.GetOk("certificate_chain"); ok { + params.CertificateChain = aws.String(v.(string)) + } + + if v, ok := d.GetOk("certificate_private_key"); ok { + params.CertificatePrivateKey = aws.String(v.(string)) + } + + domainName, err := conn.CreateDomainName(params) + if err != nil { + return fmt.Errorf("Error creating API Gateway Domain Name: %s", err) + } + + d.SetId(*domainName.DomainName) + d.Set("cloudfront_domain_name", domainName.DistributionDomainName) + d.Set("cloudfront_zone_id", cloudFrontRoute53ZoneID) + + return resourceAwsApiGatewayDomainNameRead(d, meta) +} + +func resourceAwsApiGatewayDomainNameRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + log.Printf("[DEBUG] Reading API Gateway Domain Name %s", d.Id()) + + domainName, err := conn.GetDomainName(&apigateway.GetDomainNameInput{ + DomainName: aws.String(d.Id()), + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { + log.Printf("[WARN] API gateway domain name %s has vanished\n", d.Id()) + d.SetId("") + return nil + } + + return err + } + + d.Set("certificate_name", domainName.CertificateName) + if err := d.Set("certificate_upload_date", domainName.CertificateUploadDate.Format(time.RFC3339)); err != nil { + log.Printf("[DEBUG] Error setting certificate_upload_date: %s", err) + } + d.Set("cloudfront_domain_name", domainName.DistributionDomainName) + d.Set("domain_name", domainName.DomainName) + d.Set("certificate_arn", domainName.CertificateArn) + + return nil +} + +func resourceAwsApiGatewayDomainNameUpdateOperations(d *schema.ResourceData) []*apigateway.PatchOperation { + operations := make([]*apigateway.PatchOperation, 0) + + if d.HasChange("certificate_name") { + operations = append(operations, &apigateway.PatchOperation{ + Op: 
aws.String("replace"), + Path: aws.String("/certificateName"), + Value: aws.String(d.Get("certificate_name").(string)), + }) + } + + if d.HasChange("certificate_arn") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/certificateArn"), + Value: aws.String(d.Get("certificate_arn").(string)), + }) + } + + return operations +} + +func resourceAwsApiGatewayDomainNameUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + log.Printf("[DEBUG] Updating API Gateway Domain Name %s", d.Id()) + + _, err := conn.UpdateDomainName(&apigateway.UpdateDomainNameInput{ + DomainName: aws.String(d.Id()), + PatchOperations: resourceAwsApiGatewayDomainNameUpdateOperations(d), + }) + + if err != nil { + return err + } + + return resourceAwsApiGatewayDomainNameRead(d, meta) +} + +func resourceAwsApiGatewayDomainNameDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + log.Printf("[DEBUG] Deleting API Gateway Domain Name: %s", d.Id()) + + return resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteDomainName(&apigateway.DeleteDomainNameInput{ + DomainName: aws.String(d.Id()), + }) + + if err == nil { + return nil + } + + if apigatewayErr, ok := err.(awserr.Error); ok && apigatewayErr.Code() == "NotFoundException" { + return nil + } + + return resource.NonRetryableError(err) + }) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_integration.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_integration.go new file mode 100644 index 000000000..f782e11ea --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_integration.go @@ -0,0 +1,348 @@ +package aws + +import ( + "encoding/json" + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/apigateway" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "strings" +) + +func resourceAwsApiGatewayIntegration() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsApiGatewayIntegrationCreate, + Read: resourceAwsApiGatewayIntegrationRead, + Update: resourceAwsApiGatewayIntegrationUpdate, + Delete: resourceAwsApiGatewayIntegrationDelete, + + Schema: map[string]*schema.Schema{ + "rest_api_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "resource_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "http_method": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateHTTPMethod, + }, + + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateApiGatewayIntegrationType, + }, + + "uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "credentials": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "integration_http_method": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateHTTPMethod, + }, + + "request_templates": { + Type: schema.TypeMap, + Optional: true, + Elem: schema.TypeString, + }, + + "request_parameters": { + Type: schema.TypeMap, + Elem: schema.TypeString, + Optional: true, + ConflictsWith: []string{"request_parameters_in_json"}, + }, + + "request_parameters_in_json": { + Type: 
schema.TypeString, + Optional: true, + ConflictsWith: []string{"request_parameters"}, + Deprecated: "Use field request_parameters instead", + }, + + "content_handling": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateApiGatewayIntegrationContentHandling, + }, + + "passthrough_behavior": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateApiGatewayIntegrationPassthroughBehavior, + }, + }, + } +} + +func resourceAwsApiGatewayIntegrationCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + log.Print("[DEBUG] Creating API Gateway Integration") + var integrationHttpMethod *string + if v, ok := d.GetOk("integration_http_method"); ok { + integrationHttpMethod = aws.String(v.(string)) + } + var uri *string + if v, ok := d.GetOk("uri"); ok { + uri = aws.String(v.(string)) + } + templates := make(map[string]string) + for k, v := range d.Get("request_templates").(map[string]interface{}) { + templates[k] = v.(string) + } + + parameters := make(map[string]string) + if kv, ok := d.GetOk("request_parameters"); ok { + for k, v := range kv.(map[string]interface{}) { + parameters[k] = v.(string) + } + } + + if v, ok := d.GetOk("request_parameters_in_json"); ok { + if err := json.Unmarshal([]byte(v.(string)), ¶meters); err != nil { + return fmt.Errorf("Error unmarshaling request_parameters_in_json: %s", err) + } + } + + var passthroughBehavior *string + if v, ok := d.GetOk("passthrough_behavior"); ok { + passthroughBehavior = aws.String(v.(string)) + } + + var credentials *string + if val, ok := d.GetOk("credentials"); ok { + credentials = aws.String(val.(string)) + } + + var contentHandling *string + if val, ok := d.GetOk("content_handling"); ok { + contentHandling = aws.String(val.(string)) + } + + _, err := conn.PutIntegration(&apigateway.PutIntegrationInput{ + HttpMethod: aws.String(d.Get("http_method").(string)), + ResourceId: aws.String(d.Get("resource_id").(string)), + RestApiId: aws.String(d.Get("rest_api_id").(string)), + Type: aws.String(d.Get("type").(string)), + IntegrationHttpMethod: integrationHttpMethod, + Uri: uri, + RequestParameters: aws.StringMap(parameters), + RequestTemplates: aws.StringMap(templates), + Credentials: credentials, + CacheNamespace: nil, + CacheKeyParameters: nil, + PassthroughBehavior: passthroughBehavior, + ContentHandling: contentHandling, + }) + if err != nil { + return fmt.Errorf("Error creating API Gateway Integration: %s", err) + } + + d.SetId(fmt.Sprintf("agi-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string))) + + return resourceAwsApiGatewayIntegrationRead(d, meta) +} + +func resourceAwsApiGatewayIntegrationRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + log.Printf("[DEBUG] Reading API Gateway Integration: %s", d.Id()) + integration, err := conn.GetIntegration(&apigateway.GetIntegrationInput{ + HttpMethod: aws.String(d.Get("http_method").(string)), + ResourceId: aws.String(d.Get("resource_id").(string)), + RestApiId: aws.String(d.Get("rest_api_id").(string)), + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { + d.SetId("") + return nil + } + return err + } + log.Printf("[DEBUG] Received API Gateway Integration: %s", integration) + d.SetId(fmt.Sprintf("agi-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string))) + + // 
AWS converts "" to null on their side, convert it back + if v, ok := integration.RequestTemplates["application/json"]; ok && v == nil { + integration.RequestTemplates["application/json"] = aws.String("") + } + + d.Set("request_templates", aws.StringValueMap(integration.RequestTemplates)) + d.Set("type", integration.Type) + d.Set("request_parameters", aws.StringValueMap(integration.RequestParameters)) + d.Set("request_parameters_in_json", aws.StringValueMap(integration.RequestParameters)) + d.Set("passthrough_behavior", integration.PassthroughBehavior) + + if integration.Uri != nil { + d.Set("uri", integration.Uri) + } + + if integration.Credentials != nil { + d.Set("credentials", integration.Credentials) + } + + if integration.ContentHandling != nil { + d.Set("content_handling", integration.ContentHandling) + } + + return nil +} + +func resourceAwsApiGatewayIntegrationUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + log.Printf("[DEBUG] Updating API Gateway Integration: %s", d.Id()) + operations := make([]*apigateway.PatchOperation, 0) + + // https://docs.aws.amazon.com/apigateway/api-reference/link-relation/integration-update/#remarks + // According to the above documentation, only a few parts are addable / removable. + if d.HasChange("request_templates") { + o, n := d.GetChange("request_templates") + prefix := "requestTemplates" + + os := o.(map[string]interface{}) + ns := n.(map[string]interface{}) + + // Handle Removal + for k := range os { + if _, ok := ns[k]; !ok { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("remove"), + Path: aws.String(fmt.Sprintf("/%s/%s", prefix, strings.Replace(k, "/", "~1", -1))), + }) + } + } + + for k, v := range ns { + // Handle replaces + if _, ok := os[k]; ok { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String(fmt.Sprintf("/%s/%s", prefix, strings.Replace(k, "/", "~1", -1))), + Value: aws.String(v.(string)), + }) + } + + // Handle additions + if _, ok := os[k]; !ok { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("add"), + Path: aws.String(fmt.Sprintf("/%s/%s", prefix, strings.Replace(k, "/", "~1", -1))), + Value: aws.String(v.(string)), + }) + } + } + } + + if d.HasChange("request_parameters") { + o, n := d.GetChange("request_parameters") + prefix := "requestParameters" + + os := o.(map[string]interface{}) + ns := n.(map[string]interface{}) + + // Handle Removal + for k := range os { + if _, ok := ns[k]; !ok { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("remove"), + Path: aws.String(fmt.Sprintf("/%s/%s", prefix, strings.Replace(k, "/", "~1", -1))), + }) + } + } + + for k, v := range ns { + // Handle replaces + if _, ok := os[k]; ok { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String(fmt.Sprintf("/%s/%s", prefix, strings.Replace(k, "/", "~1", -1))), + Value: aws.String(v.(string)), + }) + } + + // Handle additions + if _, ok := os[k]; !ok { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("add"), + Path: aws.String(fmt.Sprintf("/%s/%s", prefix, strings.Replace(k, "/", "~1", -1))), + Value: aws.String(v.(string)), + }) + } + } + } + + params := &apigateway.UpdateIntegrationInput{ + HttpMethod: aws.String(d.Get("http_method").(string)), + ResourceId: aws.String(d.Get("resource_id").(string)), + RestApiId: aws.String(d.Get("rest_api_id").(string)), + 
PatchOperations: operations,
+  }
+
+  _, err := conn.UpdateIntegration(params)
+  if err != nil {
+    return fmt.Errorf("Error updating API Gateway Integration: %s", err)
+  }
+
+  d.SetId(fmt.Sprintf("agi-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string)))
+
+  return resourceAwsApiGatewayIntegrationRead(d, meta)
+}
+
+func resourceAwsApiGatewayIntegrationDelete(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).apigateway
+  log.Printf("[DEBUG] Deleting API Gateway Integration: %s", d.Id())
+
+  return resource.Retry(5*time.Minute, func() *resource.RetryError {
+    _, err := conn.DeleteIntegration(&apigateway.DeleteIntegrationInput{
+      HttpMethod: aws.String(d.Get("http_method").(string)),
+      ResourceId: aws.String(d.Get("resource_id").(string)),
+      RestApiId:  aws.String(d.Get("rest_api_id").(string)),
+    })
+    if err == nil {
+      return nil
+    }
+
+    // Check the type assertion before using the error value, so a
+    // non-awserr error cannot cause a nil dereference.
+    apigatewayErr, ok := err.(awserr.Error)
+    if !ok {
+      return resource.NonRetryableError(err)
+    }
+
+    // The integration is already gone, so treat deletion as successful.
+    if apigatewayErr.Code() == "NotFoundException" {
+      return nil
+    }
+
+    return resource.NonRetryableError(err)
+  })
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_integration_response.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_integration_response.go
new file mode 100644
index 000000000..24c66f28e
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_integration_response.go
@@ -0,0 +1,184 @@
+package aws
+
+import (
+  "encoding/json"
+  "fmt"
+  "log"
+  "time"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/aws/awserr"
+  "github.com/aws/aws-sdk-go/service/apigateway"
+  "github.com/hashicorp/terraform/helper/resource"
+  "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsApiGatewayIntegrationResponse() *schema.Resource {
+  return &schema.Resource{
+    Create: resourceAwsApiGatewayIntegrationResponseCreate,
+    Read:   resourceAwsApiGatewayIntegrationResponseRead,
+    Update: resourceAwsApiGatewayIntegrationResponseCreate,
+    Delete: resourceAwsApiGatewayIntegrationResponseDelete,
+
+    Schema: map[string]*schema.Schema{
+      "rest_api_id": {
+        Type:     schema.TypeString,
+        Required: true,
+        ForceNew: true,
+      },
+
+      "resource_id": {
+        Type:     schema.TypeString,
+        Required: true,
+        ForceNew: true,
+      },
+
+      "http_method": {
+        Type:         schema.TypeString,
+        Required:     true,
+        ForceNew:     true,
+        ValidateFunc: validateHTTPMethod,
+      },
+
+      "status_code": {
+        Type:     schema.TypeString,
+        Required: true,
+      },
+
+      "selection_pattern": {
+        Type:     schema.TypeString,
+        Optional: true,
+      },
+
+      "response_templates": {
+        Type:     schema.TypeMap,
+        Optional: true,
+        Elem:     schema.TypeString,
+      },
+
+      "response_parameters": {
+        Type:          schema.TypeMap,
+        Elem:          schema.TypeString,
+        Optional:      true,
+        ConflictsWith: []string{"response_parameters_in_json"},
+      },
+
+      "response_parameters_in_json": {
+        Type:          schema.TypeString,
+        Optional:      true,
+        ConflictsWith: []string{"response_parameters"},
+        Deprecated:    "Use field response_parameters instead",
+      },
+
+      "content_handling": {
+        Type:         schema.TypeString,
+        Optional:     true,
+        ValidateFunc: validateApiGatewayIntegrationContentHandling,
+      },
+    },
+  }
+}
+
+func resourceAwsApiGatewayIntegrationResponseCreate(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).apigateway
+
+  templates := make(map[string]string)
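+  // Note: PutIntegrationResponse overwrites any existing integration
+  // response for this status code, which is presumably why Update is
+  // wired to the Create function above; the template and parameter maps
+  // below are rebuilt from configuration on every call.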
d.Get("response_templates").(map[string]interface{}) { + templates[k] = v.(string) + } + + parameters := make(map[string]string) + if kv, ok := d.GetOk("response_parameters"); ok { + for k, v := range kv.(map[string]interface{}) { + parameters[k] = v.(string) + } + } + if v, ok := d.GetOk("response_parameters_in_json"); ok { + if err := json.Unmarshal([]byte(v.(string)), ¶meters); err != nil { + return fmt.Errorf("Error unmarshaling response_parameters_in_json: %s", err) + } + } + var contentHandling *string + if val, ok := d.GetOk("content_handling"); ok { + contentHandling = aws.String(val.(string)) + } + + input := apigateway.PutIntegrationResponseInput{ + HttpMethod: aws.String(d.Get("http_method").(string)), + ResourceId: aws.String(d.Get("resource_id").(string)), + RestApiId: aws.String(d.Get("rest_api_id").(string)), + StatusCode: aws.String(d.Get("status_code").(string)), + ResponseTemplates: aws.StringMap(templates), + ResponseParameters: aws.StringMap(parameters), + ContentHandling: contentHandling, + } + if v, ok := d.GetOk("selection_pattern"); ok { + input.SelectionPattern = aws.String(v.(string)) + } + + _, err := conn.PutIntegrationResponse(&input) + if err != nil { + return fmt.Errorf("Error creating API Gateway Integration Response: %s", err) + } + + d.SetId(fmt.Sprintf("agir-%s-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string), d.Get("status_code").(string))) + log.Printf("[DEBUG] API Gateway Integration Response ID: %s", d.Id()) + + return resourceAwsApiGatewayIntegrationResponseRead(d, meta) +} + +func resourceAwsApiGatewayIntegrationResponseRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + log.Printf("[DEBUG] Reading API Gateway Integration Response %s", d.Id()) + integrationResponse, err := conn.GetIntegrationResponse(&apigateway.GetIntegrationResponseInput{ + HttpMethod: aws.String(d.Get("http_method").(string)), + ResourceId: aws.String(d.Get("resource_id").(string)), + RestApiId: aws.String(d.Get("rest_api_id").(string)), + StatusCode: aws.String(d.Get("status_code").(string)), + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { + d.SetId("") + return nil + } + return err + } + + log.Printf("[DEBUG] Received API Gateway Integration Response: %s", integrationResponse) + + d.SetId(fmt.Sprintf("agir-%s-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string), d.Get("status_code").(string))) + d.Set("response_templates", integrationResponse.ResponseTemplates) + d.Set("selection_pattern", integrationResponse.SelectionPattern) + d.Set("response_parameters", aws.StringValueMap(integrationResponse.ResponseParameters)) + d.Set("response_parameters_in_json", aws.StringValueMap(integrationResponse.ResponseParameters)) + return nil +} + +func resourceAwsApiGatewayIntegrationResponseDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + log.Printf("[DEBUG] Deleting API Gateway Integration Response: %s", d.Id()) + + return resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteIntegrationResponse(&apigateway.DeleteIntegrationResponseInput{ + HttpMethod: aws.String(d.Get("http_method").(string)), + ResourceId: aws.String(d.Get("resource_id").(string)), + RestApiId: aws.String(d.Get("rest_api_id").(string)), + StatusCode: aws.String(d.Get("status_code").(string)), + }) + if err == nil { + return nil + } + + 
+    apigatewayErr, ok := err.(awserr.Error)
+    if !ok {
+      return resource.NonRetryableError(err)
+    }
+
+    if apigatewayErr.Code() == "NotFoundException" {
+      return nil
+    }
+
+    return resource.NonRetryableError(err)
+  })
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_method.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_method.go
new file mode 100644
index 000000000..577c44e15
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_method.go
@@ -0,0 +1,270 @@
+package aws
+
+import (
+  "encoding/json"
+  "fmt"
+  "log"
+  "strconv"
+  "time"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/aws/awserr"
+  "github.com/aws/aws-sdk-go/service/apigateway"
+  "github.com/hashicorp/terraform/helper/resource"
+  "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsApiGatewayMethod() *schema.Resource {
+  return &schema.Resource{
+    Create: resourceAwsApiGatewayMethodCreate,
+    Read:   resourceAwsApiGatewayMethodRead,
+    Update: resourceAwsApiGatewayMethodUpdate,
+    Delete: resourceAwsApiGatewayMethodDelete,
+
+    Schema: map[string]*schema.Schema{
+      "rest_api_id": &schema.Schema{
+        Type:     schema.TypeString,
+        Required: true,
+        ForceNew: true,
+      },
+
+      "resource_id": &schema.Schema{
+        Type:     schema.TypeString,
+        Required: true,
+        ForceNew: true,
+      },
+
+      "http_method": &schema.Schema{
+        Type:         schema.TypeString,
+        Required:     true,
+        ForceNew:     true,
+        ValidateFunc: validateHTTPMethod,
+      },
+
+      "authorization": &schema.Schema{
+        Type:     schema.TypeString,
+        Required: true,
+      },
+
+      "authorizer_id": &schema.Schema{
+        Type:     schema.TypeString,
+        Optional: true,
+      },
+
+      "api_key_required": &schema.Schema{
+        Type:     schema.TypeBool,
+        Optional: true,
+        Default:  false,
+      },
+
+      "request_models": &schema.Schema{
+        Type:     schema.TypeMap,
+        Optional: true,
+        Elem:     schema.TypeString,
+      },
+
+      "request_parameters": &schema.Schema{
+        Type:          schema.TypeMap,
+        Elem:          schema.TypeBool,
+        Optional:      true,
+        ConflictsWith: []string{"request_parameters_in_json"},
+      },
+
+      "request_parameters_in_json": &schema.Schema{
+        Type:          schema.TypeString,
+        Optional:      true,
+        ConflictsWith: []string{"request_parameters"},
+        Deprecated:    "Use field request_parameters instead",
+      },
+    },
+  }
+}
+
+func resourceAwsApiGatewayMethodCreate(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).apigateway
+
+  input := apigateway.PutMethodInput{
+    AuthorizationType: aws.String(d.Get("authorization").(string)),
+    HttpMethod:        aws.String(d.Get("http_method").(string)),
+    ResourceId:        aws.String(d.Get("resource_id").(string)),
+    RestApiId:         aws.String(d.Get("rest_api_id").(string)),
+    ApiKeyRequired:    aws.Bool(d.Get("api_key_required").(bool)),
+  }
+
+  models := make(map[string]string)
+  for k, v := range d.Get("request_models").(map[string]interface{}) {
+    models[k] = v.(string)
+  }
+  if len(models) > 0 {
+    input.RequestModels = aws.StringMap(models)
+  }
+
+  parameters := make(map[string]bool)
+  if kv, ok := d.GetOk("request_parameters"); ok {
+    for k, v := range kv.(map[string]interface{}) {
+      parameters[k], ok = v.(bool)
+      if !ok {
+        value, _ := strconv.ParseBool(v.(string))
+        parameters[k] = value
+      }
+    }
+    input.RequestParameters = aws.BoolMap(parameters)
+  }
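+  // The deprecated JSON form is parsed last, so any keys it contains
+  // overwrite values collected from request_parameters above.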
+  if v, ok := d.GetOk("request_parameters_in_json"); ok {
+    if err := json.Unmarshal([]byte(v.(string)), &parameters); err != nil {
+      return fmt.Errorf("Error unmarshaling request_parameters_in_json: %s", err)
+    }
+    input.RequestParameters = aws.BoolMap(parameters)
+  }
+
+  if v, ok := d.GetOk("authorizer_id"); ok {
+    input.AuthorizerId = aws.String(v.(string))
+  }
+
+  _, err := conn.PutMethod(&input)
+  if err != nil {
+    return fmt.Errorf("Error creating API Gateway Method: %s", err)
+  }
+
+  d.SetId(fmt.Sprintf("agm-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string)))
+  log.Printf("[DEBUG] API Gateway Method ID: %s", d.Id())
+
+  return nil
+}
+
+func resourceAwsApiGatewayMethodRead(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).apigateway
+
+  log.Printf("[DEBUG] Reading API Gateway Method %s", d.Id())
+  out, err := conn.GetMethod(&apigateway.GetMethodInput{
+    HttpMethod: aws.String(d.Get("http_method").(string)),
+    ResourceId: aws.String(d.Get("resource_id").(string)),
+    RestApiId:  aws.String(d.Get("rest_api_id").(string)),
+  })
+  if err != nil {
+    if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
+      d.SetId("")
+      return nil
+    }
+    return err
+  }
+  log.Printf("[DEBUG] Received API Gateway Method: %s", out)
+  d.SetId(fmt.Sprintf("agm-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string)))
+  d.Set("request_parameters", aws.BoolValueMap(out.RequestParameters))
+  d.Set("request_parameters_in_json", aws.BoolValueMap(out.RequestParameters))
+  d.Set("api_key_required", out.ApiKeyRequired)
+  // The schema key is "authorization"; setting the non-existent
+  // "authorization_type" key was a no-op.
+  d.Set("authorization", out.AuthorizationType)
+  d.Set("authorizer_id", out.AuthorizerId)
+  d.Set("request_models", aws.StringValueMap(out.RequestModels))
+
+  return nil
+}
+
+func resourceAwsApiGatewayMethodUpdate(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).apigateway
+
+  log.Printf("[DEBUG] Updating API Gateway Method %s", d.Id())
+  operations := make([]*apigateway.PatchOperation, 0)
+  if d.HasChange("resource_id") {
+    operations = append(operations, &apigateway.PatchOperation{
+      Op:    aws.String("replace"),
+      Path:  aws.String("/resourceId"),
+      Value: aws.String(d.Get("resource_id").(string)),
+    })
+  }
+
+  if d.HasChange("request_models") {
+    operations = append(operations, expandApiGatewayRequestResponseModelOperations(d, "request_models", "requestModels")...)
+  }
+
+  if d.HasChange("request_parameters_in_json") {
+    ops, err := deprecatedExpandApiGatewayMethodParametersJSONOperations(d, "request_parameters_in_json", "requestParameters")
+    if err != nil {
+      return err
+    }
+    operations = append(operations, ops...)
+  }
+
+  if d.HasChange("request_parameters") {
+    ops, err := expandApiGatewayMethodParametersOperations(d, "request_parameters", "requestParameters")
+    if err != nil {
+      return err
+    }
+    operations = append(operations, ops...)
+  }
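+  // The remaining attributes each map to a single-path replace operation.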
+
+  if d.HasChange("authorization") {
+    operations = append(operations, &apigateway.PatchOperation{
+      Op:    aws.String("replace"),
+      Path:  aws.String("/authorizationType"),
+      Value: aws.String(d.Get("authorization").(string)),
+    })
+  }
+
+  if d.HasChange("authorizer_id") {
+    operations = append(operations, &apigateway.PatchOperation{
+      Op:    aws.String("replace"),
+      Path:  aws.String("/authorizerId"),
+      Value: aws.String(d.Get("authorizer_id").(string)),
+    })
+  }
+
+  if d.HasChange("api_key_required") {
+    operations = append(operations, &apigateway.PatchOperation{
+      Op:    aws.String("replace"),
+      Path:  aws.String("/apiKeyRequired"),
+      Value: aws.String(fmt.Sprintf("%t", d.Get("api_key_required").(bool))),
+    })
+  }
+
+  method, err := conn.UpdateMethod(&apigateway.UpdateMethodInput{
+    HttpMethod:      aws.String(d.Get("http_method").(string)),
+    ResourceId:      aws.String(d.Get("resource_id").(string)),
+    RestApiId:       aws.String(d.Get("rest_api_id").(string)),
+    PatchOperations: operations,
+  })
+
+  if err != nil {
+    return err
+  }
+
+  log.Printf("[DEBUG] Received API Gateway Method: %s", method)
+
+  return resourceAwsApiGatewayMethodRead(d, meta)
+}
+
+func resourceAwsApiGatewayMethodDelete(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).apigateway
+  log.Printf("[DEBUG] Deleting API Gateway Method: %s", d.Id())
+
+  return resource.Retry(5*time.Minute, func() *resource.RetryError {
+    _, err := conn.DeleteMethod(&apigateway.DeleteMethodInput{
+      HttpMethod: aws.String(d.Get("http_method").(string)),
+      ResourceId: aws.String(d.Get("resource_id").(string)),
+      RestApiId:  aws.String(d.Get("rest_api_id").(string)),
+    })
+    if err == nil {
+      return nil
+    }
+
+    apigatewayErr, ok := err.(awserr.Error)
+    if !ok {
+      return resource.NonRetryableError(err)
+    }
+
+    if apigatewayErr.Code() == "NotFoundException" {
+      return nil
+    }
+
+    return resource.NonRetryableError(err)
+  })
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_method_response.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_method_response.go
new file mode 100644
index 000000000..b0b929ad7
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_method_response.go
@@ -0,0 +1,210 @@
+package aws
+
+import (
+  "encoding/json"
+  "fmt"
+  "log"
+  "strconv"
+  "time"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/aws/awserr"
+  "github.com/aws/aws-sdk-go/service/apigateway"
+  "github.com/hashicorp/terraform/helper/resource"
+  "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsApiGatewayMethodResponse() *schema.Resource {
+  return &schema.Resource{
+    Create: resourceAwsApiGatewayMethodResponseCreate,
+    Read:   resourceAwsApiGatewayMethodResponseRead,
+    Update: resourceAwsApiGatewayMethodResponseUpdate,
+    Delete: resourceAwsApiGatewayMethodResponseDelete,
+
+    Schema: map[string]*schema.Schema{
+      "rest_api_id": &schema.Schema{
+        Type:     schema.TypeString,
+        Required: true,
+        ForceNew: true,
+      },
+
+      "resource_id": &schema.Schema{
+        Type:     schema.TypeString,
+        Required: true,
+        ForceNew: true,
+      },
+
+      "http_method": &schema.Schema{
+        Type:         schema.TypeString,
+        Required:     true,
+        ForceNew:     true,
+        ValidateFunc: validateHTTPMethod,
+      },
+
+      "status_code": &schema.Schema{
+        Type:     schema.TypeString,
+        Required: true,
+      },
+
+      "response_models": &schema.Schema{
+        Type:     schema.TypeMap,
+        Optional: true,
+        Elem:     schema.TypeString,
+      },
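+      // Keys below are header/body mappings; the boolean presumably marks
+      // whether the mapping is required, as with method request parameters.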
+      "response_parameters": &schema.Schema{
+        Type:          schema.TypeMap,
+        Elem:          schema.TypeBool,
+        Optional:      true,
+        ConflictsWith: []string{"response_parameters_in_json"},
+      },
+
+      "response_parameters_in_json": &schema.Schema{
+        Type:          schema.TypeString,
+        Optional:      true,
+        ConflictsWith: []string{"response_parameters"},
+        Deprecated:    "Use field response_parameters instead",
+      },
+    },
+  }
+}
+
+func resourceAwsApiGatewayMethodResponseCreate(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).apigateway
+
+  models := make(map[string]string)
+  for k, v := range d.Get("response_models").(map[string]interface{}) {
+    models[k] = v.(string)
+  }
+
+  parameters := make(map[string]bool)
+  if kv, ok := d.GetOk("response_parameters"); ok {
+    for k, v := range kv.(map[string]interface{}) {
+      parameters[k], ok = v.(bool)
+      if !ok {
+        value, _ := strconv.ParseBool(v.(string))
+        parameters[k] = value
+      }
+    }
+  }
+  if v, ok := d.GetOk("response_parameters_in_json"); ok {
+    if err := json.Unmarshal([]byte(v.(string)), &parameters); err != nil {
+      return fmt.Errorf("Error unmarshaling response_parameters_in_json: %s", err)
+    }
+  }
+
+  _, err := conn.PutMethodResponse(&apigateway.PutMethodResponseInput{
+    HttpMethod:         aws.String(d.Get("http_method").(string)),
+    ResourceId:         aws.String(d.Get("resource_id").(string)),
+    RestApiId:          aws.String(d.Get("rest_api_id").(string)),
+    StatusCode:         aws.String(d.Get("status_code").(string)),
+    ResponseModels:     aws.StringMap(models),
+    ResponseParameters: aws.BoolMap(parameters),
+  })
+  if err != nil {
+    return fmt.Errorf("Error creating API Gateway Method Response: %s", err)
+  }
+
+  d.SetId(fmt.Sprintf("agmr-%s-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string), d.Get("status_code").(string)))
+  log.Printf("[DEBUG] API Gateway Method Response ID: %s", d.Id())
+
+  return nil
+}
+
+func resourceAwsApiGatewayMethodResponseRead(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).apigateway
+
+  log.Printf("[DEBUG] Reading API Gateway Method Response %s", d.Id())
+  methodResponse, err := conn.GetMethodResponse(&apigateway.GetMethodResponseInput{
+    HttpMethod: aws.String(d.Get("http_method").(string)),
+    ResourceId: aws.String(d.Get("resource_id").(string)),
+    RestApiId:  aws.String(d.Get("rest_api_id").(string)),
+    StatusCode: aws.String(d.Get("status_code").(string)),
+  })
+  if err != nil {
+    if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
+      d.SetId("")
+      return nil
+    }
+    return err
+  }
+
+  log.Printf("[DEBUG] Received API Gateway Method Response: %s", methodResponse)
+  d.Set("response_models", aws.StringValueMap(methodResponse.ResponseModels))
+  d.Set("response_parameters", aws.BoolValueMap(methodResponse.ResponseParameters))
+  d.Set("response_parameters_in_json", aws.BoolValueMap(methodResponse.ResponseParameters))
+  d.SetId(fmt.Sprintf("agmr-%s-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string), d.Get("status_code").(string)))
+
+  return nil
+}
+
+func resourceAwsApiGatewayMethodResponseUpdate(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).apigateway
+
+  log.Printf("[DEBUG] Updating API Gateway Method Response %s", d.Id())
+  operations := make([]*apigateway.PatchOperation, 0)
+
+  if d.HasChange("response_models") {
+    operations = append(operations, expandApiGatewayRequestResponseModelOperations(d, "response_models", "responseModels")...)
+  }
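+  // Parameter diffs are delegated to shared expand helpers, which
+  // presumably emit add/replace/remove PatchOperations per key.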
+
+  if d.HasChange("response_parameters_in_json") {
+    ops, err := deprecatedExpandApiGatewayMethodParametersJSONOperations(d, "response_parameters_in_json", "responseParameters")
+    if err != nil {
+      return err
+    }
+    operations = append(operations, ops...)
+  }
+
+  if d.HasChange("response_parameters") {
+    ops, err := expandApiGatewayMethodParametersOperations(d, "response_parameters", "responseParameters")
+    if err != nil {
+      return err
+    }
+    operations = append(operations, ops...)
+  }
+
+  out, err := conn.UpdateMethodResponse(&apigateway.UpdateMethodResponseInput{
+    HttpMethod:      aws.String(d.Get("http_method").(string)),
+    ResourceId:      aws.String(d.Get("resource_id").(string)),
+    RestApiId:       aws.String(d.Get("rest_api_id").(string)),
+    StatusCode:      aws.String(d.Get("status_code").(string)),
+    PatchOperations: operations,
+  })
+
+  if err != nil {
+    return err
+  }
+
+  log.Printf("[DEBUG] Received API Gateway Method Response: %s", out)
+
+  return resourceAwsApiGatewayMethodResponseRead(d, meta)
+}
+
+func resourceAwsApiGatewayMethodResponseDelete(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).apigateway
+  log.Printf("[DEBUG] Deleting API Gateway Method Response: %s", d.Id())
+
+  return resource.Retry(5*time.Minute, func() *resource.RetryError {
+    _, err := conn.DeleteMethodResponse(&apigateway.DeleteMethodResponseInput{
+      HttpMethod: aws.String(d.Get("http_method").(string)),
+      ResourceId: aws.String(d.Get("resource_id").(string)),
+      RestApiId:  aws.String(d.Get("rest_api_id").(string)),
+      StatusCode: aws.String(d.Get("status_code").(string)),
+    })
+    if err == nil {
+      return nil
+    }
+
+    apigatewayErr, ok := err.(awserr.Error)
+    if !ok {
+      return resource.NonRetryableError(err)
+    }
+
+    if apigatewayErr.Code() == "NotFoundException" {
+      return nil
+    }
+
+    return resource.NonRetryableError(err)
+  })
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_method_settings.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_method_settings.go
new file mode 100644
index 000000000..06d5efd01
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_method_settings.go
@@ -0,0 +1,248 @@
+package aws
+
+import (
+  "fmt"
+  "log"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/aws/awserr"
+  "github.com/aws/aws-sdk-go/service/apigateway"
+  "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsApiGatewayMethodSettings() *schema.Resource {
+  return &schema.Resource{
+    Create: resourceAwsApiGatewayMethodSettingsUpdate,
+    Read:   resourceAwsApiGatewayMethodSettingsRead,
+    Update: resourceAwsApiGatewayMethodSettingsUpdate,
+    Delete: resourceAwsApiGatewayMethodSettingsDelete,
+
+    Schema: map[string]*schema.Schema{
+      "rest_api_id": {
+        Type:     schema.TypeString,
+        Required: true,
+        ForceNew: true,
+      },
+      "stage_name": {
+        Type:     schema.TypeString,
+        Required: true,
+        ForceNew: true,
+      },
+      "method_path": {
+        Type:     schema.TypeString,
+        Required: true,
+        ForceNew: true,
+      },
+      "settings": {
+        Type:     schema.TypeList,
+        Required: true,
+        MaxItems: 1,
+        Elem: &schema.Resource{
+          Schema: map[string]*schema.Schema{
+            "metrics_enabled": {
+              Type:     schema.TypeBool,
+              Optional: true,
+            },
+            "logging_level": {
+              Type:     schema.TypeString,
+              Optional: true,
+            },
+            "data_trace_enabled": {
+              Type:     schema.TypeBool,
+              Optional: true,
+            },
+            "throttling_burst_limit": {
+              Type:     schema.TypeInt,
+              Optional: true,
+            },
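+            // burstLimit caps short request spikes; rateLimit below is the
+            // sustained requests-per-second ceiling.
+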
"throttling_rate_limit": { + Type: schema.TypeFloat, + Optional: true, + }, + "caching_enabled": { + Type: schema.TypeBool, + Optional: true, + }, + "cache_ttl_in_seconds": { + Type: schema.TypeInt, + Optional: true, + }, + "cache_data_encrypted": { + Type: schema.TypeBool, + Optional: true, + }, + "require_authorization_for_cache_control": { + Type: schema.TypeBool, + Optional: true, + }, + "unauthorized_cache_control_header_strategy": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + } +} + +func resourceAwsApiGatewayMethodSettingsRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + log.Printf("[DEBUG] Reading API Gateway Method Settings %s", d.Id()) + input := apigateway.GetStageInput{ + RestApiId: aws.String(d.Get("rest_api_id").(string)), + StageName: aws.String(d.Get("stage_name").(string)), + } + stage, err := conn.GetStage(&input) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { + log.Printf("[WARN] API Gateway Stage %s not found, removing method settings", d.Id()) + d.SetId("") + return nil + } + return err + } + log.Printf("[DEBUG] Received API Gateway Stage: %s", stage) + + methodPath := d.Get("method_path").(string) + settings, ok := stage.MethodSettings[methodPath] + if !ok { + log.Printf("[WARN] API Gateway Method Settings for %q not found, removing", methodPath) + d.SetId("") + return nil + } + + d.Set("settings.0.metrics_enabled", settings.MetricsEnabled) + d.Set("settings.0.logging_level", settings.LoggingLevel) + d.Set("settings.0.data_trace_enabled", settings.DataTraceEnabled) + d.Set("settings.0.throttling_burst_limit", settings.ThrottlingBurstLimit) + d.Set("settings.0.throttling_rate_limit", settings.ThrottlingRateLimit) + d.Set("settings.0.caching_enabled", settings.CachingEnabled) + d.Set("settings.0.cache_ttl_in_seconds", settings.CacheTtlInSeconds) + d.Set("settings.0.cache_data_encrypted", settings.CacheDataEncrypted) + d.Set("settings.0.require_authorization_for_cache_control", settings.RequireAuthorizationForCacheControl) + d.Set("settings.0.unauthorized_cache_control_header_strategy", settings.UnauthorizedCacheControlHeaderStrategy) + + return nil +} + +func resourceAwsApiGatewayMethodSettingsUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + methodPath := d.Get("method_path").(string) + prefix := fmt.Sprintf("/%s/", methodPath) + + ops := make([]*apigateway.PatchOperation, 0) + if d.HasChange("settings.0.metrics_enabled") { + ops = append(ops, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String(prefix + "metrics/enabled"), + Value: aws.String(fmt.Sprintf("%t", d.Get("settings.0.metrics_enabled").(bool))), + }) + } + if d.HasChange("settings.0.logging_level") { + ops = append(ops, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String(prefix + "logging/loglevel"), + Value: aws.String(d.Get("settings.0.logging_level").(string)), + }) + } + if d.HasChange("settings.0.data_trace_enabled") { + ops = append(ops, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String(prefix + "logging/dataTrace"), + Value: aws.String(fmt.Sprintf("%t", d.Get("settings.0.data_trace_enabled").(bool))), + }) + } + + if d.HasChange("settings.0.throttling_burst_limit") { + ops = append(ops, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String(prefix + "throttling/burstLimit"), + Value: aws.String(fmt.Sprintf("%d", 
d.Get("settings.0.throttling_burst_limit").(int))), + }) + } + if d.HasChange("settings.0.throttling_rate_limit") { + ops = append(ops, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String(prefix + "throttling/rateLimit"), + Value: aws.String(fmt.Sprintf("%f", d.Get("settings.0.throttling_rate_limit").(float64))), + }) + } + if d.HasChange("settings.0.caching_enabled") { + ops = append(ops, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String(prefix + "caching/enabled"), + Value: aws.String(fmt.Sprintf("%t", d.Get("settings.0.caching_enabled").(bool))), + }) + } + if d.HasChange("settings.0.cache_ttl_in_seconds") { + ops = append(ops, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String(prefix + "caching/ttlInSeconds"), + Value: aws.String(fmt.Sprintf("%d", d.Get("settings.0.cache_ttl_in_seconds").(int))), + }) + } + if d.HasChange("settings.0.cache_data_encrypted") { + ops = append(ops, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String(prefix + "caching/dataEncrypted"), + Value: aws.String(fmt.Sprintf("%d", d.Get("settings.0.cache_data_encrypted").(int))), + }) + } + if d.HasChange("settings.0.require_authorization_for_cache_control") { + ops = append(ops, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String(prefix + "caching/requireAuthorizationForCacheControl"), + Value: aws.String(fmt.Sprintf("%t", d.Get("settings.0.require_authorization_for_cache_control").(bool))), + }) + } + if d.HasChange("settings.0.unauthorized_cache_control_header_strategy") { + ops = append(ops, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String(prefix + "caching/unauthorizedCacheControlHeaderStrategy"), + Value: aws.String(d.Get("settings.0.unauthorized_cache_control_header_strategy").(string)), + }) + } + + restApiId := d.Get("rest_api_id").(string) + stageName := d.Get("stage_name").(string) + input := apigateway.UpdateStageInput{ + RestApiId: aws.String(restApiId), + StageName: aws.String(stageName), + PatchOperations: ops, + } + log.Printf("[DEBUG] Updating API Gateway Stage: %s", input) + _, err := conn.UpdateStage(&input) + if err != nil { + return fmt.Errorf("Updating API Gateway Stage failed: %s", err) + } + + d.SetId(restApiId + "-" + stageName + "-" + methodPath) + + return resourceAwsApiGatewayMethodSettingsRead(d, meta) +} + +func resourceAwsApiGatewayMethodSettingsDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + log.Printf("[DEBUG] Deleting API Gateway Method Settings: %s", d.Id()) + + input := apigateway.UpdateStageInput{ + RestApiId: aws.String(d.Get("rest_api_id").(string)), + StageName: aws.String(d.Get("stage_name").(string)), + PatchOperations: []*apigateway.PatchOperation{ + { + Op: aws.String("remove"), + Path: aws.String(fmt.Sprintf("/%s", d.Get("method_path").(string))), + }, + }, + } + log.Printf("[DEBUG] Updating API Gateway Stage: %s", input) + _, err := conn.UpdateStage(&input) + if err != nil { + return fmt.Errorf("Updating API Gateway Stage failed: %s", err) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_model.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_model.go new file mode 100644 index 000000000..3f2721889 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_model.go @@ -0,0 +1,168 @@ +package aws + +import 
( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/apigateway" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsApiGatewayModel() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsApiGatewayModelCreate, + Read: resourceAwsApiGatewayModelRead, + Update: resourceAwsApiGatewayModelUpdate, + Delete: resourceAwsApiGatewayModelDelete, + + Schema: map[string]*schema.Schema{ + "rest_api_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "schema": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "content_type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsApiGatewayModelCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + log.Printf("[DEBUG] Creating API Gateway Model") + + var description *string + if v, ok := d.GetOk("description"); ok { + description = aws.String(v.(string)) + } + var schema *string + if v, ok := d.GetOk("schema"); ok { + schema = aws.String(v.(string)) + } + + var err error + model, err := conn.CreateModel(&apigateway.CreateModelInput{ + Name: aws.String(d.Get("name").(string)), + RestApiId: aws.String(d.Get("rest_api_id").(string)), + ContentType: aws.String(d.Get("content_type").(string)), + + Description: description, + Schema: schema, + }) + + if err != nil { + return fmt.Errorf("Error creating API Gateway Model: %s", err) + } + + d.SetId(*model.Id) + + return nil +} + +func resourceAwsApiGatewayModelRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + log.Printf("[DEBUG] Reading API Gateway Model %s", d.Id()) + out, err := conn.GetModel(&apigateway.GetModelInput{ + ModelName: aws.String(d.Get("name").(string)), + RestApiId: aws.String(d.Get("rest_api_id").(string)), + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { + d.SetId("") + return nil + } + return err + } + log.Printf("[DEBUG] Received API Gateway Model: %s", out) + d.SetId(*out.Id) + d.Set("description", out.Description) + d.Set("schema", out.Schema) + d.Set("content_type", out.ContentType) + + return nil +} + +func resourceAwsApiGatewayModelUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + log.Printf("[DEBUG] Reading API Gateway Model %s", d.Id()) + operations := make([]*apigateway.PatchOperation, 0) + if d.HasChange("description") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/description"), + Value: aws.String(d.Get("description").(string)), + }) + } + if d.HasChange("schema") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/schema"), + Value: aws.String(d.Get("schema").(string)), + }) + } + + out, err := conn.UpdateModel(&apigateway.UpdateModelInput{ + ModelName: aws.String(d.Get("name").(string)), + RestApiId: aws.String(d.Get("rest_api_id").(string)), + PatchOperations: operations, + }) + if err != nil { + return err + } + log.Printf("[DEBUG] Received API Gateway Model: %s", out) + + return 
resourceAwsApiGatewayModelRead(d, meta)
+}
+
+func resourceAwsApiGatewayModelDelete(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).apigateway
+  log.Printf("[DEBUG] Deleting API Gateway Model: %s", d.Id())
+
+  return resource.Retry(5*time.Minute, func() *resource.RetryError {
+    log.Printf("[DEBUG] schema is %#v", d)
+    _, err := conn.DeleteModel(&apigateway.DeleteModelInput{
+      ModelName: aws.String(d.Get("name").(string)),
+      RestApiId: aws.String(d.Get("rest_api_id").(string)),
+    })
+    if err == nil {
+      return nil
+    }
+
+    apigatewayErr, ok := err.(awserr.Error)
+    if !ok {
+      return resource.NonRetryableError(err)
+    }
+
+    if apigatewayErr.Code() == "NotFoundException" {
+      return nil
+    }
+
+    return resource.NonRetryableError(err)
+  })
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_resource.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_resource.go
new file mode 100644
index 000000000..04f136174
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_resource.go
@@ -0,0 +1,149 @@
+package aws
+
+import (
+  "fmt"
+  "log"
+  "time"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/aws/awserr"
+  "github.com/aws/aws-sdk-go/service/apigateway"
+  "github.com/hashicorp/terraform/helper/resource"
+  "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsApiGatewayResource() *schema.Resource {
+  return &schema.Resource{
+    Create: resourceAwsApiGatewayResourceCreate,
+    Read:   resourceAwsApiGatewayResourceRead,
+    Update: resourceAwsApiGatewayResourceUpdate,
+    Delete: resourceAwsApiGatewayResourceDelete,
+
+    Schema: map[string]*schema.Schema{
+      "rest_api_id": &schema.Schema{
+        Type:     schema.TypeString,
+        Required: true,
+        ForceNew: true,
+      },
+
+      "parent_id": &schema.Schema{
+        Type:     schema.TypeString,
+        Required: true,
+      },
+
+      "path_part": &schema.Schema{
+        Type:     schema.TypeString,
+        Required: true,
+      },
+
+      "path": &schema.Schema{
+        Type:     schema.TypeString,
+        Computed: true,
+      },
+    },
+  }
+}
+
+func resourceAwsApiGatewayResourceCreate(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).apigateway
+  log.Printf("[DEBUG] Creating API Gateway Resource for API %s", d.Get("rest_api_id").(string))
+
+  var err error
+  resource, err := conn.CreateResource(&apigateway.CreateResourceInput{
+    ParentId:  aws.String(d.Get("parent_id").(string)),
+    PathPart:  aws.String(d.Get("path_part").(string)),
+    RestApiId: aws.String(d.Get("rest_api_id").(string)),
+  })
+
+  if err != nil {
+    return fmt.Errorf("Error creating API Gateway Resource: %s", err)
+  }
+
+  d.SetId(*resource.Id)
+  d.Set("path", resource.Path)
+
+  return nil
+}
+
+func resourceAwsApiGatewayResourceRead(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).apigateway
+
+  log.Printf("[DEBUG] Reading API Gateway Resource %s", d.Id())
+  resource, err := conn.GetResource(&apigateway.GetResourceInput{
+    ResourceId: aws.String(d.Id()),
+    RestApiId:  aws.String(d.Get("rest_api_id").(string)),
+  })
+
+  if err != nil {
+    if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
+      d.SetId("")
+      return nil
+    }
+    return err
+  }
+
+  d.Set("parent_id", resource.ParentId)
+  d.Set("path_part", resource.PathPart)
+  d.Set("path", resource.Path)
+
+  return nil
+}
+
+func resourceAwsApiGatewayResourceUpdateOperations(d *schema.ResourceData) []*apigateway.PatchOperation {
+  operations :=
make([]*apigateway.PatchOperation, 0) + if d.HasChange("path_part") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/pathPart"), + Value: aws.String(d.Get("path_part").(string)), + }) + } + + if d.HasChange("parent_id") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/parentId"), + Value: aws.String(d.Get("parent_id").(string)), + }) + } + return operations +} + +func resourceAwsApiGatewayResourceUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + log.Printf("[DEBUG] Updating API Gateway Resource %s", d.Id()) + _, err := conn.UpdateResource(&apigateway.UpdateResourceInput{ + ResourceId: aws.String(d.Id()), + RestApiId: aws.String(d.Get("rest_api_id").(string)), + PatchOperations: resourceAwsApiGatewayResourceUpdateOperations(d), + }) + + if err != nil { + return err + } + + return resourceAwsApiGatewayResourceRead(d, meta) +} + +func resourceAwsApiGatewayResourceDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + log.Printf("[DEBUG] Deleting API Gateway Resource: %s", d.Id()) + + return resource.Retry(5*time.Minute, func() *resource.RetryError { + log.Printf("[DEBUG] schema is %#v", d) + _, err := conn.DeleteResource(&apigateway.DeleteResourceInput{ + ResourceId: aws.String(d.Id()), + RestApiId: aws.String(d.Get("rest_api_id").(string)), + }) + if err == nil { + return nil + } + + if apigatewayErr, ok := err.(awserr.Error); ok && apigatewayErr.Code() == "NotFoundException" { + return nil + } + + return resource.NonRetryableError(err) + }) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_rest_api.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_rest_api.go new file mode 100644 index 000000000..02d84ded4 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_rest_api.go @@ -0,0 +1,189 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/apigateway" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsApiGatewayRestApi() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsApiGatewayRestApiCreate, + Read: resourceAwsApiGatewayRestApiRead, + Update: resourceAwsApiGatewayRestApiUpdate, + Delete: resourceAwsApiGatewayRestApiDelete, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + }, + + "binary_media_types": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "root_resource_id": { + Type: schema.TypeString, + Computed: true, + }, + + "created_date": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsApiGatewayRestApiCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + log.Printf("[DEBUG] Creating API Gateway") + + var description *string + if d.Get("description").(string) != "" { + description = aws.String(d.Get("description").(string)) + } + + params := &apigateway.CreateRestApiInput{ + Name: aws.String(d.Get("name").(string)), + Description: description, + } + + 
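// binary_media_types is only forwarded when configured: d.GetOk
+  // reports false for an unset (or empty) list, so the API default of
+  // no binary types is preserved.
+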
binaryMediaTypes, binaryMediaTypesOk := d.GetOk("binary_media_types") + if binaryMediaTypesOk { + params.BinaryMediaTypes = expandStringList(binaryMediaTypes.([]interface{})) + } + + gateway, err := conn.CreateRestApi(params) + if err != nil { + return fmt.Errorf("Error creating API Gateway: %s", err) + } + + d.SetId(*gateway.Id) + + if err = resourceAwsApiGatewayRestApiRefreshResources(d, meta); err != nil { + return err + } + + return resourceAwsApiGatewayRestApiRead(d, meta) +} + +func resourceAwsApiGatewayRestApiRefreshResources(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + resp, err := conn.GetResources(&apigateway.GetResourcesInput{ + RestApiId: aws.String(d.Id()), + }) + if err != nil { + return err + } + + for _, item := range resp.Items { + if *item.Path == "/" { + d.Set("root_resource_id", item.Id) + break + } + } + + return nil +} + +func resourceAwsApiGatewayRestApiRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + log.Printf("[DEBUG] Reading API Gateway %s", d.Id()) + + api, err := conn.GetRestApi(&apigateway.GetRestApiInput{ + RestApiId: aws.String(d.Id()), + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { + d.SetId("") + return nil + } + return err + } + + d.Set("name", api.Name) + d.Set("description", api.Description) + d.Set("binary_media_types", api.BinaryMediaTypes) + + if err := d.Set("created_date", api.CreatedDate.Format(time.RFC3339)); err != nil { + log.Printf("[DEBUG] Error setting created_date: %s", err) + } + + return nil +} + +func resourceAwsApiGatewayRestApiUpdateOperations(d *schema.ResourceData) []*apigateway.PatchOperation { + operations := make([]*apigateway.PatchOperation, 0) + + if d.HasChange("name") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/name"), + Value: aws.String(d.Get("name").(string)), + }) + } + + if d.HasChange("description") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/description"), + Value: aws.String(d.Get("description").(string)), + }) + } + + return operations +} + +func resourceAwsApiGatewayRestApiUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + log.Printf("[DEBUG] Updating API Gateway %s", d.Id()) + + _, err := conn.UpdateRestApi(&apigateway.UpdateRestApiInput{ + RestApiId: aws.String(d.Id()), + PatchOperations: resourceAwsApiGatewayRestApiUpdateOperations(d), + }) + + if err != nil { + return err + } + log.Printf("[DEBUG] Updated API Gateway %s", d.Id()) + + return resourceAwsApiGatewayRestApiRead(d, meta) +} + +func resourceAwsApiGatewayRestApiDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + log.Printf("[DEBUG] Deleting API Gateway: %s", d.Id()) + + return resource.Retry(10*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteRestApi(&apigateway.DeleteRestApiInput{ + RestApiId: aws.String(d.Id()), + }) + if err == nil { + return nil + } + + if apigatewayErr, ok := err.(awserr.Error); ok && apigatewayErr.Code() == "NotFoundException" { + return nil + } + + return resource.NonRetryableError(err) + }) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_stage.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_stage.go new file mode 100644 index 
000000000..1b8579e3d --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_stage.go @@ -0,0 +1,342 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/apigateway" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsApiGatewayStage() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsApiGatewayStageCreate, + Read: resourceAwsApiGatewayStageRead, + Update: resourceAwsApiGatewayStageUpdate, + Delete: resourceAwsApiGatewayStageDelete, + + Schema: map[string]*schema.Schema{ + "cache_cluster_enabled": { + Type: schema.TypeBool, + Optional: true, + }, + "cache_cluster_size": { + Type: schema.TypeString, + Optional: true, + }, + "client_certificate_id": { + Type: schema.TypeString, + Optional: true, + }, + "deployment_id": { + Type: schema.TypeString, + Required: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "documentation_version": { + Type: schema.TypeString, + Optional: true, + }, + "rest_api_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "stage_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "variables": { + Type: schema.TypeMap, + Optional: true, + }, + }, + } +} + +func resourceAwsApiGatewayStageCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + d.Partial(true) + + input := apigateway.CreateStageInput{ + RestApiId: aws.String(d.Get("rest_api_id").(string)), + StageName: aws.String(d.Get("stage_name").(string)), + DeploymentId: aws.String(d.Get("deployment_id").(string)), + } + + waitForCache := false + if v, ok := d.GetOk("cache_cluster_enabled"); ok { + input.CacheClusterEnabled = aws.Bool(v.(bool)) + waitForCache = true + } + if v, ok := d.GetOk("cache_cluster_size"); ok { + input.CacheClusterSize = aws.String(v.(string)) + waitForCache = true + } + if v, ok := d.GetOk("description"); ok { + input.Description = aws.String(v.(string)) + } + if v, ok := d.GetOk("documentation_version"); ok { + input.DocumentationVersion = aws.String(v.(string)) + } + if vars, ok := d.GetOk("variables"); ok { + variables := make(map[string]string, 0) + for k, v := range vars.(map[string]interface{}) { + variables[k] = v.(string) + } + input.Variables = aws.StringMap(variables) + } + + out, err := conn.CreateStage(&input) + if err != nil { + return fmt.Errorf("Error creating API Gateway Stage: %s", err) + } + + d.SetId(fmt.Sprintf("ags-%s-%s", d.Get("rest_api_id").(string), d.Get("stage_name").(string))) + + d.SetPartial("rest_api_id") + d.SetPartial("stage_name") + d.SetPartial("deployment_id") + d.SetPartial("description") + d.SetPartial("variables") + + if waitForCache && *out.CacheClusterStatus != "NOT_AVAILABLE" { + stateConf := &resource.StateChangeConf{ + Pending: []string{ + "CREATE_IN_PROGRESS", + "DELETE_IN_PROGRESS", + "FLUSH_IN_PROGRESS", + }, + Target: []string{"AVAILABLE"}, + Refresh: apiGatewayStageCacheRefreshFunc(conn, + d.Get("rest_api_id").(string), + d.Get("stage_name").(string)), + Timeout: 90 * time.Minute, + } + + _, err := stateConf.WaitForState() + if err != nil { + return err + } + } + + d.SetPartial("cache_cluster_enabled") + d.SetPartial("cache_cluster_size") + d.Partial(false) + + if _, ok := d.GetOk("client_certificate_id"); ok { + return resourceAwsApiGatewayStageUpdate(d, meta) + } + 
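// No client certificate to attach, so creation is complete; read the
+  // stage back to populate computed state.
+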
return resourceAwsApiGatewayStageRead(d, meta) +} + +func resourceAwsApiGatewayStageRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + log.Printf("[DEBUG] Reading API Gateway Stage %s", d.Id()) + input := apigateway.GetStageInput{ + RestApiId: aws.String(d.Get("rest_api_id").(string)), + StageName: aws.String(d.Get("stage_name").(string)), + } + stage, err := conn.GetStage(&input) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { + log.Printf("[WARN] API Gateway Stage %s not found, removing", d.Id()) + d.SetId("") + return nil + } + return err + } + log.Printf("[DEBUG] Received API Gateway Stage: %s", stage) + + d.Set("client_certificate_id", stage.ClientCertificateId) + + if stage.CacheClusterStatus != nil && *stage.CacheClusterStatus == "DELETE_IN_PROGRESS" { + d.Set("cache_cluster_enabled", false) + d.Set("cache_cluster_size", nil) + } else { + d.Set("cache_cluster_enabled", stage.CacheClusterEnabled) + d.Set("cache_cluster_size", stage.CacheClusterSize) + } + + d.Set("deployment_id", stage.DeploymentId) + d.Set("description", stage.Description) + d.Set("documentation_version", stage.DocumentationVersion) + d.Set("variables", aws.StringValueMap(stage.Variables)) + + return nil +} + +func resourceAwsApiGatewayStageUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + d.Partial(true) + operations := make([]*apigateway.PatchOperation, 0) + waitForCache := false + if d.HasChange("cache_cluster_enabled") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/cacheClusterEnabled"), + Value: aws.String(fmt.Sprintf("%t", d.Get("cache_cluster_enabled").(bool))), + }) + waitForCache = true + } + if d.HasChange("cache_cluster_size") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/cacheClusterSize"), + Value: aws.String(d.Get("cache_cluster_size").(string)), + }) + waitForCache = true + } + if d.HasChange("client_certificate_id") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/clientCertificateId"), + Value: aws.String(d.Get("client_certificate_id").(string)), + }) + } + if d.HasChange("deployment_id") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/deploymentId"), + Value: aws.String(d.Get("deployment_id").(string)), + }) + } + if d.HasChange("description") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/description"), + Value: aws.String(d.Get("description").(string)), + }) + } + if d.HasChange("documentation_version") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/documentationVersion"), + Value: aws.String(d.Get("documentation_version").(string)), + }) + } + if d.HasChange("variables") { + o, n := d.GetChange("variables") + oldV := o.(map[string]interface{}) + newV := n.(map[string]interface{}) + operations = append(operations, diffVariablesOps("/variables/", oldV, newV)...) 
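+    // For example (hypothetical values): changing {"env": "dev"} to
+    // {"region": "us-east-1"} yields a remove op for /variables/env and
+    // a replace op for /variables/region.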
+  }
+
+  input := apigateway.UpdateStageInput{
+    RestApiId:       aws.String(d.Get("rest_api_id").(string)),
+    StageName:       aws.String(d.Get("stage_name").(string)),
+    PatchOperations: operations,
+  }
+  log.Printf("[DEBUG] Updating API Gateway Stage: %s", input)
+  out, err := conn.UpdateStage(&input)
+  if err != nil {
+    return fmt.Errorf("Updating API Gateway Stage failed: %s", err)
+  }
+
+  d.SetPartial("client_certificate_id")
+  d.SetPartial("deployment_id")
+  d.SetPartial("description")
+  d.SetPartial("variables")
+
+  if waitForCache && *out.CacheClusterStatus != "NOT_AVAILABLE" {
+    stateConf := &resource.StateChangeConf{
+      Pending: []string{
+        "CREATE_IN_PROGRESS",
+        "FLUSH_IN_PROGRESS",
+      },
+      Target: []string{
+        "AVAILABLE",
+        // There's an AWS API bug (raised & confirmed in Sep 2016 by support)
+        // which causes the stage to remain in deletion state forever
+        "DELETE_IN_PROGRESS",
+      },
+      Refresh: apiGatewayStageCacheRefreshFunc(conn,
+        d.Get("rest_api_id").(string),
+        d.Get("stage_name").(string)),
+      Timeout: 30 * time.Minute,
+    }
+
+    _, err := stateConf.WaitForState()
+    if err != nil {
+      return err
+    }
+  }
+
+  d.SetPartial("cache_cluster_enabled")
+  d.SetPartial("cache_cluster_size")
+  d.Partial(false)
+
+  return resourceAwsApiGatewayStageRead(d, meta)
+}
+
+func diffVariablesOps(prefix string, oldVars, newVars map[string]interface{}) []*apigateway.PatchOperation {
+  ops := make([]*apigateway.PatchOperation, 0)
+
+  for k := range oldVars {
+    if _, ok := newVars[k]; !ok {
+      ops = append(ops, &apigateway.PatchOperation{
+        Op:   aws.String("remove"),
+        Path: aws.String(prefix + k),
+      })
+    }
+  }
+
+  for k, v := range newVars {
+    newValue := v.(string)
+
+    if oldV, ok := oldVars[k]; ok {
+      oldValue := oldV.(string)
+      if oldValue == newValue {
+        continue
+      }
+    }
+    ops = append(ops, &apigateway.PatchOperation{
+      Op:    aws.String("replace"),
+      Path:  aws.String(prefix + k),
+      Value: aws.String(newValue),
+    })
+  }
+
+  return ops
+}
+
+func apiGatewayStageCacheRefreshFunc(conn *apigateway.APIGateway, apiId, stageName string) func() (interface{}, string, error) {
+  return func() (interface{}, string, error) {
+    input := apigateway.GetStageInput{
+      RestApiId: aws.String(apiId),
+      StageName: aws.String(stageName),
+    }
+    out, err := conn.GetStage(&input)
+    if err != nil {
+      return 42, "", err
+    }
+
+    return out, *out.CacheClusterStatus, nil
+  }
+}
+
+func resourceAwsApiGatewayStageDelete(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).apigateway
+  log.Printf("[DEBUG] Deleting API Gateway Stage: %s", d.Id())
+  input := apigateway.DeleteStageInput{
+    RestApiId: aws.String(d.Get("rest_api_id").(string)),
+    StageName: aws.String(d.Get("stage_name").(string)),
+  }
+  _, err := conn.DeleteStage(&input)
+  if err != nil {
+    return fmt.Errorf("Deleting API Gateway Stage failed: %s", err)
+  }
+
+  return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_usage_plan.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_usage_plan.go
new file mode 100644
index 000000000..0d4930d08
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_usage_plan.go
@@ -0,0 +1,499 @@
+package aws
+
+import (
+  "errors"
+  "fmt"
+  "log"
+  "strconv"
+  "time"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/aws/awserr"
+  "github.com/aws/aws-sdk-go/service/apigateway"
+  "github.com/hashicorp/terraform/helper/resource"
+
"github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsApiGatewayUsagePlan() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsApiGatewayUsagePlanCreate, + Read: resourceAwsApiGatewayUsagePlanRead, + Update: resourceAwsApiGatewayUsagePlanUpdate, + Delete: resourceAwsApiGatewayUsagePlanDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, // Required since not addable nor removable afterwards + }, + + "description": { + Type: schema.TypeString, + Optional: true, + }, + + "api_stages": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "api_id": { + Type: schema.TypeString, + Required: true, + }, + + "stage": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + + "quota_settings": { + Type: schema.TypeSet, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Required: true, // Required as not removable singularly + }, + + "offset": { + Type: schema.TypeInt, + Default: 0, + Optional: true, + }, + + "period": { + Type: schema.TypeString, + Required: true, // Required as not removable + ValidateFunc: validateApiGatewayUsagePlanQuotaSettingsPeriod, + }, + }, + }, + }, + + "throttle_settings": { + Type: schema.TypeSet, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "burst_limit": { + Type: schema.TypeInt, + Default: 0, + Optional: true, + }, + + "rate_limit": { + Type: schema.TypeInt, + Default: 0, + Optional: true, + }, + }, + }, + }, + + "product_code": { + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func resourceAwsApiGatewayUsagePlanCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + log.Print("[DEBUG] Creating API Gateway Usage Plan") + + params := &apigateway.CreateUsagePlanInput{ + Name: aws.String(d.Get("name").(string)), + } + + if v, ok := d.GetOk("description"); ok { + params.Description = aws.String(v.(string)) + } + + if s, ok := d.GetOk("api_stages"); ok { + stages := s.([]interface{}) + as := make([]*apigateway.ApiStage, 0) + + for _, v := range stages { + sv := v.(map[string]interface{}) + stage := &apigateway.ApiStage{} + + if v, ok := sv["api_id"].(string); ok && v != "" { + stage.ApiId = aws.String(v) + } + + if v, ok := sv["stage"].(string); ok && v != "" { + stage.Stage = aws.String(v) + } + + as = append(as, stage) + } + + if len(as) > 0 { + params.ApiStages = as + } + } + + if v, ok := d.GetOk("quota_settings"); ok { + settings := v.(*schema.Set).List() + q, ok := settings[0].(map[string]interface{}) + + if errors := validateApiGatewayUsagePlanQuotaSettings(q); len(errors) > 0 { + return fmt.Errorf("Error validating the quota settings: %v", errors) + } + + if !ok { + return errors.New("At least one field is expected inside quota_settings") + } + + qs := &apigateway.QuotaSettings{} + + if sv, ok := q["limit"].(int); ok { + qs.Limit = aws.Int64(int64(sv)) + } + + if sv, ok := q["offset"].(int); ok { + qs.Offset = aws.Int64(int64(sv)) + } + + if sv, ok := q["period"].(string); ok && sv != "" { + qs.Period = aws.String(sv) + } + + params.Quota = qs + } + + if v, ok := d.GetOk("throttle_settings"); ok { + settings := v.(*schema.Set).List() + q, ok := settings[0].(map[string]interface{}) + + if !ok { + return errors.New("At least one field is expected 
inside throttle_settings") + } + + ts := &apigateway.ThrottleSettings{} + + if sv, ok := q["burst_limit"].(int); ok { + ts.BurstLimit = aws.Int64(int64(sv)) + } + + if sv, ok := q["rate_limit"].(float64); ok { + ts.RateLimit = aws.Float64(float64(sv)) + } + + params.Throttle = ts + } + + up, err := conn.CreateUsagePlan(params) + if err != nil { + return fmt.Errorf("Error creating API Gateway Usage Plan: %s", err) + } + + d.SetId(*up.Id) + + // Handle case of adding the product code since not addable when + // creating the Usage Plan initially. + if v, ok := d.GetOk("product_code"); ok { + updateParameters := &apigateway.UpdateUsagePlanInput{ + UsagePlanId: aws.String(d.Id()), + PatchOperations: []*apigateway.PatchOperation{ + { + Op: aws.String("add"), + Path: aws.String("/productCode"), + Value: aws.String(v.(string)), + }, + }, + } + + up, err = conn.UpdateUsagePlan(updateParameters) + if err != nil { + return fmt.Errorf("Error creating the API Gateway Usage Plan product code: %s", err) + } + } + + return resourceAwsApiGatewayUsagePlanRead(d, meta) +} + +func resourceAwsApiGatewayUsagePlanRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + log.Printf("[DEBUG] Reading API Gateway Usage Plan: %s", d.Id()) + + up, err := conn.GetUsagePlan(&apigateway.GetUsagePlanInput{ + UsagePlanId: aws.String(d.Id()), + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { + d.SetId("") + return nil + } + return err + } + + d.Set("name", up.Name) + d.Set("description", up.Description) + d.Set("product_code", up.ProductCode) + + if up.ApiStages != nil { + if err := d.Set("api_stages", flattenApiGatewayUsageApiStages(up.ApiStages)); err != nil { + return fmt.Errorf("[DEBUG] Error setting api_stages error: %#v", err) + } + } + + if up.Throttle != nil { + if err := d.Set("throttle_settings", flattenApiGatewayUsagePlanThrottling(up.Throttle)); err != nil { + return fmt.Errorf("[DEBUG] Error setting throttle_settings error: %#v", err) + } + } + + if up.Quota != nil { + if err := d.Set("quota_settings", flattenApiGatewayUsagePlanQuota(up.Quota)); err != nil { + return fmt.Errorf("[DEBUG] Error setting quota_settings error: %#v", err) + } + } + + return nil +} + +func resourceAwsApiGatewayUsagePlanUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + log.Print("[DEBUG] Updating API Gateway Usage Plan") + + operations := make([]*apigateway.PatchOperation, 0) + + if d.HasChange("name") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/name"), + Value: aws.String(d.Get("name").(string)), + }) + } + + if d.HasChange("description") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/description"), + Value: aws.String(d.Get("description").(string)), + }) + } + + if d.HasChange("product_code") { + v, ok := d.GetOk("product_code") + + if ok { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/productCode"), + Value: aws.String(v.(string)), + }) + } else { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("remove"), + Path: aws.String("/productCode"), + }) + } + } + + if d.HasChange("api_stages") { + o, n := d.GetChange("api_stages") + old := o.([]interface{}) + new := n.([]interface{}) + + // Remove every stages associated. 
Simpler to remove and add new ones, + // since there are no replacings. + for _, v := range old { + m := v.(map[string]interface{}) + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("remove"), + Path: aws.String("/apiStages"), + Value: aws.String(fmt.Sprintf("%s:%s", m["api_id"].(string), m["stage"].(string))), + }) + } + + // Handle additions + if len(new) > 0 { + for _, v := range new { + m := v.(map[string]interface{}) + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("add"), + Path: aws.String("/apiStages"), + Value: aws.String(fmt.Sprintf("%s:%s", m["api_id"].(string), m["stage"].(string))), + }) + } + } + } + + if d.HasChange("throttle_settings") { + o, n := d.GetChange("throttle_settings") + + os := o.(*schema.Set) + ns := n.(*schema.Set) + diff := ns.Difference(os).List() + + // Handle Removal + if len(diff) == 0 { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("remove"), + Path: aws.String("/throttle"), + }) + } + + if len(diff) > 0 { + d := diff[0].(map[string]interface{}) + + // Handle Replaces + if o != nil && n != nil { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/throttle/rateLimit"), + Value: aws.String(strconv.Itoa(d["rate_limit"].(int))), + }) + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/throttle/burstLimit"), + Value: aws.String(strconv.Itoa(d["burst_limit"].(int))), + }) + } + + // Handle Additions + if o == nil && n != nil { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("add"), + Path: aws.String("/throttle/rateLimit"), + Value: aws.String(strconv.Itoa(d["rate_limit"].(int))), + }) + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("add"), + Path: aws.String("/throttle/burstLimit"), + Value: aws.String(strconv.Itoa(d["burst_limit"].(int))), + }) + } + } + } + + if d.HasChange("quota_settings") { + o, n := d.GetChange("quota_settings") + + os := o.(*schema.Set) + ns := n.(*schema.Set) + diff := ns.Difference(os).List() + + // Handle Removal + if len(diff) == 0 { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("remove"), + Path: aws.String("/quota"), + }) + } + + if len(diff) > 0 { + d := diff[0].(map[string]interface{}) + + if errors := validateApiGatewayUsagePlanQuotaSettings(d); len(errors) > 0 { + return fmt.Errorf("Error validating the quota settings: %v", errors) + } + + // Handle Replaces + if o != nil && n != nil { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/quota/limit"), + Value: aws.String(strconv.Itoa(d["limit"].(int))), + }) + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/quota/offset"), + Value: aws.String(strconv.Itoa(d["offset"].(int))), + }) + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/quota/period"), + Value: aws.String(d["period"].(string)), + }) + } + + // Handle Additions + if o == nil && n != nil { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("add"), + Path: aws.String("/quota/limit"), + Value: aws.String(strconv.Itoa(d["limit"].(int))), + }) + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("add"), + Path: aws.String("/quota/offset"), + Value: 
aws.String(strconv.Itoa(d["offset"].(int))), + }) + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("add"), + Path: aws.String("/quota/period"), + Value: aws.String(d["period"].(string)), + }) + } + } + } + + params := &apigateway.UpdateUsagePlanInput{ + UsagePlanId: aws.String(d.Id()), + PatchOperations: operations, + } + + _, err := conn.UpdateUsagePlan(params) + if err != nil { + return fmt.Errorf("Error updating API Gateway Usage Plan: %s", err) + } + + return resourceAwsApiGatewayUsagePlanRead(d, meta) +} + +func resourceAwsApiGatewayUsagePlanDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + // Removing existing api stages associated + if apistages, ok := d.GetOk("api_stages"); ok { + log.Printf("[DEBUG] Deleting API Stages associated with Usage Plan: %s", d.Id()) + stages := apistages.([]interface{}) + operations := []*apigateway.PatchOperation{} + + for _, v := range stages { + sv := v.(map[string]interface{}) + + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("remove"), + Path: aws.String("/apiStages"), + Value: aws.String(fmt.Sprintf("%s:%s", sv["api_id"].(string), sv["stage"].(string))), + }) + } + + _, err := conn.UpdateUsagePlan(&apigateway.UpdateUsagePlanInput{ + UsagePlanId: aws.String(d.Id()), + PatchOperations: operations, + }) + if err != nil { + return fmt.Errorf("Error removing API Stages associated with Usage Plan: %s", err) + } + } + + log.Printf("[DEBUG] Deleting API Gateway Usage Plan: %s", d.Id()) + + return resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteUsagePlan(&apigateway.DeleteUsagePlanInput{ + UsagePlanId: aws.String(d.Id()), + }) + + if err == nil { + return nil + } + + return resource.NonRetryableError(err) + }) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_usage_plan_key.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_usage_plan_key.go new file mode 100644 index 000000000..2433da48b --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_api_gateway_usage_plan_key.go @@ -0,0 +1,114 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/apigateway" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsApiGatewayUsagePlanKey() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsApiGatewayUsagePlanKeyCreate, + Read: resourceAwsApiGatewayUsagePlanKeyRead, + Delete: resourceAwsApiGatewayUsagePlanKeyDelete, + + Schema: map[string]*schema.Schema{ + "key_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "key_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "usage_plan_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": { + Type: schema.TypeString, + Computed: true, + }, + + "value": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsApiGatewayUsagePlanKeyCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + log.Print("[DEBUG] Creating API Gateway Usage Plan Key") + + params := &apigateway.CreateUsagePlanKeyInput{ + KeyId: aws.String(d.Get("key_id").(string)), + KeyType: aws.String(d.Get("key_type").(string)), + 
UsagePlanId: aws.String(d.Get("usage_plan_id").(string)), + } + + up, err := conn.CreateUsagePlanKey(params) + if err != nil { + return fmt.Errorf("Error creating API Gateway Usage Plan Key: %s", err) + } + + d.SetId(*up.Id) + + return resourceAwsApiGatewayUsagePlanKeyRead(d, meta) +} + +func resourceAwsApiGatewayUsagePlanKeyRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + log.Printf("[DEBUG] Reading API Gateway Usage Plan Key: %s", d.Id()) + + up, err := conn.GetUsagePlanKey(&apigateway.GetUsagePlanKeyInput{ + UsagePlanId: aws.String(d.Get("usage_plan_id").(string)), + KeyId: aws.String(d.Get("key_id").(string)), + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { + d.SetId("") + return nil + } + return err + } + + d.Set("name", up.Name) + d.Set("value", up.Value) + + return nil +} + +func resourceAwsApiGatewayUsagePlanKeyDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigateway + + log.Printf("[DEBUG] Deleting API Gateway Usage Plan Key: %s", d.Id()) + + return resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteUsagePlanKey(&apigateway.DeleteUsagePlanKeyInput{ + UsagePlanId: aws.String(d.Get("usage_plan_id").(string)), + KeyId: aws.String(d.Get("key_id").(string)), + }) + if err == nil { + return nil + } + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { + return nil + } + + return resource.NonRetryableError(err) + }) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_app_cookie_stickiness_policy.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_app_cookie_stickiness_policy.go new file mode 100644 index 000000000..ecdc8eff4 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_app_cookie_stickiness_policy.go @@ -0,0 +1,215 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elb" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsAppCookieStickinessPolicy() *schema.Resource { + return &schema.Resource{ + // There is no concept of "updating" an App Stickiness policy in + // the AWS API. 
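+	// Accordingly, every argument below is marked ForceNew: changing
+	// any of them destroys the policy and creates a replacement.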
+ Create: resourceAwsAppCookieStickinessPolicyCreate, + Read: resourceAwsAppCookieStickinessPolicyRead, + Delete: resourceAwsAppCookieStickinessPolicyDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + value := v.(string) + if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) { + es = append(es, fmt.Errorf( + "only alphanumeric characters and hyphens allowed in %q", k)) + } + return + }, + }, + + "load_balancer": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "lb_port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "cookie_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsAppCookieStickinessPolicyCreate(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + // Provision the AppStickinessPolicy + acspOpts := &elb.CreateAppCookieStickinessPolicyInput{ + CookieName: aws.String(d.Get("cookie_name").(string)), + LoadBalancerName: aws.String(d.Get("load_balancer").(string)), + PolicyName: aws.String(d.Get("name").(string)), + } + + if _, err := elbconn.CreateAppCookieStickinessPolicy(acspOpts); err != nil { + return fmt.Errorf("Error creating AppCookieStickinessPolicy: %s", err) + } + + setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ + LoadBalancerName: aws.String(d.Get("load_balancer").(string)), + LoadBalancerPort: aws.Int64(int64(d.Get("lb_port").(int))), + PolicyNames: []*string{aws.String(d.Get("name").(string))}, + } + + if _, err := elbconn.SetLoadBalancerPoliciesOfListener(setLoadBalancerOpts); err != nil { + return fmt.Errorf("Error setting AppCookieStickinessPolicy: %s", err) + } + + d.SetId(fmt.Sprintf("%s:%d:%s", + *acspOpts.LoadBalancerName, + *setLoadBalancerOpts.LoadBalancerPort, + *acspOpts.PolicyName)) + return nil +} + +func resourceAwsAppCookieStickinessPolicyRead(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + lbName, lbPort, policyName := resourceAwsAppCookieStickinessPolicyParseId(d.Id()) + + request := &elb.DescribeLoadBalancerPoliciesInput{ + LoadBalancerName: aws.String(lbName), + PolicyNames: []*string{aws.String(policyName)}, + } + + getResp, err := elbconn.DescribeLoadBalancerPolicies(request) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok { + if ec2err.Code() == "PolicyNotFound" || ec2err.Code() == "LoadBalancerNotFound" { + d.SetId("") + } + return nil + } + return fmt.Errorf("Error retrieving policy: %s", err) + } + if len(getResp.PolicyDescriptions) != 1 { + return fmt.Errorf("Unable to find policy %#v", getResp.PolicyDescriptions) + } + + // we know the policy exists now, but we have to check if it's assigned to a listener + assigned, err := resourceAwsELBSticknessPolicyAssigned(policyName, lbName, lbPort, elbconn) + if err != nil { + return err + } + if !assigned { + // policy exists, but isn't assigned to a listener + log.Printf("[DEBUG] policy '%s' exists, but isn't assigned to a listener", policyName) + d.SetId("") + return nil + } + + // We can get away with this because there's only one attribute, the + // cookie expiration, in these descriptions. 
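+	// For an app cookie policy that single attribute is CookieName, and we
+	// verify the attribute name below before writing it to state.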
+ policyDesc := getResp.PolicyDescriptions[0] + cookieAttr := policyDesc.PolicyAttributeDescriptions[0] + if *cookieAttr.AttributeName != "CookieName" { + return fmt.Errorf("Unable to find cookie Name.") + } + d.Set("cookie_name", cookieAttr.AttributeValue) + + d.Set("name", policyName) + d.Set("load_balancer", lbName) + d.Set("lb_port", lbPort) + + return nil +} + +// Determine if a particular policy is assigned to an ELB listener +func resourceAwsELBSticknessPolicyAssigned(policyName, lbName, lbPort string, elbconn *elb.ELB) (bool, error) { + describeElbOpts := &elb.DescribeLoadBalancersInput{ + LoadBalancerNames: []*string{aws.String(lbName)}, + } + describeResp, err := elbconn.DescribeLoadBalancers(describeElbOpts) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok { + if ec2err.Code() == "LoadBalancerNotFound" { + return false, nil + } + } + return false, fmt.Errorf("Error retrieving ELB description: %s", err) + } + + if len(describeResp.LoadBalancerDescriptions) != 1 { + return false, fmt.Errorf("Unable to find ELB: %#v", describeResp.LoadBalancerDescriptions) + } + + lb := describeResp.LoadBalancerDescriptions[0] + assigned := false + for _, listener := range lb.ListenerDescriptions { + if lbPort != strconv.Itoa(int(*listener.Listener.LoadBalancerPort)) { + continue + } + + for _, name := range listener.PolicyNames { + if policyName == *name { + assigned = true + break + } + } + } + + return assigned, nil +} + +func resourceAwsAppCookieStickinessPolicyDelete(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + lbName, _, policyName := resourceAwsAppCookieStickinessPolicyParseId(d.Id()) + + // Perversely, if we Set an empty list of PolicyNames, we detach the + // policies attached to a listener, which is required to delete the + // policy itself. + setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ + LoadBalancerName: aws.String(d.Get("load_balancer").(string)), + LoadBalancerPort: aws.Int64(int64(d.Get("lb_port").(int))), + PolicyNames: []*string{}, + } + + if _, err := elbconn.SetLoadBalancerPoliciesOfListener(setLoadBalancerOpts); err != nil { + return fmt.Errorf("Error removing AppCookieStickinessPolicy: %s", err) + } + + request := &elb.DeleteLoadBalancerPolicyInput{ + LoadBalancerName: aws.String(lbName), + PolicyName: aws.String(policyName), + } + + if _, err := elbconn.DeleteLoadBalancerPolicy(request); err != nil { + return fmt.Errorf("Error deleting App stickiness policy %s: %s", d.Id(), err) + } + return nil +} + +// resourceAwsAppCookieStickinessPolicyParseId takes an ID and parses it into +// it's constituent parts. You need three axes (LB name, policy name, and LB +// port) to create or identify a stickiness policy in AWS's API. 
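+// For example, an ID of "my-elb:80:my-policy" parses into the triple
+// ("my-elb", "80", "my-policy"), matching the "%s:%d:%s" format used in
+// SetId above.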
+func resourceAwsAppCookieStickinessPolicyParseId(id string) (string, string, string) { + parts := strings.SplitN(id, ":", 3) + return parts[0], parts[1], parts[2] +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appautoscaling_policy.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appautoscaling_policy.go new file mode 100644 index 000000000..e75e76152 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appautoscaling_policy.go @@ -0,0 +1,327 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "strconv" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/applicationautoscaling" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsAppautoscalingPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsAppautoscalingPolicyCreate, + Read: resourceAwsAppautoscalingPolicyRead, + Update: resourceAwsAppautoscalingPolicyUpdate, + Delete: resourceAwsAppautoscalingPolicyDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + // https://github.com/boto/botocore/blob/9f322b1/botocore/data/autoscaling/2011-01-01/service-2.json#L1862-L1873 + value := v.(string) + if len(value) > 255 { + errors = append(errors, fmt.Errorf("%s cannot be longer than 255 characters", k)) + } + return + }, + }, + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "policy_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "StepScaling", + }, + "resource_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "scalable_dimension": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateAppautoscalingScalableDimension, + }, + "service_namespace": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateAppautoscalingServiceNamespace, + }, + "adjustment_type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "cooldown": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + "metric_aggregation_type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "min_adjustment_magnitude": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + "alarms": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "step_adjustment": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metric_interval_lower_bound": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "metric_interval_upper_bound": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "scaling_adjustment": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + }, + }, + Set: resourceAwsAppautoscalingAdjustmentHash, + }, + }, + } +} + +func resourceAwsAppautoscalingPolicyCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).appautoscalingconn + + params, err := getAwsAppautoscalingPutScalingPolicyInput(d) + if err != nil { + return err + } + + log.Printf("[DEBUG] ApplicationAutoScaling PutScalingPolicy: %#v", params) + resp, err := conn.PutScalingPolicy(¶ms) + if 
err != nil { + return fmt.Errorf("Error putting scaling policy: %s", err) + } + + d.Set("arn", resp.PolicyARN) + d.SetId(d.Get("name").(string)) + log.Printf("[INFO] ApplicationAutoScaling scaling PolicyARN: %s", d.Get("arn").(string)) + + return resourceAwsAppautoscalingPolicyRead(d, meta) +} + +func resourceAwsAppautoscalingPolicyRead(d *schema.ResourceData, meta interface{}) error { + p, err := getAwsAppautoscalingPolicy(d, meta) + if err != nil { + return err + } + if p == nil { + d.SetId("") + return nil + } + + log.Printf("[DEBUG] Read ApplicationAutoScaling policy: %s, SP: %s, Obj: %s", d.Get("name"), d.Get("name"), p) + + d.Set("arn", p.PolicyARN) + d.Set("name", p.PolicyName) + d.Set("policy_type", p.PolicyType) + d.Set("resource_id", p.ResourceId) + d.Set("scalable_dimension", p.ScalableDimension) + d.Set("service_namespace", p.ServiceNamespace) + d.Set("alarms", p.Alarms) + d.Set("step_scaling_policy_configuration", p.StepScalingPolicyConfiguration) + + return nil +} + +func resourceAwsAppautoscalingPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).appautoscalingconn + + params, inputErr := getAwsAppautoscalingPutScalingPolicyInput(d) + if inputErr != nil { + return inputErr + } + + log.Printf("[DEBUG] Application Autoscaling Update Scaling Policy: %#v", params) + _, err := conn.PutScalingPolicy(¶ms) + if err != nil { + return err + } + + return resourceAwsAppautoscalingPolicyRead(d, meta) +} + +func resourceAwsAppautoscalingPolicyDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).appautoscalingconn + p, err := getAwsAppautoscalingPolicy(d, meta) + if err != nil { + return fmt.Errorf("Error getting policy: %s", err) + } + if p == nil { + return nil + } + + params := applicationautoscaling.DeleteScalingPolicyInput{ + PolicyName: aws.String(d.Get("name").(string)), + ResourceId: aws.String(d.Get("resource_id").(string)), + ScalableDimension: aws.String(d.Get("scalable_dimension").(string)), + ServiceNamespace: aws.String(d.Get("service_namespace").(string)), + } + log.Printf("[DEBUG] Deleting Application AutoScaling Policy opts: %#v", params) + if _, err := conn.DeleteScalingPolicy(¶ms); err != nil { + return fmt.Errorf("Application AutoScaling Policy: %s", err) + } + + d.SetId("") + return nil +} + +// Takes the result of flatmap.Expand for an array of step adjustments and +// returns a []*applicationautoscaling.StepAdjustment. +func expandAppautoscalingStepAdjustments(configured []interface{}) ([]*applicationautoscaling.StepAdjustment, error) { + var adjustments []*applicationautoscaling.StepAdjustment + + // Loop over our configured step adjustments and create an array + // of aws-sdk-go compatible objects. We're forced to convert strings + // to floats here because there's no way to detect whether or not + // an uninitialized, optional schema element is "0.0" deliberately. + // With strings, we can test for "", which is definitely an empty + // struct value. 
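+	// For example, a configured step of
+	//   {metric_interval_lower_bound = "1.0", scaling_adjustment = 2}
+	// becomes a StepAdjustment with MetricIntervalLowerBound 1.0 and
+	// ScalingAdjustment 2, while an empty bound string is simply left unset.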
+ for _, raw := range configured { + data := raw.(map[string]interface{}) + a := &applicationautoscaling.StepAdjustment{ + ScalingAdjustment: aws.Int64(int64(data["scaling_adjustment"].(int))), + } + if data["metric_interval_lower_bound"] != "" { + bound := data["metric_interval_lower_bound"] + switch bound := bound.(type) { + case string: + f, err := strconv.ParseFloat(bound, 64) + if err != nil { + return nil, fmt.Errorf( + "metric_interval_lower_bound must be a float value represented as a string") + } + a.MetricIntervalLowerBound = aws.Float64(f) + default: + return nil, fmt.Errorf( + "metric_interval_lower_bound isn't a string. This is a bug. Please file an issue.") + } + } + if data["metric_interval_upper_bound"] != "" { + bound := data["metric_interval_upper_bound"] + switch bound := bound.(type) { + case string: + f, err := strconv.ParseFloat(bound, 64) + if err != nil { + return nil, fmt.Errorf( + "metric_interval_upper_bound must be a float value represented as a string") + } + a.MetricIntervalUpperBound = aws.Float64(f) + default: + return nil, fmt.Errorf( + "metric_interval_upper_bound isn't a string. This is a bug. Please file an issue.") + } + } + adjustments = append(adjustments, a) + } + + return adjustments, nil +} + +func getAwsAppautoscalingPutScalingPolicyInput(d *schema.ResourceData) (applicationautoscaling.PutScalingPolicyInput, error) { + var params = applicationautoscaling.PutScalingPolicyInput{ + PolicyName: aws.String(d.Get("name").(string)), + ResourceId: aws.String(d.Get("resource_id").(string)), + } + + if v, ok := d.GetOk("policy_type"); ok { + params.PolicyType = aws.String(v.(string)) + } + + if v, ok := d.GetOk("service_namespace"); ok { + params.ServiceNamespace = aws.String(v.(string)) + } + + if v, ok := d.GetOk("scalable_dimension"); ok { + params.ScalableDimension = aws.String(v.(string)) + } + + var adjustmentSteps []*applicationautoscaling.StepAdjustment + if v, ok := d.GetOk("step_adjustment"); ok { + steps, err := expandAppautoscalingStepAdjustments(v.(*schema.Set).List()) + if err != nil { + return params, fmt.Errorf("metric_interval_lower_bound and metric_interval_upper_bound must be strings!") + } + adjustmentSteps = steps + } + + // build StepScalingPolicyConfiguration + params.StepScalingPolicyConfiguration = &applicationautoscaling.StepScalingPolicyConfiguration{ + AdjustmentType: aws.String(d.Get("adjustment_type").(string)), + Cooldown: aws.Int64(int64(d.Get("cooldown").(int))), + MetricAggregationType: aws.String(d.Get("metric_aggregation_type").(string)), + StepAdjustments: adjustmentSteps, + } + + if v, ok := d.GetOk("min_adjustment_magnitude"); ok { + params.StepScalingPolicyConfiguration.MinAdjustmentMagnitude = aws.Int64(int64(v.(int))) + } + + return params, nil +} + +func getAwsAppautoscalingPolicy(d *schema.ResourceData, meta interface{}) (*applicationautoscaling.ScalingPolicy, error) { + conn := meta.(*AWSClient).appautoscalingconn + + params := applicationautoscaling.DescribeScalingPoliciesInput{ + PolicyNames: []*string{aws.String(d.Get("name").(string))}, + ServiceNamespace: aws.String(d.Get("service_namespace").(string)), + } + + log.Printf("[DEBUG] Application AutoScaling Policy Describe Params: %#v", params) + resp, err := conn.DescribeScalingPolicies(¶ms) + if err != nil { + return nil, fmt.Errorf("Error retrieving scaling policies: %s", err) + } + + // find scaling policy + name := d.Get("name") + for idx, sp := range resp.ScalingPolicies { + if *sp.PolicyName == name { + return resp.ScalingPolicies[idx], nil + } + } + + 
// policy not found + return nil, nil +} + +func resourceAwsAppautoscalingAdjustmentHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + if v, ok := m["metric_interval_lower_bound"]; ok { + buf.WriteString(fmt.Sprintf("%f-", v)) + } + if v, ok := m["metric_interval_upper_bound"]; ok { + buf.WriteString(fmt.Sprintf("%f-", v)) + } + buf.WriteString(fmt.Sprintf("%d-", m["scaling_adjustment"].(int))) + + return hashcode.String(buf.String()) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appautoscaling_target.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appautoscaling_target.go new file mode 100644 index 000000000..2490f4d2d --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_appautoscaling_target.go @@ -0,0 +1,190 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/applicationautoscaling" +) + +func resourceAwsAppautoscalingTarget() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsAppautoscalingTargetCreate, + Read: resourceAwsAppautoscalingTargetRead, + Delete: resourceAwsAppautoscalingTargetDelete, + + Schema: map[string]*schema.Schema{ + "max_capacity": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "min_capacity": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "resource_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "role_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "scalable_dimension": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateAppautoscalingScalableDimension, + }, + "service_namespace": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateAppautoscalingServiceNamespace, + }, + }, + } +} + +func resourceAwsAppautoscalingTargetCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).appautoscalingconn + + var targetOpts applicationautoscaling.RegisterScalableTargetInput + + targetOpts.MaxCapacity = aws.Int64(int64(d.Get("max_capacity").(int))) + targetOpts.MinCapacity = aws.Int64(int64(d.Get("min_capacity").(int))) + targetOpts.ResourceId = aws.String(d.Get("resource_id").(string)) + targetOpts.RoleARN = aws.String(d.Get("role_arn").(string)) + targetOpts.ScalableDimension = aws.String(d.Get("scalable_dimension").(string)) + targetOpts.ServiceNamespace = aws.String(d.Get("service_namespace").(string)) + + log.Printf("[DEBUG] Application autoscaling target create configuration %#v", targetOpts) + var err error + err = resource.Retry(1*time.Minute, func() *resource.RetryError { + _, err = conn.RegisterScalableTarget(&targetOpts) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ValidationException" { + log.Printf("[DEBUG] Retrying creation of Application Autoscaling Scalable Target due to possible issues with IAM: %s", awsErr) + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + + return nil + }) + if err != nil { + return fmt.Errorf("Error creating application autoscaling target: %s", err) + } + + d.SetId(d.Get("resource_id").(string)) + log.Printf("[INFO] Application AutoScaling Target ID: %s", d.Id()) + + return 
resourceAwsAppautoscalingTargetRead(d, meta) +} + +func resourceAwsAppautoscalingTargetRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).appautoscalingconn + + t, err := getAwsAppautoscalingTarget(d, conn) + if err != nil { + return err + } + if t == nil { + log.Printf("[INFO] Application AutoScaling Target %q not found", d.Id()) + d.SetId("") + return nil + } + + d.Set("max_capacity", t.MaxCapacity) + d.Set("min_capacity", t.MinCapacity) + d.Set("resource_id", t.ResourceId) + d.Set("role_arn", t.RoleARN) + d.Set("scalable_dimension", t.ScalableDimension) + d.Set("service_namespace", t.ServiceNamespace) + + return nil +} + +func resourceAwsAppautoscalingTargetDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).appautoscalingconn + + t, err := getAwsAppautoscalingTarget(d, conn) + if err != nil { + return err + } + if t == nil { + log.Printf("[INFO] Application AutoScaling Target %q not found", d.Id()) + d.SetId("") + return nil + } + + log.Printf("[DEBUG] Application AutoScaling Target destroy: %#v", d.Id()) + deleteOpts := applicationautoscaling.DeregisterScalableTargetInput{ + ResourceId: aws.String(d.Get("resource_id").(string)), + ServiceNamespace: aws.String(d.Get("service_namespace").(string)), + ScalableDimension: aws.String(d.Get("scalable_dimension").(string)), + } + + err = resource.Retry(5*time.Minute, func() *resource.RetryError { + if _, err := conn.DeregisterScalableTarget(&deleteOpts); err != nil { + if awserr, ok := err.(awserr.Error); ok { + // @TODO: We should do stuff here depending on the actual error returned + return resource.RetryableError(awserr) + } + // Non recognized error, no retry. + return resource.NonRetryableError(err) + } + // Successful delete + return nil + }) + if err != nil { + return err + } + + return resource.Retry(5*time.Minute, func() *resource.RetryError { + if t, _ = getAwsAppautoscalingTarget(d, conn); t != nil { + return resource.RetryableError( + fmt.Errorf("Application AutoScaling Target still exists")) + } + return nil + }) +} + +func getAwsAppautoscalingTarget( + d *schema.ResourceData, + conn *applicationautoscaling.ApplicationAutoScaling) (*applicationautoscaling.ScalableTarget, error) { + + tgtName := d.Id() + describeOpts := applicationautoscaling.DescribeScalableTargetsInput{ + ResourceIds: []*string{aws.String(tgtName)}, + ServiceNamespace: aws.String(d.Get("service_namespace").(string)), + } + + log.Printf("[DEBUG] Application AutoScaling Target describe configuration: %#v", describeOpts) + describeTargets, err := conn.DescribeScalableTargets(&describeOpts) + if err != nil { + // @TODO: We should probably send something else back if we're trying to access an unknown Resource ID + // targetserr, ok := err.(awserr.Error) + // if ok && targetserr.Code() == "" + return nil, fmt.Errorf("Error retrieving Application AutoScaling Target: %s", err) + } + + for idx, tgt := range describeTargets.ScalableTargets { + if *tgt.ResourceId == tgtName { + return describeTargets.ScalableTargets[idx], nil + } + } + + return nil, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_attachment.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_attachment.go new file mode 100644 index 000000000..c04b9d782 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_attachment.go @@ -0,0 +1,157 @@ +package aws + +import ( + "fmt" + "log" + + 
"github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsAutoscalingAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsAutoscalingAttachmentCreate, + Read: resourceAwsAutoscalingAttachmentRead, + Delete: resourceAwsAutoscalingAttachmentDelete, + + Schema: map[string]*schema.Schema{ + "autoscaling_group_name": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + + "elb": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + + "alb_target_group_arn": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + }, + } +} + +func resourceAwsAutoscalingAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + asgconn := meta.(*AWSClient).autoscalingconn + asgName := d.Get("autoscaling_group_name").(string) + + if v, ok := d.GetOk("elb"); ok { + attachOpts := &autoscaling.AttachLoadBalancersInput{ + AutoScalingGroupName: aws.String(asgName), + LoadBalancerNames: []*string{aws.String(v.(string))}, + } + + log.Printf("[INFO] registering asg %s with ELBs %s", asgName, v.(string)) + + if _, err := asgconn.AttachLoadBalancers(attachOpts); err != nil { + return errwrap.Wrapf(fmt.Sprintf("Failure attaching AutoScaling Group %s with Elastic Load Balancer: %s: {{err}}", asgName, v.(string)), err) + } + } + + if v, ok := d.GetOk("alb_target_group_arn"); ok { + attachOpts := &autoscaling.AttachLoadBalancerTargetGroupsInput{ + AutoScalingGroupName: aws.String(asgName), + TargetGroupARNs: []*string{aws.String(v.(string))}, + } + + log.Printf("[INFO] registering asg %s with ALB Target Group %s", asgName, v.(string)) + + if _, err := asgconn.AttachLoadBalancerTargetGroups(attachOpts); err != nil { + return errwrap.Wrapf(fmt.Sprintf("Failure attaching AutoScaling Group %s with ALB Target Group: %s: {{err}}", asgName, v.(string)), err) + } + } + + d.SetId(resource.PrefixedUniqueId(fmt.Sprintf("%s-", asgName))) + + return resourceAwsAutoscalingAttachmentRead(d, meta) +} + +func resourceAwsAutoscalingAttachmentRead(d *schema.ResourceData, meta interface{}) error { + asgconn := meta.(*AWSClient).autoscalingconn + asgName := d.Get("autoscaling_group_name").(string) + + // Retrieve the ASG properites to get list of associated ELBs + asg, err := getAwsAutoscalingGroup(asgName, asgconn) + + if err != nil { + return err + } + if asg == nil { + log.Printf("[INFO] Autoscaling Group %q not found", asgName) + d.SetId("") + return nil + } + + if v, ok := d.GetOk("elb"); ok { + found := false + for _, i := range asg.LoadBalancerNames { + if v.(string) == *i { + d.Set("elb", v.(string)) + found = true + break + } + } + + if !found { + log.Printf("[WARN] Association for %s was not found in ASG assocation", v.(string)) + d.SetId("") + } + } + + if v, ok := d.GetOk("alb_target_group_arn"); ok { + found := false + for _, i := range asg.TargetGroupARNs { + if v.(string) == *i { + d.Set("alb_target_group_arn", v.(string)) + found = true + break + } + } + + if !found { + log.Printf("[WARN] Association for %s was not found in ASG assocation", v.(string)) + d.SetId("") + } + } + + return nil +} + +func resourceAwsAutoscalingAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + asgconn := meta.(*AWSClient).autoscalingconn + asgName := d.Get("autoscaling_group_name").(string) + + if v, ok := d.GetOk("elb"); ok { + detachOpts := 
&autoscaling.DetachLoadBalancersInput{ + AutoScalingGroupName: aws.String(asgName), + LoadBalancerNames: []*string{aws.String(v.(string))}, + } + + log.Printf("[INFO] Deleting ELB %s association from: %s", v.(string), asgName) + if _, err := asgconn.DetachLoadBalancers(detachOpts); err != nil { + return errwrap.Wrapf(fmt.Sprintf("Failure detaching AutoScaling Group %s with Elastic Load Balancer: %s: {{err}}", asgName, v.(string)), err) + } + } + + if v, ok := d.GetOk("alb_target_group_arn"); ok { + detachOpts := &autoscaling.DetachLoadBalancerTargetGroupsInput{ + AutoScalingGroupName: aws.String(asgName), + TargetGroupARNs: []*string{aws.String(v.(string))}, + } + + log.Printf("[INFO] Deleting ALB Target Group %s association from: %s", v.(string), asgName) + if _, err := asgconn.DetachLoadBalancerTargetGroups(detachOpts); err != nil { + return errwrap.Wrapf(fmt.Sprintf("Failure detaching AutoScaling Group %s with ALB Target Group: %s: {{err}}", asgName, v.(string)), err) + } + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_group.go new file mode 100644 index 000000000..be23aa02f --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_group.go @@ -0,0 +1,1035 @@ +package aws + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go/service/elb" + "github.com/aws/aws-sdk-go/service/elbv2" +) + +func resourceAwsAutoscalingGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsAutoscalingGroupCreate, + Read: resourceAwsAutoscalingGroupRead, + Update: resourceAwsAutoscalingGroupUpdate, + Delete: resourceAwsAutoscalingGroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + // https://github.com/boto/botocore/blob/9f322b1/botocore/data/autoscaling/2011-01-01/service-2.json#L1862-L1873 + value := v.(string) + if len(value) > 255 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 255 characters", k)) + } + return + }, + }, + "name_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 229 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 229 characters, name is limited to 255", k)) + } + return + }, + }, + + "launch_configuration": { + Type: schema.TypeString, + Required: true, + }, + + "desired_capacity": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "min_elb_capacity": { + Type: schema.TypeInt, + Optional: true, + }, + + "min_size": { + Type: schema.TypeInt, + Required: true, + }, + + "max_size": { + Type: schema.TypeInt, + Required: true, + }, + + "default_cooldown": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "force_delete": { + Type: 
schema.TypeBool, + Optional: true, + Default: false, + }, + + "health_check_grace_period": { + Type: schema.TypeInt, + Optional: true, + Default: 300, + }, + + "health_check_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "availability_zones": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "placement_group": { + Type: schema.TypeString, + Optional: true, + }, + + "load_balancers": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "vpc_zone_identifier": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "termination_policies": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "wait_for_capacity_timeout": { + Type: schema.TypeString, + Optional: true, + Default: "10m", + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + duration, err := time.ParseDuration(value) + if err != nil { + errors = append(errors, fmt.Errorf( + "%q cannot be parsed as a duration: %s", k, err)) + } + if duration < 0 { + errors = append(errors, fmt.Errorf( + "%q must be greater than zero", k)) + } + return + }, + }, + + "wait_for_elb_capacity": { + Type: schema.TypeInt, + Optional: true, + }, + + "enabled_metrics": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "suspended_processes": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "metrics_granularity": { + Type: schema.TypeString, + Optional: true, + Default: "1Minute", + }, + + "protect_from_scale_in": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "target_group_arns": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "initial_lifecycle_hook": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "default_result": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "heartbeat_timeout": { + Type: schema.TypeInt, + Optional: true, + }, + "lifecycle_transition": { + Type: schema.TypeString, + Required: true, + }, + "notification_metadata": { + Type: schema.TypeString, + Optional: true, + }, + "notification_target_arn": { + Type: schema.TypeString, + Optional: true, + }, + "role_arn": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + "tag": autoscalingTagSchema(), + + "tags": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeMap}, + ConflictsWith: []string{"tag"}, + }, + }, + } +} + +func generatePutLifecycleHookInputs(asgName string, cfgs []interface{}) []autoscaling.PutLifecycleHookInput { + res := make([]autoscaling.PutLifecycleHookInput, 0, len(cfgs)) + + for _, raw := range cfgs { + cfg := raw.(map[string]interface{}) + + input := autoscaling.PutLifecycleHookInput{ + AutoScalingGroupName: &asgName, + LifecycleHookName: aws.String(cfg["name"].(string)), + } + + if v, ok := cfg["default_result"]; ok && 
v.(string) != "" { + input.DefaultResult = aws.String(v.(string)) + } + + if v, ok := cfg["heartbeat_timeout"]; ok && v.(int) > 0 { + input.HeartbeatTimeout = aws.Int64(int64(v.(int))) + } + + if v, ok := cfg["lifecycle_transition"]; ok && v.(string) != "" { + input.LifecycleTransition = aws.String(v.(string)) + } + + if v, ok := cfg["notification_metadata"]; ok && v.(string) != "" { + input.NotificationMetadata = aws.String(v.(string)) + } + + if v, ok := cfg["notification_target_arn"]; ok && v.(string) != "" { + input.NotificationTargetARN = aws.String(v.(string)) + } + + if v, ok := cfg["role_arn"]; ok && v.(string) != "" { + input.RoleARN = aws.String(v.(string)) + } + + res = append(res, input) + } + + return res +} + +func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).autoscalingconn + + var asgName string + if v, ok := d.GetOk("name"); ok { + asgName = v.(string) + } else { + if v, ok := d.GetOk("name_prefix"); ok { + asgName = resource.PrefixedUniqueId(v.(string)) + } else { + asgName = resource.PrefixedUniqueId("tf-asg-") + } + d.Set("name", asgName) + } + + createOpts := autoscaling.CreateAutoScalingGroupInput{ + AutoScalingGroupName: aws.String(asgName), + LaunchConfigurationName: aws.String(d.Get("launch_configuration").(string)), + NewInstancesProtectedFromScaleIn: aws.Bool(d.Get("protect_from_scale_in").(bool)), + } + updateOpts := autoscaling.UpdateAutoScalingGroupInput{ + AutoScalingGroupName: aws.String(asgName), + } + + initialLifecycleHooks := d.Get("initial_lifecycle_hook").(*schema.Set).List() + twoPhases := len(initialLifecycleHooks) > 0 + + minSize := aws.Int64(int64(d.Get("min_size").(int))) + maxSize := aws.Int64(int64(d.Get("max_size").(int))) + + if twoPhases { + createOpts.MinSize = aws.Int64(int64(0)) + createOpts.MaxSize = aws.Int64(int64(0)) + + updateOpts.MinSize = minSize + updateOpts.MaxSize = maxSize + + if v, ok := d.GetOk("desired_capacity"); ok { + updateOpts.DesiredCapacity = aws.Int64(int64(v.(int))) + } + } else { + createOpts.MinSize = minSize + createOpts.MaxSize = maxSize + + if v, ok := d.GetOk("desired_capacity"); ok { + createOpts.DesiredCapacity = aws.Int64(int64(v.(int))) + } + } + + // Availability Zones are optional if VPC Zone Identifer(s) are specified + if v, ok := d.GetOk("availability_zones"); ok && v.(*schema.Set).Len() > 0 { + createOpts.AvailabilityZones = expandStringList(v.(*schema.Set).List()) + } + + resourceID := d.Get("name").(string) + if v, ok := d.GetOk("tag"); ok { + var err error + createOpts.Tags, err = autoscalingTagsFromMap( + setToMapByKey(v.(*schema.Set), "key"), resourceID) + if err != nil { + return err + } + } + + if v, ok := d.GetOk("tags"); ok { + tags, err := autoscalingTagsFromList(v.([]interface{}), resourceID) + if err != nil { + return err + } + + createOpts.Tags = append(createOpts.Tags, tags...) 
+ } + + if v, ok := d.GetOk("default_cooldown"); ok { + createOpts.DefaultCooldown = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("health_check_type"); ok && v.(string) != "" { + createOpts.HealthCheckType = aws.String(v.(string)) + } + + if v, ok := d.GetOk("health_check_grace_period"); ok { + createOpts.HealthCheckGracePeriod = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("placement_group"); ok { + createOpts.PlacementGroup = aws.String(v.(string)) + } + + if v, ok := d.GetOk("load_balancers"); ok && v.(*schema.Set).Len() > 0 { + createOpts.LoadBalancerNames = expandStringList( + v.(*schema.Set).List()) + } + + if v, ok := d.GetOk("vpc_zone_identifier"); ok && v.(*schema.Set).Len() > 0 { + createOpts.VPCZoneIdentifier = expandVpcZoneIdentifiers(v.(*schema.Set).List()) + } + + if v, ok := d.GetOk("termination_policies"); ok && len(v.([]interface{})) > 0 { + createOpts.TerminationPolicies = expandStringList(v.([]interface{})) + } + + if v, ok := d.GetOk("target_group_arns"); ok && len(v.(*schema.Set).List()) > 0 { + createOpts.TargetGroupARNs = expandStringList(v.(*schema.Set).List()) + } + + log.Printf("[DEBUG] AutoScaling Group create configuration: %#v", createOpts) + _, err := conn.CreateAutoScalingGroup(&createOpts) + if err != nil { + return fmt.Errorf("Error creating AutoScaling Group: %s", err) + } + + d.SetId(d.Get("name").(string)) + log.Printf("[INFO] AutoScaling Group ID: %s", d.Id()) + + if twoPhases { + for _, hook := range generatePutLifecycleHookInputs(asgName, initialLifecycleHooks) { + if err = resourceAwsAutoscalingLifecycleHookPutOp(conn, &hook); err != nil { + return fmt.Errorf("Error creating initial lifecycle hooks: %s", err) + } + } + + _, err = conn.UpdateAutoScalingGroup(&updateOpts) + if err != nil { + return fmt.Errorf("Error setting AutoScaling Group initial capacity: %s", err) + } + } + + if err := waitForASGCapacity(d, meta, capacitySatisfiedCreate); err != nil { + return err + } + + if _, ok := d.GetOk("suspended_processes"); ok { + suspendedProcessesErr := enableASGSuspendedProcesses(d, conn) + if suspendedProcessesErr != nil { + return suspendedProcessesErr + } + } + + if _, ok := d.GetOk("enabled_metrics"); ok { + metricsErr := enableASGMetricsCollection(d, conn) + if metricsErr != nil { + return metricsErr + } + } + + return resourceAwsAutoscalingGroupRead(d, meta) +} + +func resourceAwsAutoscalingGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).autoscalingconn + + g, err := getAwsAutoscalingGroup(d.Id(), conn) + if err != nil { + return err + } + if g == nil { + log.Printf("[INFO] Autoscaling Group %q not found", d.Id()) + d.SetId("") + return nil + } + + d.Set("availability_zones", flattenStringList(g.AvailabilityZones)) + d.Set("default_cooldown", g.DefaultCooldown) + d.Set("arn", g.AutoScalingGroupARN) + d.Set("desired_capacity", g.DesiredCapacity) + d.Set("health_check_grace_period", g.HealthCheckGracePeriod) + d.Set("health_check_type", g.HealthCheckType) + d.Set("launch_configuration", g.LaunchConfigurationName) + d.Set("load_balancers", flattenStringList(g.LoadBalancerNames)) + + if err := d.Set("suspended_processes", flattenAsgSuspendedProcesses(g.SuspendedProcesses)); err != nil { + log.Printf("[WARN] Error setting suspended_processes for %q: %s", d.Id(), err) + } + if err := d.Set("target_group_arns", flattenStringList(g.TargetGroupARNs)); err != nil { + log.Printf("[ERR] Error setting target groups: %s", err) + } + d.Set("min_size", g.MinSize) + d.Set("max_size", g.MaxSize) + 
d.Set("placement_group", g.PlacementGroup) + d.Set("name", g.AutoScalingGroupName) + + var tagList, tagsList []*autoscaling.TagDescription + var tagOk, tagsOk bool + var v interface{} + + if v, tagOk = d.GetOk("tag"); tagOk { + tags := setToMapByKey(v.(*schema.Set), "key") + for _, t := range g.Tags { + if _, ok := tags[*t.Key]; ok { + tagList = append(tagList, t) + } + } + d.Set("tag", autoscalingTagDescriptionsToSlice(tagList)) + } + + if v, tagsOk = d.GetOk("tags"); tagsOk { + tags := map[string]struct{}{} + for _, tag := range v.([]interface{}) { + attr, ok := tag.(map[string]interface{}) + if !ok { + continue + } + + key, ok := attr["key"].(string) + if !ok { + continue + } + + tags[key] = struct{}{} + } + + for _, t := range g.Tags { + if _, ok := tags[*t.Key]; ok { + tagsList = append(tagsList, t) + } + } + d.Set("tags", autoscalingTagDescriptionsToSlice(tagsList)) + } + + if !tagOk && !tagsOk { + d.Set("tag", autoscalingTagDescriptionsToSlice(g.Tags)) + } + + d.Set("vpc_zone_identifier", strings.Split(*g.VPCZoneIdentifier, ",")) + d.Set("protect_from_scale_in", g.NewInstancesProtectedFromScaleIn) + + // If no termination polices are explicitly configured and the upstream state + // is only using the "Default" policy, clear the state to make it consistent + // with the default AWS create API behavior. + _, ok := d.GetOk("termination_policies") + if !ok && len(g.TerminationPolicies) == 1 && *g.TerminationPolicies[0] == "Default" { + d.Set("termination_policies", []interface{}{}) + } else { + d.Set("termination_policies", flattenStringList(g.TerminationPolicies)) + } + + if g.EnabledMetrics != nil { + if err := d.Set("enabled_metrics", flattenAsgEnabledMetrics(g.EnabledMetrics)); err != nil { + log.Printf("[WARN] Error setting metrics for (%s): %s", d.Id(), err) + } + d.Set("metrics_granularity", g.EnabledMetrics[0].Granularity) + } + + return nil +} + +func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).autoscalingconn + shouldWaitForCapacity := false + + opts := autoscaling.UpdateAutoScalingGroupInput{ + AutoScalingGroupName: aws.String(d.Id()), + } + + opts.NewInstancesProtectedFromScaleIn = aws.Bool(d.Get("protect_from_scale_in").(bool)) + + if d.HasChange("default_cooldown") { + opts.DefaultCooldown = aws.Int64(int64(d.Get("default_cooldown").(int))) + } + + if d.HasChange("desired_capacity") { + opts.DesiredCapacity = aws.Int64(int64(d.Get("desired_capacity").(int))) + shouldWaitForCapacity = true + } + + if d.HasChange("launch_configuration") { + opts.LaunchConfigurationName = aws.String(d.Get("launch_configuration").(string)) + } + + if d.HasChange("min_size") { + opts.MinSize = aws.Int64(int64(d.Get("min_size").(int))) + shouldWaitForCapacity = true + } + + if d.HasChange("max_size") { + opts.MaxSize = aws.Int64(int64(d.Get("max_size").(int))) + } + + if d.HasChange("health_check_grace_period") { + opts.HealthCheckGracePeriod = aws.Int64(int64(d.Get("health_check_grace_period").(int))) + } + + if d.HasChange("health_check_type") { + opts.HealthCheckGracePeriod = aws.Int64(int64(d.Get("health_check_grace_period").(int))) + opts.HealthCheckType = aws.String(d.Get("health_check_type").(string)) + } + + if d.HasChange("vpc_zone_identifier") { + opts.VPCZoneIdentifier = expandVpcZoneIdentifiers(d.Get("vpc_zone_identifier").(*schema.Set).List()) + } + + if d.HasChange("availability_zones") { + if v, ok := d.GetOk("availability_zones"); ok && v.(*schema.Set).Len() > 0 { + opts.AvailabilityZones = 
expandStringList(v.(*schema.Set).List())
+    }
+  }
+
+  if d.HasChange("placement_group") {
+    opts.PlacementGroup = aws.String(d.Get("placement_group").(string))
+  }
+
+  if d.HasChange("termination_policies") {
+    // If the termination policy is set to null, we need to explicitly set
+    // it back to "Default", or the API won't reset it for us.
+    if v, ok := d.GetOk("termination_policies"); ok && len(v.([]interface{})) > 0 {
+      opts.TerminationPolicies = expandStringList(v.([]interface{}))
+    } else {
+      log.Printf("[DEBUG] Explicitly setting null termination policy to 'Default'")
+      opts.TerminationPolicies = aws.StringSlice([]string{"Default"})
+    }
+  }
+
+  if err := setAutoscalingTags(conn, d); err != nil {
+    return err
+  }
+
+  if d.HasChange("tag") {
+    d.SetPartial("tag")
+  }
+
+  if d.HasChange("tags") {
+    d.SetPartial("tags")
+  }
+
+  log.Printf("[DEBUG] AutoScaling Group update configuration: %#v", opts)
+  _, err := conn.UpdateAutoScalingGroup(&opts)
+  if err != nil {
+    d.Partial(true)
+    return fmt.Errorf("Error updating AutoScaling Group: %s", err)
+  }
+
+  if d.HasChange("load_balancers") {
+    o, n := d.GetChange("load_balancers")
+    if o == nil {
+      o = new(schema.Set)
+    }
+    if n == nil {
+      n = new(schema.Set)
+    }
+
+    os := o.(*schema.Set)
+    ns := n.(*schema.Set)
+    remove := expandStringList(os.Difference(ns).List())
+    add := expandStringList(ns.Difference(os).List())
+
+    if len(remove) > 0 {
+      _, err := conn.DetachLoadBalancers(&autoscaling.DetachLoadBalancersInput{
+        AutoScalingGroupName: aws.String(d.Id()),
+        LoadBalancerNames:    remove,
+      })
+      if err != nil {
+        return fmt.Errorf("Error updating Load Balancers for AutoScaling Group (%s), error: %s", d.Id(), err)
+      }
+    }
+
+    if len(add) > 0 {
+      _, err := conn.AttachLoadBalancers(&autoscaling.AttachLoadBalancersInput{
+        AutoScalingGroupName: aws.String(d.Id()),
+        LoadBalancerNames:    add,
+      })
+      if err != nil {
+        return fmt.Errorf("Error updating Load Balancers for AutoScaling Group (%s), error: %s", d.Id(), err)
+      }
+    }
+  }
+
+  if d.HasChange("target_group_arns") {
+    o, n := d.GetChange("target_group_arns")
+    if o == nil {
+      o = new(schema.Set)
+    }
+    if n == nil {
+      n = new(schema.Set)
+    }
+
+    os := o.(*schema.Set)
+    ns := n.(*schema.Set)
+    remove := expandStringList(os.Difference(ns).List())
+    add := expandStringList(ns.Difference(os).List())
+
+    if len(remove) > 0 {
+      _, err := conn.DetachLoadBalancerTargetGroups(&autoscaling.DetachLoadBalancerTargetGroupsInput{
+        AutoScalingGroupName: aws.String(d.Id()),
+        TargetGroupARNs:      remove,
+      })
+      if err != nil {
+        return fmt.Errorf("Error updating Load Balancer Target Groups for AutoScaling Group (%s), error: %s", d.Id(), err)
+      }
+    }
+
+    if len(add) > 0 {
+      _, err := conn.AttachLoadBalancerTargetGroups(&autoscaling.AttachLoadBalancerTargetGroupsInput{
+        AutoScalingGroupName: aws.String(d.Id()),
+        TargetGroupARNs:      add,
+      })
+      if err != nil {
+        return fmt.Errorf("Error updating Load Balancer Target Groups for AutoScaling Group (%s), error: %s", d.Id(), err)
+      }
+    }
+  }
+
+  if shouldWaitForCapacity {
+    if err := waitForASGCapacity(d, meta, capacitySatisfiedUpdate); err != nil {
+      return errwrap.Wrapf("Error waiting for AutoScaling Group Capacity: {{err}}", err)
+    }
+  }
+
+  if d.HasChange("enabled_metrics") {
+    if err := updateASGMetricsCollection(d, conn); err != nil {
+      return errwrap.Wrapf("Error updating AutoScaling Group Metrics collection: {{err}}", err)
+    }
+  }
+
+  if d.HasChange("suspended_processes") {
+    if err := updateASGSuspendedProcesses(d, conn); err != nil { 
+ return errwrap.Wrapf("Error updating AutoScaling Group Suspended Processes: {{err}}", err) + } + } + + return resourceAwsAutoscalingGroupRead(d, meta) +} + +func resourceAwsAutoscalingGroupDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).autoscalingconn + + // Read the autoscaling group first. If it doesn't exist, we're done. + // We need the group in order to check if there are instances attached. + // If so, we need to remove those first. + g, err := getAwsAutoscalingGroup(d.Id(), conn) + if err != nil { + return err + } + if g == nil { + log.Printf("[INFO] Autoscaling Group %q not found", d.Id()) + d.SetId("") + return nil + } + if len(g.Instances) > 0 || *g.DesiredCapacity > 0 { + if err := resourceAwsAutoscalingGroupDrain(d, meta); err != nil { + return err + } + } + + log.Printf("[DEBUG] AutoScaling Group destroy: %v", d.Id()) + deleteopts := autoscaling.DeleteAutoScalingGroupInput{ + AutoScalingGroupName: aws.String(d.Id()), + ForceDelete: aws.Bool(d.Get("force_delete").(bool)), + } + + // We retry the delete operation to handle InUse/InProgress errors coming + // from scaling operations. We should be able to sneak in a delete in between + // scaling operations within 5m. + err = resource.Retry(5*time.Minute, func() *resource.RetryError { + if _, err := conn.DeleteAutoScalingGroup(&deleteopts); err != nil { + if awserr, ok := err.(awserr.Error); ok { + switch awserr.Code() { + case "InvalidGroup.NotFound": + // Already gone? Sure! + return nil + case "ResourceInUse", "ScalingActivityInProgress": + // These are retryable + return resource.RetryableError(awserr) + } + } + // Didn't recognize the error, so shouldn't retry. + return resource.NonRetryableError(err) + } + // Successful delete + return nil + }) + if err != nil { + return err + } + + return resource.Retry(5*time.Minute, func() *resource.RetryError { + if g, _ = getAwsAutoscalingGroup(d.Id(), conn); g != nil { + return resource.RetryableError( + fmt.Errorf("Auto Scaling Group still exists")) + } + return nil + }) +} + +func getAwsAutoscalingGroup( + asgName string, + conn *autoscaling.AutoScaling) (*autoscaling.Group, error) { + + describeOpts := autoscaling.DescribeAutoScalingGroupsInput{ + AutoScalingGroupNames: []*string{aws.String(asgName)}, + } + + log.Printf("[DEBUG] AutoScaling Group describe configuration: %#v", describeOpts) + describeGroups, err := conn.DescribeAutoScalingGroups(&describeOpts) + if err != nil { + autoscalingerr, ok := err.(awserr.Error) + if ok && autoscalingerr.Code() == "InvalidGroup.NotFound" { + return nil, nil + } + + return nil, fmt.Errorf("Error retrieving AutoScaling groups: %s", err) + } + + // Search for the autoscaling group + for idx, asc := range describeGroups.AutoScalingGroups { + if *asc.AutoScalingGroupName == asgName { + return describeGroups.AutoScalingGroups[idx], nil + } + } + + return nil, nil +} + +func resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).autoscalingconn + + if d.Get("force_delete").(bool) { + log.Printf("[DEBUG] Skipping ASG drain, force_delete was set.") + return nil + } + + // First, set the capacity to zero so the group will drain + log.Printf("[DEBUG] Reducing autoscaling group capacity to zero") + opts := autoscaling.UpdateAutoScalingGroupInput{ + AutoScalingGroupName: aws.String(d.Id()), + DesiredCapacity: aws.Int64(0), + MinSize: aws.Int64(0), + MaxSize: aws.Int64(0), + } + if _, err := conn.UpdateAutoScalingGroup(&opts); err != nil { + return 
fmt.Errorf("Error setting capacity to zero to drain: %s", err) + } + + // Next, wait for the autoscale group to drain + log.Printf("[DEBUG] Waiting for group to have zero instances") + return resource.Retry(10*time.Minute, func() *resource.RetryError { + g, err := getAwsAutoscalingGroup(d.Id(), conn) + if err != nil { + return resource.NonRetryableError(err) + } + if g == nil { + log.Printf("[INFO] Autoscaling Group %q not found", d.Id()) + d.SetId("") + return nil + } + + if len(g.Instances) == 0 { + return nil + } + + return resource.RetryableError( + fmt.Errorf("group still has %d instances", len(g.Instances))) + }) +} + +func enableASGSuspendedProcesses(d *schema.ResourceData, conn *autoscaling.AutoScaling) error { + props := &autoscaling.ScalingProcessQuery{ + AutoScalingGroupName: aws.String(d.Id()), + ScalingProcesses: expandStringList(d.Get("suspended_processes").(*schema.Set).List()), + } + + _, err := conn.SuspendProcesses(props) + if err != nil { + return err + } + + return nil +} + +func enableASGMetricsCollection(d *schema.ResourceData, conn *autoscaling.AutoScaling) error { + props := &autoscaling.EnableMetricsCollectionInput{ + AutoScalingGroupName: aws.String(d.Id()), + Granularity: aws.String(d.Get("metrics_granularity").(string)), + Metrics: expandStringList(d.Get("enabled_metrics").(*schema.Set).List()), + } + + log.Printf("[INFO] Enabling metrics collection for the ASG: %s", d.Id()) + _, metricsErr := conn.EnableMetricsCollection(props) + if metricsErr != nil { + return metricsErr + } + + return nil +} + +func updateASGSuspendedProcesses(d *schema.ResourceData, conn *autoscaling.AutoScaling) error { + o, n := d.GetChange("suspended_processes") + if o == nil { + o = new(schema.Set) + } + if n == nil { + n = new(schema.Set) + } + + os := o.(*schema.Set) + ns := n.(*schema.Set) + + resumeProcesses := os.Difference(ns) + if resumeProcesses.Len() != 0 { + props := &autoscaling.ScalingProcessQuery{ + AutoScalingGroupName: aws.String(d.Id()), + ScalingProcesses: expandStringList(resumeProcesses.List()), + } + + _, err := conn.ResumeProcesses(props) + if err != nil { + return fmt.Errorf("Error Resuming Processes for ASG %q: %s", d.Id(), err) + } + } + + suspendedProcesses := ns.Difference(os) + if suspendedProcesses.Len() != 0 { + props := &autoscaling.ScalingProcessQuery{ + AutoScalingGroupName: aws.String(d.Id()), + ScalingProcesses: expandStringList(suspendedProcesses.List()), + } + + _, err := conn.SuspendProcesses(props) + if err != nil { + return fmt.Errorf("Error Suspending Processes for ASG %q: %s", d.Id(), err) + } + } + + return nil + +} + +func updateASGMetricsCollection(d *schema.ResourceData, conn *autoscaling.AutoScaling) error { + + o, n := d.GetChange("enabled_metrics") + if o == nil { + o = new(schema.Set) + } + if n == nil { + n = new(schema.Set) + } + + os := o.(*schema.Set) + ns := n.(*schema.Set) + + disableMetrics := os.Difference(ns) + if disableMetrics.Len() != 0 { + props := &autoscaling.DisableMetricsCollectionInput{ + AutoScalingGroupName: aws.String(d.Id()), + Metrics: expandStringList(disableMetrics.List()), + } + + _, err := conn.DisableMetricsCollection(props) + if err != nil { + return fmt.Errorf("Failure to Disable metrics collection types for ASG %s: %s", d.Id(), err) + } + } + + enabledMetrics := ns.Difference(os) + if enabledMetrics.Len() != 0 { + props := &autoscaling.EnableMetricsCollectionInput{ + AutoScalingGroupName: aws.String(d.Id()), + Metrics: expandStringList(enabledMetrics.List()), + Granularity: 
aws.String(d.Get("metrics_granularity").(string)),
+    }
+
+    _, err := conn.EnableMetricsCollection(props)
+    if err != nil {
+      return fmt.Errorf("Failure to Enable metrics collection types for ASG %s: %s", d.Id(), err)
+    }
+  }
+
+  return nil
+}
+
+// getELBInstanceStates returns a mapping of the instance states of all the ELBs attached to the
+// provided ASG.
+//
+// Note that this is the instance state function for ELB Classic.
+//
+// Nested like: lbName -> instanceId -> instanceState
+func getELBInstanceStates(g *autoscaling.Group, meta interface{}) (map[string]map[string]string, error) {
+  lbInstanceStates := make(map[string]map[string]string)
+  elbconn := meta.(*AWSClient).elbconn
+
+  for _, lbName := range g.LoadBalancerNames {
+    lbInstanceStates[*lbName] = make(map[string]string)
+    opts := &elb.DescribeInstanceHealthInput{LoadBalancerName: lbName}
+    r, err := elbconn.DescribeInstanceHealth(opts)
+    if err != nil {
+      return nil, err
+    }
+    for _, is := range r.InstanceStates {
+      if is.InstanceId == nil || is.State == nil {
+        continue
+      }
+      lbInstanceStates[*lbName][*is.InstanceId] = *is.State
+    }
+  }
+
+  return lbInstanceStates, nil
+}
+
+// getTargetGroupInstanceStates returns a mapping of the instance states of
+// all the ALB target groups attached to the provided ASG.
+//
+// Note that this is the instance state function for Application Load
+// Balancing (aka ELBv2).
+//
+// Nested like: targetGroupARN -> instanceId -> instanceState
+func getTargetGroupInstanceStates(g *autoscaling.Group, meta interface{}) (map[string]map[string]string, error) {
+  targetInstanceStates := make(map[string]map[string]string)
+  elbv2conn := meta.(*AWSClient).elbv2conn
+
+  for _, targetGroupARN := range g.TargetGroupARNs {
+    targetInstanceStates[*targetGroupARN] = make(map[string]string)
+    opts := &elbv2.DescribeTargetHealthInput{TargetGroupArn: targetGroupARN}
+    r, err := elbv2conn.DescribeTargetHealth(opts)
+    if err != nil {
+      return nil, err
+    }
+    for _, desc := range r.TargetHealthDescriptions {
+      if desc.Target == nil || desc.Target.Id == nil || desc.TargetHealth == nil || desc.TargetHealth.State == nil {
+        continue
+      }
+      targetInstanceStates[*targetGroupARN][*desc.Target.Id] = *desc.TargetHealth.State
+    }
+  }
+
+  return targetInstanceStates, nil
+}
+
+func expandVpcZoneIdentifiers(list []interface{}) *string {
+  // Allocate length zero with capacity len(list); make([]string, len(list))
+  // followed by append would leave empty entries at the front of the joined
+  // identifier string.
+  strs := make([]string, 0, len(list))
+  for _, s := range list {
+    strs = append(strs, s.(string))
+  }
+  return aws.String(strings.Join(strs, ","))
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_group_waiting.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_group_waiting.go
new file mode 100644
index 000000000..1c27bb813
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_group_waiting.go
@@ -0,0 +1,166 @@
+package aws
+
+import (
+  "fmt"
+  "log"
+  "strings"
+  "time"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/service/autoscaling"
+  "github.com/hashicorp/errwrap"
+  "github.com/hashicorp/terraform/helper/resource"
+  "github.com/hashicorp/terraform/helper/schema"
+)
+
+// waitForASGCapacity gathers the current numbers of healthy instances in the
+// ASG and its attached ELBs and ALB target groups, and yields these numbers
+// to a capacitySatisfiedFunc. Loops for up to wait_for_capacity_timeout until
+// the capacitySatisfiedFunc returns true.
+//
+// See "Waiting for Capacity" in docs for more discussion of the feature.
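+//
+// As an illustrative sketch only (not something this file defines), a caller
+// could satisfy the capacitySatisfiedFunc signature with a check as simple as:
+//
+//	func atLeastOneHealthy(d *schema.ResourceData, haveASG, haveELB int) (bool, string) {
+//		if haveASG < 1 || haveELB < 1 {
+//			return false, "waiting for the first healthy, in-service instance"
+//		}
+//		return true, ""
+//	}
+//
+// atLeastOneHealthy is a hypothetical name used for illustration; the real
+// callers pass capacitySatisfiedCreate and capacitySatisfiedUpdate, defined
+// later in this file.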
+func waitForASGCapacity(
+  d *schema.ResourceData,
+  meta interface{},
+  satisfiedFunc capacitySatisfiedFunc) error {
+  wait, err := time.ParseDuration(d.Get("wait_for_capacity_timeout").(string))
+  if err != nil {
+    return err
+  }
+
+  if wait == 0 {
+    log.Printf("[DEBUG] Capacity timeout set to 0, skipping capacity waiting.")
+    return nil
+  }
+
+  log.Printf("[DEBUG] Waiting on %s for capacity...", d.Id())
+
+  err = resource.Retry(wait, func() *resource.RetryError {
+    g, err := getAwsAutoscalingGroup(d.Id(), meta.(*AWSClient).autoscalingconn)
+    if err != nil {
+      return resource.NonRetryableError(err)
+    }
+    if g == nil {
+      log.Printf("[INFO] Autoscaling Group %q not found", d.Id())
+      d.SetId("")
+      return nil
+    }
+    // Check each lookup's error separately; reusing err across both calls
+    // would silently discard a failure from the first.
+    elbis, err := getELBInstanceStates(g, meta)
+    if err != nil {
+      return resource.NonRetryableError(err)
+    }
+    albis, err := getTargetGroupInstanceStates(g, meta)
+    if err != nil {
+      return resource.NonRetryableError(err)
+    }
+
+    haveASG := 0
+    haveELB := 0
+
+    for _, i := range g.Instances {
+      if i.HealthStatus == nil || i.InstanceId == nil || i.LifecycleState == nil {
+        continue
+      }
+
+      if !strings.EqualFold(*i.HealthStatus, "Healthy") {
+        continue
+      }
+
+      if !strings.EqualFold(*i.LifecycleState, "InService") {
+        continue
+      }
+
+      haveASG++
+
+      inAllLbs := true
+      for _, states := range elbis {
+        state, ok := states[*i.InstanceId]
+        if !ok || !strings.EqualFold(state, "InService") {
+          inAllLbs = false
+        }
+      }
+      for _, states := range albis {
+        state, ok := states[*i.InstanceId]
+        if !ok || !strings.EqualFold(state, "healthy") {
+          inAllLbs = false
+        }
+      }
+      if inAllLbs {
+        haveELB++
+      }
+    }
+
+    satisfied, reason := satisfiedFunc(d, haveASG, haveELB)
+
+    log.Printf("[DEBUG] %q Capacity: %d ASG, %d ELB/ALB, satisfied: %t, reason: %q",
+      d.Id(), haveASG, haveELB, satisfied, reason)
+
+    if satisfied {
+      return nil
+    }
+
+    return resource.RetryableError(
+      fmt.Errorf("%q: Waiting up to %s: %s", d.Id(), wait, reason))
+  })
+
+  if err == nil {
+    return nil
+  }
+
+  recentStatus := ""
+
+  conn := meta.(*AWSClient).autoscalingconn
+  resp, aErr := conn.DescribeScalingActivities(&autoscaling.DescribeScalingActivitiesInput{
+    AutoScalingGroupName: aws.String(d.Id()),
+    MaxRecords:           aws.Int64(1),
+  })
+  if aErr == nil {
+    if len(resp.Activities) > 0 {
+      recentStatus = fmt.Sprintf("%s", resp.Activities[0])
+    } else {
+      recentStatus = "(0 activities found)"
+    }
+  } else {
+    recentStatus = fmt.Sprintf("(Failed to describe scaling activities: %s)", aErr)
+  }
+
+  msg := fmt.Sprintf("{{err}}. 
Most recent activity: %s", recentStatus) + return errwrap.Wrapf(msg, err) +} + +type capacitySatisfiedFunc func(*schema.ResourceData, int, int) (bool, string) + +// capacitySatisfiedCreate treats all targets as minimums +func capacitySatisfiedCreate(d *schema.ResourceData, haveASG, haveELB int) (bool, string) { + minASG := d.Get("min_size").(int) + if wantASG := d.Get("desired_capacity").(int); wantASG > 0 { + minASG = wantASG + } + if haveASG < minASG { + return false, fmt.Sprintf( + "Need at least %d healthy instances in ASG, have %d", minASG, haveASG) + } + minELB := d.Get("min_elb_capacity").(int) + if wantELB := d.Get("wait_for_elb_capacity").(int); wantELB > 0 { + minELB = wantELB + } + if haveELB < minELB { + return false, fmt.Sprintf( + "Need at least %d healthy instances in ELB, have %d", minELB, haveELB) + } + return true, "" +} + +// capacitySatisfiedUpdate only cares about specific targets +func capacitySatisfiedUpdate(d *schema.ResourceData, haveASG, haveELB int) (bool, string) { + if wantASG := d.Get("desired_capacity").(int); wantASG > 0 { + if haveASG != wantASG { + return false, fmt.Sprintf( + "Need exactly %d healthy instances in ASG, have %d", wantASG, haveASG) + } + } + if wantELB := d.Get("wait_for_elb_capacity").(int); wantELB > 0 { + if haveELB != wantELB { + return false, fmt.Sprintf( + "Need exactly %d healthy instances in ELB, have %d", wantELB, haveELB) + } + } + return true, "" +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_lifecycle_hook.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_lifecycle_hook.go new file mode 100644 index 000000000..60622345e --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_lifecycle_hook.go @@ -0,0 +1,194 @@ +package aws + +import ( + "log" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsAutoscalingLifecycleHook() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsAutoscalingLifecycleHookPut, + Read: resourceAwsAutoscalingLifecycleHookRead, + Update: resourceAwsAutoscalingLifecycleHookPut, + Delete: resourceAwsAutoscalingLifecycleHookDelete, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "autoscaling_group_name": { + Type: schema.TypeString, + Required: true, + }, + "default_result": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "heartbeat_timeout": { + Type: schema.TypeInt, + Optional: true, + }, + "lifecycle_transition": { + Type: schema.TypeString, + Required: true, + }, + "notification_metadata": { + Type: schema.TypeString, + Optional: true, + }, + "notification_target_arn": { + Type: schema.TypeString, + Optional: true, + }, + "role_arn": { + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func resourceAwsAutoscalingLifecycleHookPutOp(conn *autoscaling.AutoScaling, params *autoscaling.PutLifecycleHookInput) error { + log.Printf("[DEBUG] AutoScaling PutLifecyleHook: %s", params) + return resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.PutLifecycleHook(params) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if strings.Contains(awsErr.Message(), 
"Unable to publish test message to notification target") { + return resource.RetryableError(errwrap.Wrapf("[DEBUG] Retrying AWS AutoScaling Lifecycle Hook: {{err}}", awsErr)) + } + } + return resource.NonRetryableError(errwrap.Wrapf("Error putting lifecycle hook: {{err}}", err)) + } + return nil + }) +} + +func resourceAwsAutoscalingLifecycleHookPut(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).autoscalingconn + params := getAwsAutoscalingPutLifecycleHookInput(d) + + if err := resourceAwsAutoscalingLifecycleHookPutOp(conn, ¶ms); err != nil { + return err + } + + d.SetId(d.Get("name").(string)) + + return resourceAwsAutoscalingLifecycleHookRead(d, meta) +} + +func resourceAwsAutoscalingLifecycleHookRead(d *schema.ResourceData, meta interface{}) error { + p, err := getAwsAutoscalingLifecycleHook(d, meta) + if err != nil { + return err + } + if p == nil { + d.SetId("") + return nil + } + + log.Printf("[DEBUG] Read Lifecycle Hook: ASG: %s, SH: %s, Obj: %#v", d.Get("autoscaling_group_name"), d.Get("name"), p) + + d.Set("default_result", p.DefaultResult) + d.Set("heartbeat_timeout", p.HeartbeatTimeout) + d.Set("lifecycle_transition", p.LifecycleTransition) + d.Set("notification_metadata", p.NotificationMetadata) + d.Set("notification_target_arn", p.NotificationTargetARN) + d.Set("name", p.LifecycleHookName) + d.Set("role_arn", p.RoleARN) + + return nil +} + +func resourceAwsAutoscalingLifecycleHookDelete(d *schema.ResourceData, meta interface{}) error { + autoscalingconn := meta.(*AWSClient).autoscalingconn + p, err := getAwsAutoscalingLifecycleHook(d, meta) + if err != nil { + return err + } + if p == nil { + return nil + } + + params := autoscaling.DeleteLifecycleHookInput{ + AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)), + LifecycleHookName: aws.String(d.Get("name").(string)), + } + if _, err := autoscalingconn.DeleteLifecycleHook(¶ms); err != nil { + return errwrap.Wrapf("Autoscaling Lifecycle Hook: {{err}}", err) + } + + d.SetId("") + return nil +} + +func getAwsAutoscalingPutLifecycleHookInput(d *schema.ResourceData) autoscaling.PutLifecycleHookInput { + var params = autoscaling.PutLifecycleHookInput{ + AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)), + LifecycleHookName: aws.String(d.Get("name").(string)), + } + + if v, ok := d.GetOk("default_result"); ok { + params.DefaultResult = aws.String(v.(string)) + } + + if v, ok := d.GetOk("heartbeat_timeout"); ok { + params.HeartbeatTimeout = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("lifecycle_transition"); ok { + params.LifecycleTransition = aws.String(v.(string)) + } + + if v, ok := d.GetOk("notification_metadata"); ok { + params.NotificationMetadata = aws.String(v.(string)) + } + + if v, ok := d.GetOk("notification_target_arn"); ok { + params.NotificationTargetARN = aws.String(v.(string)) + } + + if v, ok := d.GetOk("role_arn"); ok { + params.RoleARN = aws.String(v.(string)) + } + + return params +} + +func getAwsAutoscalingLifecycleHook(d *schema.ResourceData, meta interface{}) (*autoscaling.LifecycleHook, error) { + autoscalingconn := meta.(*AWSClient).autoscalingconn + + params := autoscaling.DescribeLifecycleHooksInput{ + AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)), + LifecycleHookNames: []*string{aws.String(d.Get("name").(string))}, + } + + log.Printf("[DEBUG] AutoScaling Lifecycle Hook Describe Params: %#v", params) + resp, err := autoscalingconn.DescribeLifecycleHooks(¶ms) + if err != nil { + return nil, 
errwrap.Wrapf("Error retrieving lifecycle hooks: {{err}}", err) + } + + // find lifecycle hooks + name := d.Get("name") + for idx, sp := range resp.LifecycleHooks { + if *sp.LifecycleHookName == name { + return resp.LifecycleHooks[idx], nil + } + } + + // lifecycle hook not found + return nil, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_notification.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_notification.go new file mode 100644 index 000000000..5afcc6649 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_notification.go @@ -0,0 +1,211 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsAutoscalingNotification() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsAutoscalingNotificationCreate, + Read: resourceAwsAutoscalingNotificationRead, + Update: resourceAwsAutoscalingNotificationUpdate, + Delete: resourceAwsAutoscalingNotificationDelete, + + Schema: map[string]*schema.Schema{ + "topic_arn": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "group_names": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "notifications": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } +} + +func resourceAwsAutoscalingNotificationCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).autoscalingconn + gl := convertSetToList(d.Get("group_names").(*schema.Set)) + nl := convertSetToList(d.Get("notifications").(*schema.Set)) + + topic := d.Get("topic_arn").(string) + if err := addNotificationConfigToGroupsWithTopic(conn, gl, nl, topic); err != nil { + return err + } + + // ARNs are unique, and these notifications are per ARN, so we re-use the ARN + // here as the ID + d.SetId(topic) + return resourceAwsAutoscalingNotificationRead(d, meta) +} + +func resourceAwsAutoscalingNotificationRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).autoscalingconn + gl := convertSetToList(d.Get("group_names").(*schema.Set)) + + opts := &autoscaling.DescribeNotificationConfigurationsInput{ + AutoScalingGroupNames: gl, + } + + topic := d.Get("topic_arn").(string) + // Grab all applicable notifcation configurations for this Topic. 
+  // Each NotificationType will have a record, so 1 Group with 3 Types results
+  // in 3 records, all with the same Group name
+  gRaw := make(map[string]bool)
+  nRaw := make(map[string]bool)
+
+  i := 0
+  err := conn.DescribeNotificationConfigurationsPages(opts, func(resp *autoscaling.DescribeNotificationConfigurationsOutput, lastPage bool) bool {
+    if resp == nil {
+      // Guard the dereference below; ranging over a nil response would panic.
+      log.Printf("[DEBUG] Paging finished for DescribeNotificationConfigurations (%s)", d.Id())
+      return false
+    }
+
+    i++
+    log.Printf("[DEBUG] Paging DescribeNotificationConfigurations for (%s), page: %d", d.Id(), i)
+
+    for _, n := range resp.NotificationConfigurations {
+      if *n.TopicARN == topic {
+        gRaw[*n.AutoScalingGroupName] = true
+        nRaw[*n.NotificationType] = true
+      }
+    }
+    return true // return false to stop paging
+  })
+  if err != nil {
+    return err
+  }
+
+  // Grab the keys here as the list of Groups
+  var gList []string
+  for k := range gRaw {
+    gList = append(gList, k)
+  }
+
+  // Grab the keys here as the list of Types
+  var nList []string
+  for k := range nRaw {
+    nList = append(nList, k)
+  }
+
+  if err := d.Set("group_names", gList); err != nil {
+    return err
+  }
+  if err := d.Set("notifications", nList); err != nil {
+    return err
+  }
+
+  return nil
+}
+
+func resourceAwsAutoscalingNotificationUpdate(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).autoscalingconn
+
+  // Notifications API call is a PUT, so we don't need to diff the list, just
+  // push whatever it is and AWS sorts it out
+  nl := convertSetToList(d.Get("notifications").(*schema.Set))
+
+  o, n := d.GetChange("group_names")
+  if o == nil {
+    o = new(schema.Set)
+  }
+  if n == nil {
+    n = new(schema.Set)
+  }
+
+  os := o.(*schema.Set)
+  ns := n.(*schema.Set)
+  remove := convertSetToList(os.Difference(ns))
+  add := convertSetToList(ns.Difference(os))
+
+  topic := d.Get("topic_arn").(string)
+
+  if err := removeNotificationConfigToGroupsWithTopic(conn, remove, topic); err != nil {
+    return err
+  }
+
+  var update []*string
+  if d.HasChange("notifications") {
+    update = convertSetToList(d.Get("group_names").(*schema.Set))
+  } else {
+    update = add
+  }
+
+  if err := addNotificationConfigToGroupsWithTopic(conn, update, nl, topic); err != nil {
+    return err
+  }
+
+  return resourceAwsAutoscalingNotificationRead(d, meta)
+}
+
+func addNotificationConfigToGroupsWithTopic(conn *autoscaling.AutoScaling, groups []*string, nl []*string, topic string) error {
+  for _, a := range groups {
+    opts := &autoscaling.PutNotificationConfigurationInput{
+      AutoScalingGroupName: a,
+      NotificationTypes:    nl,
+      TopicARN:             aws.String(topic),
+    }
+
+    _, err := conn.PutNotificationConfiguration(opts)
+    if err != nil {
+      if awsErr, ok := err.(awserr.Error); ok {
+        return fmt.Errorf("Error creating Autoscaling Group Notification for Group %s, error: \"%s\", code: \"%s\"", *a, awsErr.Message(), awsErr.Code())
+      }
+      return err
+    }
+  }
+  return nil
+}
+
+func removeNotificationConfigToGroupsWithTopic(conn *autoscaling.AutoScaling, groups []*string, topic string) error {
+  for _, r := range groups {
+    opts := &autoscaling.DeleteNotificationConfigurationInput{
+      AutoScalingGroupName: r,
+      TopicARN:             aws.String(topic),
+    }
+
+    _, err := conn.DeleteNotificationConfiguration(opts)
+    if err != nil {
+      // Include the underlying error; the original message dropped it.
+      return fmt.Errorf("Error deleting notification configuration for ASG \"%s\", Topic ARN \"%s\": %s", *r, topic, err)
+    }
+  }
+  return nil
+}
+
+func resourceAwsAutoscalingNotificationDelete(d *schema.ResourceData, meta interface{}) error {
+  conn := 
meta.(*AWSClient).autoscalingconn + gl := convertSetToList(d.Get("group_names").(*schema.Set)) + + topic := d.Get("topic_arn").(string) + if err := removeNotificationConfigToGroupsWithTopic(conn, gl, topic); err != nil { + return err + } + + return nil +} + +func convertSetToList(s *schema.Set) (nl []*string) { + l := s.List() + for _, n := range l { + nl = append(nl, aws.String(n.(string))) + } + + return nl +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_policy.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_policy.go new file mode 100644 index 000000000..6d2403050 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_policy.go @@ -0,0 +1,311 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsAutoscalingPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsAutoscalingPolicyCreate, + Read: resourceAwsAutoscalingPolicyRead, + Update: resourceAwsAutoscalingPolicyUpdate, + Delete: resourceAwsAutoscalingPolicyDelete, + + Schema: map[string]*schema.Schema{ + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "adjustment_type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "autoscaling_group_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "policy_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "SimpleScaling", // preserve AWS's default to make validation easier. 
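+        // NOTE: at the time this file was vendored the API offered two
+        // policy types, "SimpleScaling" and "StepScaling";
+        // getAwsAutoscalingPutScalingPolicyInput below rejects arguments
+        // that do not apply to the chosen type.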
+ }, + "cooldown": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + "estimated_instance_warmup": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + "metric_aggregation_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "min_adjustment_magnitude": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + "min_adjustment_step": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Deprecated: "Use min_adjustment_magnitude instead, otherwise you may see a perpetual diff on this resource.", + ConflictsWith: []string{"min_adjustment_magnitude"}, + }, + "scaling_adjustment": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ConflictsWith: []string{"step_adjustment"}, + }, + "step_adjustment": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ConflictsWith: []string{"scaling_adjustment"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metric_interval_lower_bound": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "metric_interval_upper_bound": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "scaling_adjustment": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + }, + }, + Set: resourceAwsAutoscalingScalingAdjustmentHash, + }, + }, + } +} + +func resourceAwsAutoscalingPolicyCreate(d *schema.ResourceData, meta interface{}) error { + autoscalingconn := meta.(*AWSClient).autoscalingconn + + params, err := getAwsAutoscalingPutScalingPolicyInput(d) + if err != nil { + return err + } + + log.Printf("[DEBUG] AutoScaling PutScalingPolicy: %#v", params) + resp, err := autoscalingconn.PutScalingPolicy(¶ms) + if err != nil { + return fmt.Errorf("Error putting scaling policy: %s", err) + } + + d.Set("arn", resp.PolicyARN) + d.SetId(d.Get("name").(string)) + log.Printf("[INFO] AutoScaling Scaling PolicyARN: %s", d.Get("arn").(string)) + + return resourceAwsAutoscalingPolicyRead(d, meta) +} + +func resourceAwsAutoscalingPolicyRead(d *schema.ResourceData, meta interface{}) error { + p, err := getAwsAutoscalingPolicy(d, meta) + if err != nil { + return err + } + if p == nil { + d.SetId("") + return nil + } + + log.Printf("[DEBUG] Read Scaling Policy: ASG: %s, SP: %s, Obj: %s", d.Get("autoscaling_group_name"), d.Get("name"), p) + + d.Set("adjustment_type", p.AdjustmentType) + d.Set("autoscaling_group_name", p.AutoScalingGroupName) + d.Set("cooldown", p.Cooldown) + d.Set("estimated_instance_warmup", p.EstimatedInstanceWarmup) + d.Set("metric_aggregation_type", p.MetricAggregationType) + d.Set("policy_type", p.PolicyType) + if p.MinAdjustmentMagnitude != nil { + d.Set("min_adjustment_magnitude", p.MinAdjustmentMagnitude) + d.Set("min_adjustment_step", 0) + } else { + d.Set("min_adjustment_step", p.MinAdjustmentStep) + } + d.Set("arn", p.PolicyARN) + d.Set("name", p.PolicyName) + d.Set("scaling_adjustment", p.ScalingAdjustment) + d.Set("step_adjustment", flattenStepAdjustments(p.StepAdjustments)) + + return nil +} + +func resourceAwsAutoscalingPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + autoscalingconn := meta.(*AWSClient).autoscalingconn + + params, inputErr := getAwsAutoscalingPutScalingPolicyInput(d) + if inputErr != nil { + return inputErr + } + + log.Printf("[DEBUG] Autoscaling Update Scaling Policy: %#v", params) + _, err := autoscalingconn.PutScalingPolicy(¶ms) + if err != nil { + return err + } + + return resourceAwsAutoscalingPolicyRead(d, meta) +} + +func resourceAwsAutoscalingPolicyDelete(d *schema.ResourceData, 
meta interface{}) error {
+  autoscalingconn := meta.(*AWSClient).autoscalingconn
+  p, err := getAwsAutoscalingPolicy(d, meta)
+  if err != nil {
+    return err
+  }
+  if p == nil {
+    return nil
+  }
+
+  params := autoscaling.DeletePolicyInput{
+    AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)),
+    PolicyName:           aws.String(d.Get("name").(string)),
+  }
+  log.Printf("[DEBUG] Deleting Autoscaling Policy opts: %s", params)
+  if _, err := autoscalingconn.DeletePolicy(&params); err != nil {
+    return fmt.Errorf("Error deleting Autoscaling Scaling Policy: %s", err)
+  }
+
+  d.SetId("")
+  return nil
+}
+
+// PutScalingPolicy can safely resend all parameters without destroying the
+// resource, so create and update can share this common function. It will error
+// if certain mutually exclusive values are set.
+func getAwsAutoscalingPutScalingPolicyInput(d *schema.ResourceData) (autoscaling.PutScalingPolicyInput, error) {
+  var params = autoscaling.PutScalingPolicyInput{
+    AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)),
+    PolicyName:           aws.String(d.Get("name").(string)),
+  }
+
+  if v, ok := d.GetOk("adjustment_type"); ok {
+    params.AdjustmentType = aws.String(v.(string))
+  }
+
+  if v, ok := d.GetOk("cooldown"); ok {
+    params.Cooldown = aws.Int64(int64(v.(int)))
+  }
+
+  if v, ok := d.GetOk("estimated_instance_warmup"); ok {
+    params.EstimatedInstanceWarmup = aws.Int64(int64(v.(int)))
+  }
+
+  if v, ok := d.GetOk("metric_aggregation_type"); ok {
+    params.MetricAggregationType = aws.String(v.(string))
+  }
+
+  if v, ok := d.GetOk("policy_type"); ok {
+    params.PolicyType = aws.String(v.(string))
+  }
+
+  // If policy_type is "SimpleScaling", scaling_adjustment is required and 0
+  // is an allowed value (GetOk treats 0 as unset, hence the extra condition).
+  if v, ok := d.GetOk("scaling_adjustment"); ok || *params.PolicyType == "SimpleScaling" {
+    params.ScalingAdjustment = aws.Int64(int64(v.(int)))
+  }
+
+  if v, ok := d.GetOk("step_adjustment"); ok {
+    steps, err := expandStepAdjustments(v.(*schema.Set).List())
+    if err != nil {
+      return params, fmt.Errorf("metric_interval_lower_bound and metric_interval_upper_bound must be strings!")
+    }
+    params.StepAdjustments = steps
+  }
+
+  if v, ok := d.GetOk("min_adjustment_magnitude"); ok {
+    params.MinAdjustmentMagnitude = aws.Int64(int64(v.(int)))
+  } else if v, ok := d.GetOk("min_adjustment_step"); ok {
+    params.MinAdjustmentStep = aws.Int64(int64(v.(int)))
+  }
+
+  // Validate our final input to confirm it won't error when sent to AWS.
+  // First, SimpleScaling policy types...
+  if *params.PolicyType == "SimpleScaling" && params.StepAdjustments != nil {
+    return params, fmt.Errorf("SimpleScaling policy types cannot use step_adjustments!")
+  }
+  if *params.PolicyType == "SimpleScaling" && params.MetricAggregationType != nil {
+    return params, fmt.Errorf("SimpleScaling policy types cannot use metric_aggregation_type!")
+  }
+  if *params.PolicyType == "SimpleScaling" && params.EstimatedInstanceWarmup != nil {
+    return params, fmt.Errorf("SimpleScaling policy types cannot use estimated_instance_warmup!")
+  }
+
+  // Second, StepScaling policy types...
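+  // (step_adjustment and estimated_instance_warmup are the StepScaling
+  // counterparts of SimpleScaling's scaling_adjustment and cooldown.)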
+ if *params.PolicyType == "StepScaling" && params.ScalingAdjustment != nil { + return params, fmt.Errorf("StepScaling policy types cannot use scaling_adjustment!") + } + if *params.PolicyType == "StepScaling" && params.Cooldown != nil { + return params, fmt.Errorf("StepScaling policy types cannot use cooldown!") + } + + return params, nil +} + +func getAwsAutoscalingPolicy(d *schema.ResourceData, meta interface{}) (*autoscaling.ScalingPolicy, error) { + autoscalingconn := meta.(*AWSClient).autoscalingconn + + params := autoscaling.DescribePoliciesInput{ + AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)), + PolicyNames: []*string{aws.String(d.Get("name").(string))}, + } + + log.Printf("[DEBUG] AutoScaling Scaling Policy Describe Params: %#v", params) + resp, err := autoscalingconn.DescribePolicies(¶ms) + if err != nil { + //A ValidationError here can mean that either the Policy is missing OR the Autoscaling Group is missing + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "ValidationError" { + log.Printf("[WARNING] %s not found, removing from state", d.Id()) + d.SetId("") + + return nil, nil + } + return nil, fmt.Errorf("Error retrieving scaling policies: %s", err) + } + + // find scaling policy + name := d.Get("name") + for idx, sp := range resp.ScalingPolicies { + if *sp.PolicyName == name { + return resp.ScalingPolicies[idx], nil + } + } + + // policy not found + return nil, nil +} + +func resourceAwsAutoscalingScalingAdjustmentHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + if v, ok := m["metric_interval_lower_bound"]; ok { + buf.WriteString(fmt.Sprintf("%f-", v)) + } + if v, ok := m["metric_interval_upper_bound"]; ok { + buf.WriteString(fmt.Sprintf("%f-", v)) + } + buf.WriteString(fmt.Sprintf("%d-", m["scaling_adjustment"].(int))) + + return hashcode.String(buf.String()) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_schedule.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_schedule.go new file mode 100644 index 000000000..5cfa1c729 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_autoscaling_schedule.go @@ -0,0 +1,190 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/hashicorp/terraform/helper/schema" +) + +const awsAutoscalingScheduleTimeLayout = "2006-01-02T15:04:05Z" + +func resourceAwsAutoscalingSchedule() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsAutoscalingScheduleCreate, + Read: resourceAwsAutoscalingScheduleRead, + Update: resourceAwsAutoscalingScheduleCreate, + Delete: resourceAwsAutoscalingScheduleDelete, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "scheduled_action_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "autoscaling_group_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "start_time": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateASGScheduleTimestamp, + }, + "end_time": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateASGScheduleTimestamp, + }, + "recurrence": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "min_size": { + Type: schema.TypeInt, + 
Optional: true, + Computed: true, + }, + "max_size": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "desired_capacity": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + }, + } +} + +func resourceAwsAutoscalingScheduleCreate(d *schema.ResourceData, meta interface{}) error { + autoscalingconn := meta.(*AWSClient).autoscalingconn + params := &autoscaling.PutScheduledUpdateGroupActionInput{ + AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)), + ScheduledActionName: aws.String(d.Get("scheduled_action_name").(string)), + } + + if attr, ok := d.GetOk("start_time"); ok { + t, err := time.Parse(awsAutoscalingScheduleTimeLayout, attr.(string)) + if err != nil { + return fmt.Errorf("Error Parsing AWS Autoscaling Group Schedule Start Time: %s", err.Error()) + } + params.StartTime = aws.Time(t) + } + + if attr, ok := d.GetOk("end_time"); ok { + t, err := time.Parse(awsAutoscalingScheduleTimeLayout, attr.(string)) + if err != nil { + return fmt.Errorf("Error Parsing AWS Autoscaling Group Schedule End Time: %s", err.Error()) + } + params.EndTime = aws.Time(t) + } + + if attr, ok := d.GetOk("recurrence"); ok { + params.Recurrence = aws.String(attr.(string)) + } + + params.MinSize = aws.Int64(int64(d.Get("min_size").(int))) + params.MaxSize = aws.Int64(int64(d.Get("max_size").(int))) + params.DesiredCapacity = aws.Int64(int64(d.Get("desired_capacity").(int))) + + log.Printf("[INFO] Creating Autoscaling Scheduled Action: %s", d.Get("scheduled_action_name").(string)) + _, err := autoscalingconn.PutScheduledUpdateGroupAction(params) + if err != nil { + return fmt.Errorf("Error Creating Autoscaling Scheduled Action: %s", err.Error()) + } + + d.SetId(d.Get("scheduled_action_name").(string)) + + return resourceAwsAutoscalingScheduleRead(d, meta) +} + +func resourceAwsAutoscalingScheduleRead(d *schema.ResourceData, meta interface{}) error { + sa, err, exists := resourceAwsASGScheduledActionRetrieve(d, meta) + if err != nil { + return err + } + + if !exists { + log.Printf("Error retrieving Autoscaling Scheduled Actions. 
Removing from state") + d.SetId("") + return nil + } + + d.Set("autoscaling_group_name", sa.AutoScalingGroupName) + d.Set("arn", sa.ScheduledActionARN) + d.Set("desired_capacity", sa.DesiredCapacity) + d.Set("min_size", sa.MinSize) + d.Set("max_size", sa.MaxSize) + d.Set("recurrence", sa.Recurrence) + + if sa.StartTime != nil { + d.Set("start_time", sa.StartTime.Format(awsAutoscalingScheduleTimeLayout)) + } + + if sa.EndTime != nil { + d.Set("end_time", sa.EndTime.Format(awsAutoscalingScheduleTimeLayout)) + } + + return nil +} + +func resourceAwsAutoscalingScheduleDelete(d *schema.ResourceData, meta interface{}) error { + autoscalingconn := meta.(*AWSClient).autoscalingconn + + params := &autoscaling.DeleteScheduledActionInput{ + AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)), + ScheduledActionName: aws.String(d.Id()), + } + + log.Printf("[INFO] Deleting Autoscaling Scheduled Action: %s", d.Id()) + _, err := autoscalingconn.DeleteScheduledAction(params) + if err != nil { + return fmt.Errorf("Error deleting Autoscaling Scheduled Action: %s", err.Error()) + } + + return nil +} + +func resourceAwsASGScheduledActionRetrieve(d *schema.ResourceData, meta interface{}) (*autoscaling.ScheduledUpdateGroupAction, error, bool) { + autoscalingconn := meta.(*AWSClient).autoscalingconn + + params := &autoscaling.DescribeScheduledActionsInput{ + AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)), + ScheduledActionNames: []*string{aws.String(d.Id())}, + } + + log.Printf("[INFO] Describing Autoscaling Scheduled Action: %+v", params) + actions, err := autoscalingconn.DescribeScheduledActions(params) + if err != nil { + //A ValidationError here can mean that either the Schedule is missing OR the Autoscaling Group is missing + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "ValidationError" { + log.Printf("[WARNING] %s not found, removing from state", d.Id()) + d.SetId("") + + return nil, nil, false + } + return nil, fmt.Errorf("Error retrieving Autoscaling Scheduled Actions: %s", err), false + } + + if len(actions.ScheduledUpdateGroupActions) != 1 || + *actions.ScheduledUpdateGroupActions[0].ScheduledActionName != d.Id() { + return nil, nil, false + } + + return actions.ScheduledUpdateGroupActions[0], nil, true +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudformation_stack.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudformation_stack.go new file mode 100644 index 000000000..c9448734b --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudformation_stack.go @@ -0,0 +1,652 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/cloudformation" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsCloudFormationStack() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCloudFormationStackCreate, + Read: resourceAwsCloudFormationStackRead, + Update: resourceAwsCloudFormationStackUpdate, + Delete: resourceAwsCloudFormationStackDelete, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "template_body": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: 
validateCloudFormationTemplate, + StateFunc: func(v interface{}) string { + template, _ := normalizeCloudFormationTemplate(v) + return template + }, + }, + "template_url": { + Type: schema.TypeString, + Optional: true, + }, + "capabilities": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "disable_rollback": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + "notification_arns": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "on_failure": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "parameters": { + Type: schema.TypeMap, + Optional: true, + Computed: true, + }, + "outputs": { + Type: schema.TypeMap, + Computed: true, + }, + "policy_body": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateJsonString, + StateFunc: func(v interface{}) string { + json, _ := normalizeJsonString(v) + return json + }, + }, + "policy_url": { + Type: schema.TypeString, + Optional: true, + }, + "timeout_in_minutes": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + "tags": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + "iam_role_arn": { + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func resourceAwsCloudFormationStackCreate(d *schema.ResourceData, meta interface{}) error { + retryTimeout := int64(30) + conn := meta.(*AWSClient).cfconn + + input := cloudformation.CreateStackInput{ + StackName: aws.String(d.Get("name").(string)), + } + if v, ok := d.GetOk("template_body"); ok { + template, err := normalizeCloudFormationTemplate(v) + if err != nil { + return errwrap.Wrapf("template body contains an invalid JSON or YAML: {{err}}", err) + } + input.TemplateBody = aws.String(template) + } + if v, ok := d.GetOk("template_url"); ok { + input.TemplateURL = aws.String(v.(string)) + } + if v, ok := d.GetOk("capabilities"); ok { + input.Capabilities = expandStringList(v.(*schema.Set).List()) + } + if v, ok := d.GetOk("disable_rollback"); ok { + input.DisableRollback = aws.Bool(v.(bool)) + } + if v, ok := d.GetOk("notification_arns"); ok { + input.NotificationARNs = expandStringList(v.(*schema.Set).List()) + } + if v, ok := d.GetOk("on_failure"); ok { + input.OnFailure = aws.String(v.(string)) + } + if v, ok := d.GetOk("parameters"); ok { + input.Parameters = expandCloudFormationParameters(v.(map[string]interface{})) + } + if v, ok := d.GetOk("policy_body"); ok { + policy, err := normalizeJsonString(v) + if err != nil { + return errwrap.Wrapf("policy body contains an invalid JSON: {{err}}", err) + } + input.StackPolicyBody = aws.String(policy) + } + if v, ok := d.GetOk("policy_url"); ok { + input.StackPolicyURL = aws.String(v.(string)) + } + if v, ok := d.GetOk("tags"); ok { + input.Tags = expandCloudFormationTags(v.(map[string]interface{})) + } + if v, ok := d.GetOk("timeout_in_minutes"); ok { + m := int64(v.(int)) + input.TimeoutInMinutes = aws.Int64(m) + if m > retryTimeout { + retryTimeout = m + 5 + log.Printf("[DEBUG] CloudFormation timeout: %d", retryTimeout) + } + } + if v, ok := d.GetOk("iam_role_arn"); ok { + input.RoleARN = aws.String(v.(string)) + } + + log.Printf("[DEBUG] Creating CloudFormation Stack: %s", input) + resp, err := conn.CreateStack(&input) + if err != nil { + return fmt.Errorf("Creating CloudFormation stack failed: %s", err.Error()) + } + + d.SetId(*resp.StackId) + var lastStatus string + + wait := 
resource.StateChangeConf{
+    Pending: []string{
+      "CREATE_IN_PROGRESS",
+      "DELETE_IN_PROGRESS",
+      "ROLLBACK_IN_PROGRESS",
+    },
+    Target: []string{
+      "CREATE_COMPLETE",
+      "CREATE_FAILED",
+      "DELETE_COMPLETE",
+      "DELETE_FAILED",
+      "ROLLBACK_COMPLETE",
+      "ROLLBACK_FAILED",
+    },
+    Timeout:    time.Duration(retryTimeout) * time.Minute,
+    MinTimeout: 1 * time.Second,
+    Refresh: func() (interface{}, string, error) {
+      resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{
+        StackName: aws.String(d.Id()),
+      })
+      if err != nil {
+        log.Printf("[ERROR] Failed to describe stacks: %s", err)
+        return nil, "", err
+      }
+      if len(resp.Stacks) == 0 {
+        // This shouldn't happen unless CloudFormation is inconsistent
+        // See https://github.com/hashicorp/terraform/issues/5487
+        log.Printf("[WARN] CloudFormation stack %q not found.\nresponse: %q",
+          d.Id(), resp)
+        return resp, "", fmt.Errorf(
+          "CloudFormation stack %q vanished unexpectedly during creation.\n"+
+            "Unless you knowingly manually deleted the stack, "+
+            "please report this as a bug at https://github.com/hashicorp/terraform/issues\n"+
+            "along with the config & Terraform version & the details below:\n"+
+            "Full API response: %s\n",
+          d.Id(), resp)
+      }
+
+      status := *resp.Stacks[0].StackStatus
+      lastStatus = status
+      log.Printf("[DEBUG] Current CloudFormation stack status: %q", status)
+
+      return resp, status, err
+    },
+  }
+
+  _, err = wait.WaitForState()
+  if err != nil {
+    return err
+  }
+
+  if lastStatus == "ROLLBACK_COMPLETE" || lastStatus == "ROLLBACK_FAILED" {
+    reasons, err := getCloudFormationRollbackReasons(d.Id(), nil, conn)
+    if err != nil {
+      return fmt.Errorf("Failed getting rollback reasons: %q", err.Error())
+    }
+
+    return fmt.Errorf("%s: %q", lastStatus, reasons)
+  }
+  if lastStatus == "DELETE_COMPLETE" || lastStatus == "DELETE_FAILED" {
+    reasons, err := getCloudFormationDeletionReasons(d.Id(), conn)
+    if err != nil {
+      return fmt.Errorf("Failed getting deletion reasons: %q", err.Error())
+    }
+
+    d.SetId("")
+    return fmt.Errorf("%s: %q", lastStatus, reasons)
+  }
+  if lastStatus == "CREATE_FAILED" {
+    reasons, err := getCloudFormationFailures(d.Id(), conn)
+    if err != nil {
+      return fmt.Errorf("Failed getting failure reasons: %q", err.Error())
+    }
+    return fmt.Errorf("%s: %q", lastStatus, reasons)
+  }
+
+  log.Printf("[INFO] CloudFormation Stack %q created", d.Id())
+
+  return resourceAwsCloudFormationStackRead(d, meta)
+}
+
+func resourceAwsCloudFormationStackRead(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).cfconn
+
+  input := &cloudformation.DescribeStacksInput{
+    StackName: aws.String(d.Id()),
+  }
+  resp, err := conn.DescribeStacks(input)
+  if err != nil {
+    awsErr, ok := err.(awserr.Error)
+    // ValidationError: Stack with id % does not exist
+    if ok && awsErr.Code() == "ValidationError" {
+      log.Printf("[WARN] Removing CloudFormation stack %s as it's already gone", d.Id())
+      d.SetId("")
+      return nil
+    }
+
+    return err
+  }
+
+  stacks := resp.Stacks
+  if len(stacks) < 1 {
+    log.Printf("[WARN] Removing CloudFormation stack %s as it's already gone", d.Id())
+    d.SetId("")
+    return nil
+  }
+  for _, s := range stacks {
+    if *s.StackId == d.Id() && *s.StackStatus == "DELETE_COMPLETE" {
+      log.Printf("[DEBUG] Removing CloudFormation stack %s"+
+        " as it has already been deleted", d.Id())
+      d.SetId("")
+      return nil
+    }
+  }
+
+  tInput := cloudformation.GetTemplateInput{
+    StackName: aws.String(d.Id()),
+  }
+  out, err := conn.GetTemplate(&tInput)
+  if err != nil {
+    return err
+  }
+
+  template, err := 
normalizeCloudFormationTemplate(*out.TemplateBody) + if err != nil { + return errwrap.Wrapf("template body contains an invalid JSON or YAML: {{err}}", err) + } + d.Set("template_body", template) + + stack := stacks[0] + log.Printf("[DEBUG] Received CloudFormation stack: %s", stack) + + d.Set("name", stack.StackName) + d.Set("arn", stack.StackId) + d.Set("iam_role_arn", stack.RoleARN) + + if stack.TimeoutInMinutes != nil { + d.Set("timeout_in_minutes", int(*stack.TimeoutInMinutes)) + } + if stack.Description != nil { + d.Set("description", stack.Description) + } + if stack.DisableRollback != nil { + d.Set("disable_rollback", stack.DisableRollback) + } + if len(stack.NotificationARNs) > 0 { + err = d.Set("notification_arns", schema.NewSet(schema.HashString, flattenStringList(stack.NotificationARNs))) + if err != nil { + return err + } + } + + originalParams := d.Get("parameters").(map[string]interface{}) + err = d.Set("parameters", flattenCloudFormationParameters(stack.Parameters, originalParams)) + if err != nil { + return err + } + + err = d.Set("tags", flattenCloudFormationTags(stack.Tags)) + if err != nil { + return err + } + + err = d.Set("outputs", flattenCloudFormationOutputs(stack.Outputs)) + if err != nil { + return err + } + + if len(stack.Capabilities) > 0 { + err = d.Set("capabilities", schema.NewSet(schema.HashString, flattenStringList(stack.Capabilities))) + if err != nil { + return err + } + } + + return nil +} + +func resourceAwsCloudFormationStackUpdate(d *schema.ResourceData, meta interface{}) error { + retryTimeout := int64(30) + conn := meta.(*AWSClient).cfconn + + input := &cloudformation.UpdateStackInput{ + StackName: aws.String(d.Id()), + } + + // Either TemplateBody, TemplateURL or UsePreviousTemplate are required + if v, ok := d.GetOk("template_url"); ok { + input.TemplateURL = aws.String(v.(string)) + } + if v, ok := d.GetOk("template_body"); ok && input.TemplateURL == nil { + template, err := normalizeCloudFormationTemplate(v) + if err != nil { + return errwrap.Wrapf("template body contains an invalid JSON or YAML: {{err}}", err) + } + input.TemplateBody = aws.String(template) + } + + // Capabilities must be present whether they are changed or not + if v, ok := d.GetOk("capabilities"); ok { + input.Capabilities = expandStringList(v.(*schema.Set).List()) + } + + if d.HasChange("notification_arns") { + input.NotificationARNs = expandStringList(d.Get("notification_arns").(*schema.Set).List()) + } + + // Parameters must be present whether they are changed or not + if v, ok := d.GetOk("parameters"); ok { + input.Parameters = expandCloudFormationParameters(v.(map[string]interface{})) + } + + if d.HasChange("policy_body") { + policy, err := normalizeJsonString(d.Get("policy_body")) + if err != nil { + return errwrap.Wrapf("policy body contains an invalid JSON: {{err}}", err) + } + input.StackPolicyBody = aws.String(policy) + } + if d.HasChange("policy_url") { + input.StackPolicyURL = aws.String(d.Get("policy_url").(string)) + } + + if d.HasChange("iam_role_arn") { + input.RoleARN = aws.String(d.Get("iam_role_arn").(string)) + } + + log.Printf("[DEBUG] Updating CloudFormation stack: %s", input) + _, err := conn.UpdateStack(input) + if err != nil { + awsErr, ok := err.(awserr.Error) + // ValidationError: No updates are to be performed. + if !ok || + awsErr.Code() != "ValidationError" || + awsErr.Message() != "No updates are to be performed." 
+
+	if v, ok := d.GetOk("timeout_in_minutes"); ok {
+		m := int64(v.(int))
+		if m > retryTimeout {
+			retryTimeout = m + 5
+			log.Printf("[DEBUG] CloudFormation timeout: %d", retryTimeout)
+		}
+	}
+	var lastStatus string
+	var stackId string
+	wait := resource.StateChangeConf{
+		Pending: []string{
+			"UPDATE_COMPLETE_CLEANUP_IN_PROGRESS",
+			"UPDATE_IN_PROGRESS",
+			"UPDATE_ROLLBACK_IN_PROGRESS",
+			"UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS",
+		},
+		Target: []string{
+			"CREATE_COMPLETE", // If no stack update was performed
+			"UPDATE_COMPLETE",
+			"UPDATE_ROLLBACK_COMPLETE",
+			"UPDATE_ROLLBACK_FAILED",
+		},
+		Timeout:    time.Duration(retryTimeout) * time.Minute,
+		MinTimeout: 5 * time.Second,
+		Refresh: func() (interface{}, string, error) {
+			resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{
+				StackName: aws.String(d.Id()),
+			})
+			if err != nil {
+				log.Printf("[ERROR] Failed to describe stacks: %s", err)
+				return nil, "", err
+			}
+
+			stackId = aws.StringValue(resp.Stacks[0].StackId)
+
+			status := *resp.Stacks[0].StackStatus
+			lastStatus = status
+			log.Printf("[DEBUG] Current CloudFormation stack status: %q", status)
+
+			return resp, status, err
+		},
+	}
+
+	_, err = wait.WaitForState()
+	if err != nil {
+		return err
+	}
+
+	if lastStatus == "UPDATE_ROLLBACK_COMPLETE" || lastStatus == "UPDATE_ROLLBACK_FAILED" {
+		reasons, err := getCloudFormationRollbackReasons(stackId, lastUpdatedTime, conn)
+		if err != nil {
+			return fmt.Errorf("Failed getting details about rollback: %q", err.Error())
+		}
+
+		return fmt.Errorf("%s: %q", lastStatus, reasons)
+	}
+
+	log.Printf("[DEBUG] CloudFormation stack %q has been updated", stackId)
+
+	return resourceAwsCloudFormationStackRead(d, meta)
+}
+
+func resourceAwsCloudFormationStackDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cfconn
+
+	input := &cloudformation.DeleteStackInput{
+		StackName: aws.String(d.Id()),
+	}
+	log.Printf("[DEBUG] Deleting CloudFormation stack %s", input)
+	_, err := conn.DeleteStack(input)
+	if err != nil {
+		awsErr, ok := err.(awserr.Error)
+		if !ok {
+			return err
+		}
+
+		if awsErr.Code() == "ValidationError" {
+			// Ignore a stack which has already been deleted
+			return nil
+		}
+		return err
+	}
+	var lastStatus string
+	wait := resource.StateChangeConf{
+		Pending: []string{
+			"DELETE_IN_PROGRESS",
+			"ROLLBACK_IN_PROGRESS",
+		},
+		Target: []string{
+			"DELETE_COMPLETE",
+			"DELETE_FAILED",
+		},
+		Timeout:    30 * time.Minute,
+		MinTimeout: 5 * time.Second,
+		Refresh: func() (interface{}, string, error) {
+			resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{
+				StackName: aws.String(d.Id()),
+			})
+			if err != nil {
+				awsErr, ok := err.(awserr.Error)
+				if !ok {
+					return nil, "", err
+				}
+
+				log.Printf("[DEBUG] Error when deleting CloudFormation stack: %s: %s",
+					awsErr.Code(), awsErr.Message())
+
+				// ValidationError: Stack with id % does not exist
+				if awsErr.Code() == "ValidationError" {
+					return resp, "DELETE_COMPLETE", nil
+				}
+				return nil, "", err
+			}
+
+			if len(resp.Stacks) == 0 {
+				log.Printf("[DEBUG] CloudFormation stack %q is already gone", d.Id())
+				return resp, "DELETE_COMPLETE", nil
+			}
+
+			status := *resp.Stacks[0].StackStatus
+			lastStatus = status
+			log.Printf("[DEBUG] Current CloudFormation stack status: %q", status)
+
+			return resp, status, err
+		},
+	}
+
+	_, err = wait.WaitForState()
+	if err != nil {
+		return err
+	}
+
+	if lastStatus == "DELETE_FAILED" {
+		reasons, err := getCloudFormationFailures(d.Id(), conn)
+		if err != nil {
+			return fmt.Errorf("Failed getting reasons of failure: %q", err.Error())
+		}
+
+		return fmt.Errorf("%s: %q", lastStatus, reasons)
+	}
+
+	log.Printf("[DEBUG] CloudFormation stack %q has been deleted", d.Id())
+
+	d.SetId("")
+
+	return nil
+}
+
+// getLastCfEventTimestamp takes the first event in a list of events ordered
+// from the newest to the oldest and extracts its timestamp.
+// LastUpdatedTime is not usable here because it only reflects the last
+// *successful* update.
+func getLastCfEventTimestamp(stackName string, conn *cloudformation.CloudFormation) (
+	*time.Time, error) {
+	output, err := conn.DescribeStackEvents(&cloudformation.DescribeStackEventsInput{
+		StackName: aws.String(stackName),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return output.StackEvents[0].Timestamp, nil
+}
+
+func getCloudFormationRollbackReasons(stackId string, afterTime *time.Time, conn *cloudformation.CloudFormation) ([]string, error) {
+	var failures []string
+
+	err := conn.DescribeStackEventsPages(&cloudformation.DescribeStackEventsInput{
+		StackName: aws.String(stackId),
+	}, func(page *cloudformation.DescribeStackEventsOutput, lastPage bool) bool {
+		for _, e := range page.StackEvents {
+			if afterTime != nil && !e.Timestamp.After(*afterTime) {
+				continue
+			}
+
+			if cfStackEventIsFailure(e) || cfStackEventIsRollback(e) {
+				failures = append(failures, *e.ResourceStatusReason)
+			}
+		}
+		return !lastPage
+	})
+
+	return failures, err
+}
+
+func getCloudFormationDeletionReasons(stackId string, conn *cloudformation.CloudFormation) ([]string, error) {
+	var failures []string
+
+	err := conn.DescribeStackEventsPages(&cloudformation.DescribeStackEventsInput{
+		StackName: aws.String(stackId),
+	}, func(page *cloudformation.DescribeStackEventsOutput, lastPage bool) bool {
+		for _, e := range page.StackEvents {
+			if cfStackEventIsFailure(e) || cfStackEventIsStackDeletion(e) {
+				failures = append(failures, *e.ResourceStatusReason)
+			}
+		}
+		return !lastPage
+	})
+
+	return failures, err
+}
+
+func getCloudFormationFailures(stackId string, conn *cloudformation.CloudFormation) ([]string, error) {
+	var failures []string
+
+	err := conn.DescribeStackEventsPages(&cloudformation.DescribeStackEventsInput{
+		StackName: aws.String(stackId),
+	}, func(page *cloudformation.DescribeStackEventsOutput, lastPage bool) bool {
+		for _, e := range page.StackEvents {
+			if cfStackEventIsFailure(e) {
+				failures = append(failures, *e.ResourceStatusReason)
+			}
+		}
+		return !lastPage
+	})
+
+	return failures, err
+}
+
+func cfStackEventIsFailure(event *cloudformation.StackEvent) bool {
+	failRe := regexp.MustCompile("_FAILED$")
+	return failRe.MatchString(*event.ResourceStatus) && event.ResourceStatusReason != nil
+}
+
+func cfStackEventIsRollback(event *cloudformation.StackEvent) bool {
+	rollbackRe := regexp.MustCompile("^ROLLBACK_")
+	return rollbackRe.MatchString(*event.ResourceStatus) && event.ResourceStatusReason != nil
+}
+
+func cfStackEventIsStackDeletion(event *cloudformation.StackEvent) bool {
+	return *event.ResourceStatus == "DELETE_IN_PROGRESS" &&
+		*event.ResourceType == "AWS::CloudFormation::Stack" &&
+		event.ResourceStatusReason != nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudfront_distribution.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudfront_distribution.go
new file mode 100644
index 000000000..e5c91cde9
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudfront_distribution.go
@@ -0,0 +1,732 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/cloudfront"
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform/helper/validation"
+)
+
+func resourceAwsCloudFrontDistribution() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsCloudFrontDistributionCreate,
+		Read:   resourceAwsCloudFrontDistributionRead,
+		Update: resourceAwsCloudFrontDistributionUpdate,
+		Delete: resourceAwsCloudFrontDistributionDelete,
+		Importer: &schema.ResourceImporter{
+			State: resourceAwsCloudFrontDistributionImport,
+		},
+
+		Schema: map[string]*schema.Schema{
+			"arn": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"aliases": {
+				Type:     schema.TypeSet,
+				Optional: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+				Set:      aliasesHash,
+			},
+			"cache_behavior": {
+				Type:     schema.TypeSet,
+				Optional: true,
+				Set:      cacheBehaviorHash,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"allowed_methods": {
+							Type:     schema.TypeList,
+							Required: true,
+							Elem:     &schema.Schema{Type: schema.TypeString},
+						},
+						"cached_methods": {
+							Type:     schema.TypeList,
+							Required: true,
+							Elem:     &schema.Schema{Type: schema.TypeString},
+						},
+						"compress": {
+							Type:     schema.TypeBool,
+							Optional: true,
+							Default:  false,
+						},
+						"default_ttl": {
+							Type:     schema.TypeInt,
+							Required: true,
+						},
+						"forwarded_values": {
+							Type:     schema.TypeSet,
+							Required: true,
+							Set:      forwardedValuesHash,
+							MaxItems: 1,
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"cookies": {
+										Type:     schema.TypeSet,
+										Required: true,
+										Set:      cookiePreferenceHash,
+										MaxItems: 1,
+										Elem: &schema.Resource{
+											Schema: map[string]*schema.Schema{
+												"forward": {
+													Type:     schema.TypeString,
+													Required: true,
+												},
+												"whitelisted_names": {
+													Type:     schema.TypeList,
+													Optional: true,
+													Elem:     &schema.Schema{Type: schema.TypeString},
+												},
+											},
+										},
+									},
+									"headers": {
+										Type:     schema.TypeList,
+										Optional: true,
+										Elem:     &schema.Schema{Type: schema.TypeString},
+									},
+									"query_string": {
+										Type:     schema.TypeBool,
+										Required: true,
+									},
+									"query_string_cache_keys": {
+										Type:     schema.TypeList,
+										Optional: true,
+										Elem:     &schema.Schema{Type: schema.TypeString},
+									},
+								},
+							},
+						},
+						"lambda_function_association": {
+							Type:     schema.TypeSet,
+							Optional: true,
+							MaxItems: 4,
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"event_type": {
+										Type:     schema.TypeString,
+										Required: true,
+									},
+									"lambda_arn": {
+										Type:     schema.TypeString,
+										Required: true,
+									},
+								},
+							},
+							Set: lambdaFunctionAssociationHash,
+						},
+						"max_ttl": {
+							Type:     schema.TypeInt,
+							Required: true,
+						},
+						"min_ttl": {
+							Type:     schema.TypeInt,
+							Required: true,
+						},
+						"path_pattern": {
+							Type:     schema.TypeString,
+							Required: true,
+						},
+						"smooth_streaming": {
+							Type:     schema.TypeBool,
+							Optional: true,
+						},
+						"target_origin_id": {
+							Type:     schema.TypeString,
+							Required: true,
+						},
+						"trusted_signers": {
+							Type:     schema.TypeList,
+							Optional: true,
+							Elem:     &schema.Schema{Type: schema.TypeString},
+						},
+						"viewer_protocol_policy": {
+							Type:     schema.TypeString,
+							Required: true,
+						},
+					},
+				},
+			},
+			"comment": {
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+			"custom_error_response": {
+				Type:     schema.TypeSet,
+				Optional: true,
+				Set:      customErrorResponseHash,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"error_caching_min_ttl": {
+							Type:     schema.TypeInt,
+							Optional: true,
+						},
+						"error_code": {
+							Type:     schema.TypeInt,
+							Required: true,
+						},
+						"response_code": {
+							Type:     schema.TypeInt,
+							Optional: true,
+						},
+						"response_page_path": {
+							Type:     schema.TypeString,
+							Optional: true,
+						},
+					},
+				},
+			},
+			"default_cache_behavior": {
+				Type:     schema.TypeSet,
+				Required: true,
+				Set:      defaultCacheBehaviorHash,
+				MaxItems: 1,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"allowed_methods": {
+							Type:     schema.TypeList,
+							Required: true,
+							Elem:     &schema.Schema{Type: schema.TypeString},
+						},
+						"cached_methods": {
+							Type:     schema.TypeList,
+							Required: true,
+							Elem:     &schema.Schema{Type: schema.TypeString},
+						},
+						"compress": {
+							Type:     schema.TypeBool,
+							Optional: true,
+							Default:  false,
+						},
+						"default_ttl": {
+							Type:     schema.TypeInt,
+							Required: true,
+						},
+						"forwarded_values": {
+							Type:     schema.TypeSet,
+							Required: true,
+							Set:      forwardedValuesHash,
+							MaxItems: 1,
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"cookies": {
+										Type:     schema.TypeSet,
+										Required: true,
+										Set:      cookiePreferenceHash,
+										MaxItems: 1,
+										Elem: &schema.Resource{
+											Schema: map[string]*schema.Schema{
+												"forward": {
+													Type:     schema.TypeString,
+													Required: true,
+												},
+												"whitelisted_names": {
+													Type:     schema.TypeList,
+													Optional: true,
+													Elem:     &schema.Schema{Type: schema.TypeString},
+												},
+											},
+										},
+									},
+									"headers": {
+										Type:     schema.TypeList,
+										Optional: true,
+										Elem:     &schema.Schema{Type: schema.TypeString},
+									},
+									"query_string": {
+										Type:     schema.TypeBool,
+										Required: true,
+									},
+									"query_string_cache_keys": {
+										Type:     schema.TypeList,
+										Optional: true,
+										Elem:     &schema.Schema{Type: schema.TypeString},
+									},
+								},
+							},
+						},
+						"lambda_function_association": {
+							Type:     schema.TypeSet,
+							Optional: true,
+							MaxItems: 4,
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"event_type": {
+										Type:     schema.TypeString,
+										Required: true,
+									},
+									"lambda_arn": {
+										Type:     schema.TypeString,
+										Required: true,
+									},
+								},
+							},
+							Set: lambdaFunctionAssociationHash,
+						},
+						"max_ttl": {
+							Type:     schema.TypeInt,
+							Required: true,
+						},
+						"min_ttl": {
+							Type:     schema.TypeInt,
+							Required: true,
+						},
+						"smooth_streaming": {
+							Type:     schema.TypeBool,
+							Optional: true,
+						},
+						"target_origin_id": {
+							Type:     schema.TypeString,
+							Required: true,
+						},
+						"trusted_signers": {
+							Type:     schema.TypeList,
+							Optional: true,
+							Elem:     &schema.Schema{Type: schema.TypeString},
+						},
+						"viewer_protocol_policy": {
+							Type:     schema.TypeString,
+							Required: true,
+						},
+					},
+				},
+			},
+			"default_root_object": {
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+			"enabled": {
+				Type:     schema.TypeBool,
+				Required: true,
+			},
+			"http_version": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				Default:      "http2",
+				ValidateFunc: validateHTTP,
+			},
+			"logging_config": {
+				Type:     schema.TypeSet,
+				Optional: true,
+				Set:      loggingConfigHash,
+				MaxItems: 1,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"bucket": {
+							Type:     schema.TypeString,
+							Required: true,
+						},
+						"include_cookies": {
+							Type:     schema.TypeBool,
+							Optional: true,
+							Default:  false,
+						},
+						"prefix": {
+							Type:     schema.TypeString,
+							Optional: true,
+							Default:  "",
+						},
+					},
+				},
+			},
+			"origin": {
+				Type:     schema.TypeSet,
+				Required: true,
+				Set:      originHash,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"custom_origin_config": {
+							Type:          schema.TypeSet,
+							Optional:      true,
+							ConflictsWith: []string{"origin.s3_origin_config"},
+							Set:           customOriginConfigHash,
+							MaxItems:      1,
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"http_port": {
+										Type:     schema.TypeInt,
+										Required: true,
+									},
+									"https_port": {
+										Type:     schema.TypeInt,
+										Required: true,
+									},
+									"origin_keepalive_timeout": {
+										Type:         schema.TypeInt,
+										Optional:     true,
+										Default:      5,
+										ValidateFunc: validation.IntBetween(1, 60),
+									},
+									"origin_read_timeout": {
+										Type:         schema.TypeInt,
+										Optional:     true,
+										Default:      30,
+										ValidateFunc: validation.IntBetween(4, 60),
+									},
+									"origin_protocol_policy": {
+										Type:     schema.TypeString,
+										Required: true,
+									},
+									"origin_ssl_protocols": {
+										Type:     schema.TypeList,
+										Required: true,
+										Elem:     &schema.Schema{Type: schema.TypeString},
+									},
+								},
+							},
+						},
+						"domain_name": {
+							Type:     schema.TypeString,
+							Required: true,
+						},
+						"custom_header": {
+							Type:     schema.TypeSet,
+							Optional: true,
+							Set:      originCustomHeaderHash,
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"name": {
+										Type:     schema.TypeString,
+										Required: true,
+									},
+									"value": {
+										Type:     schema.TypeString,
+										Required: true,
+									},
+								},
+							},
+						},
+						"origin_id": {
+							Type:     schema.TypeString,
+							Required: true,
+						},
+						"origin_path": {
+							Type:     schema.TypeString,
+							Optional: true,
+						},
+						"s3_origin_config": {
+							Type:          schema.TypeSet,
+							Optional:      true,
+							ConflictsWith: []string{"origin.custom_origin_config"},
+							Set:           s3OriginConfigHash,
+							MaxItems:      1,
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"origin_access_identity": {
+										Type:     schema.TypeString,
+										Required: true,
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+			"price_class": {
+				Type:     schema.TypeString,
+				Optional: true,
+				Default:  "PriceClass_All",
+			},
+			"restrictions": {
+				Type:     schema.TypeSet,
+				Required: true,
+				Set:      restrictionsHash,
+				MaxItems: 1,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"geo_restriction": {
+							Type:     schema.TypeSet,
+							Required: true,
+							Set:      geoRestrictionHash,
+							MaxItems: 1,
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"locations": {
+										Type:     schema.TypeList,
+										Optional: true,
+										Elem:     &schema.Schema{Type: schema.TypeString},
+									},
+									"restriction_type": {
+										Type:     schema.TypeString,
+										Required: true,
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+			"viewer_certificate": {
+				Type:     schema.TypeSet,
+				Required: true,
+				Set:      viewerCertificateHash,
+				MaxItems: 1,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"acm_certificate_arn": {
+							Type:          schema.TypeString,
+							Optional:      true,
+							ConflictsWith: []string{"viewer_certificate.cloudfront_default_certificate", "viewer_certificate.iam_certificate_id"},
+						},
+						"cloudfront_default_certificate": {
+							Type:          schema.TypeBool,
+							Optional:      true,
+							ConflictsWith: []string{"viewer_certificate.acm_certificate_arn", "viewer_certificate.iam_certificate_id"},
+						},
+						"iam_certificate_id": {
+							Type:          schema.TypeString,
+							Optional:      true,
+							ConflictsWith: []string{"viewer_certificate.acm_certificate_arn", "viewer_certificate.cloudfront_default_certificate"},
+						},
+						"minimum_protocol_version": {
+							Type:     schema.TypeString,
+							Optional: true,
+							Default:  "SSLv3",
+						},
+						"ssl_support_method": {
+							Type:     schema.TypeString,
+							Optional: true,
+						},
+					},
+				},
+			},
+			"web_acl_id": {
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+			"caller_reference": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"status": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"active_trusted_signers": {
+				Type:     schema.TypeMap,
+				Computed: true,
+			},
+			"domain_name": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"last_modified_time": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"in_progress_validation_batches": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+			"etag": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"hosted_zone_id": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			// retain_on_delete is a non-API attribute that may help facilitate
+			// speedy deletion of a resource. It's mainly here for testing
+			// purposes, so enable it at your own risk.
+			"retain_on_delete": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				Default:  false,
+			},
+			"is_ipv6_enabled": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				Default:  false,
+			},
+
+			"tags": tagsSchema(),
+		},
+	}
+}
+
+func resourceAwsCloudFrontDistributionCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudfrontconn
+
+	params := &cloudfront.CreateDistributionWithTagsInput{
+		DistributionConfigWithTags: &cloudfront.DistributionConfigWithTags{
+			DistributionConfig: expandDistributionConfig(d),
+			Tags:               tagsFromMapCloudFront(d.Get("tags").(map[string]interface{})),
+		},
+	}
+
+	resp, err := conn.CreateDistributionWithTags(params)
+	if err != nil {
+		return err
+	}
+	d.SetId(*resp.Distribution.Id)
+	return resourceAwsCloudFrontDistributionRead(d, meta)
+}
+
+func resourceAwsCloudFrontDistributionRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudfrontconn
+	params := &cloudfront.GetDistributionInput{
+		Id: aws.String(d.Id()),
+	}
+
+	resp, err := conn.GetDistribution(params)
+	if err != nil {
+		if errcode, ok := err.(awserr.Error); ok && errcode.Code() == "NoSuchDistribution" {
+			log.Printf("[WARN] No Distribution found: %s", d.Id())
+			d.SetId("")
+			return nil
+		}
+
+		return err
+	}
+
+	// Update attributes from DistributionConfig
+	err = flattenDistributionConfig(d, resp.Distribution.DistributionConfig)
+	if err != nil {
+		return err
+	}
+	// Update other attributes outside of DistributionConfig
+	d.SetId(*resp.Distribution.Id)
+	err = d.Set("active_trusted_signers", flattenActiveTrustedSigners(resp.Distribution.ActiveTrustedSigners))
+	if err != nil {
+		return err
+	}
+	d.Set("status", resp.Distribution.Status)
+	d.Set("domain_name", resp.Distribution.DomainName)
+	d.Set("last_modified_time", aws.String(resp.Distribution.LastModifiedTime.String()))
+	d.Set("in_progress_validation_batches", resp.Distribution.InProgressInvalidationBatches)
+	d.Set("etag", resp.ETag)
+	d.Set("arn", resp.Distribution.ARN)
+
+	tagResp, err := conn.ListTagsForResource(&cloudfront.ListTagsForResourceInput{
+		Resource: aws.String(d.Get("arn").(string)),
+	})
+
+	if err != nil {
+		return errwrap.Wrapf(fmt.Sprintf(
+			"Error retrieving tags for CloudFront Distribution %q (ARN: %q): {{err}}",
+			d.Id(), d.Get("arn").(string)), err)
+	}
+
+	if err := d.Set("tags", tagsToMapCloudFront(tagResp.Tags)); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func resourceAwsCloudFrontDistributionUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudfrontconn
+	params := &cloudfront.UpdateDistributionInput{
+		Id:                 aws.String(d.Id()),
+		DistributionConfig: expandDistributionConfig(d),
+		IfMatch:            aws.String(d.Get("etag").(string)),
+	}
+	_, err := conn.UpdateDistribution(params)
+	if err != nil {
+		return err
+	}
+
+	if err := setTagsCloudFront(conn, d, d.Get("arn").(string)); err != nil {
+		return err
+	}
+
+	return resourceAwsCloudFrontDistributionRead(d, meta)
+}
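+
+// CloudFront only accepts a delete once a distribution has been disabled and
+// the change has fully propagated, which is why Delete below first updates
+// the distribution with enabled = false and then waits for the Deployed state.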
+func resourceAwsCloudFrontDistributionDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudfrontconn
+
+	// manually disable the distribution first
+	d.Set("enabled", false)
+	err := resourceAwsCloudFrontDistributionUpdate(d, meta)
+	if err != nil {
+		return err
+	}
+
+	// skip delete if retain_on_delete is enabled
+	if d.Get("retain_on_delete").(bool) {
+		log.Printf("[WARN] Removing CloudFront Distribution ID %q with `retain_on_delete` set. Please delete this distribution manually.", d.Id())
+		d.SetId("")
+		return nil
+	}
+
+	// Distribution needs to be in deployed state again before it can be deleted.
+	err = resourceAwsCloudFrontDistributionWaitUntilDeployed(d.Id(), meta)
+	if err != nil {
+		return err
+	}
+
+	// now delete
+	params := &cloudfront.DeleteDistributionInput{
+		Id:      aws.String(d.Id()),
+		IfMatch: aws.String(d.Get("etag").(string)),
+	}
+
+	_, err = conn.DeleteDistribution(params)
+	if err != nil {
+		return err
+	}
+
+	// Done
+	d.SetId("")
+	return nil
+}
+
+// resourceAwsCloudFrontDistributionWaitUntilDeployed blocks until the
+// distribution is deployed. It currently takes roughly 15 minutes to deploy,
+// but that might change in the future.
+func resourceAwsCloudFrontDistributionWaitUntilDeployed(id string, meta interface{}) error {
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"InProgress"},
+		Target:     []string{"Deployed"},
+		Refresh:    resourceAwsCloudFrontWebDistributionStateRefreshFunc(id, meta),
+		Timeout:    70 * time.Minute,
+		MinTimeout: 15 * time.Second,
+		Delay:      10 * time.Minute,
+	}
+
+	_, err := stateConf.WaitForState()
+	return err
+}
+
+// The refresh function for resourceAwsCloudFrontDistributionWaitUntilDeployed.
+func resourceAwsCloudFrontWebDistributionStateRefreshFunc(id string, meta interface{}) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		conn := meta.(*AWSClient).cloudfrontconn
+		params := &cloudfront.GetDistributionInput{
+			Id: aws.String(id),
+		}
+
+		resp, err := conn.GetDistribution(params)
+		if err != nil {
+			log.Printf("[WARN] Error retrieving CloudFront Distribution %q details: %s", id, err)
+			return nil, "", err
+		}
+
+		if resp == nil {
+			return nil, "", nil
+		}
+
+		return resp.Distribution, *resp.Distribution.Status, nil
+	}
+}
+
+// validateHTTP ensures that the http_version resource parameter is correct.
+func validateHTTP(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+
+	if value != "http1.1" && value != "http2" {
+		errors = append(errors, fmt.Errorf(
+			"%q contains an invalid HTTP version parameter %q. Valid parameters are either %q or %q.",
+			k, value, "http1.1", "http2"))
+	}
+	return
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudfront_origin_access_identity.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudfront_origin_access_identity.go
new file mode 100644
index 000000000..2c4053741
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudfront_origin_access_identity.go
@@ -0,0 +1,139 @@
+package aws
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/cloudfront"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsCloudFrontOriginAccessIdentity() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsCloudFrontOriginAccessIdentityCreate,
+		Read:   resourceAwsCloudFrontOriginAccessIdentityRead,
+		Update: resourceAwsCloudFrontOriginAccessIdentityUpdate,
+		Delete: resourceAwsCloudFrontOriginAccessIdentityDelete,
+		Importer: &schema.ResourceImporter{
+			State: schema.ImportStatePassthrough,
+		},
+
+		Schema: map[string]*schema.Schema{
+			"comment": {
+				Type:     schema.TypeString,
+				Optional: true,
+				Default:  "",
+			},
+			"caller_reference": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"cloudfront_access_identity_path": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"etag": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"iam_arn": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"s3_canonical_user_id": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceAwsCloudFrontOriginAccessIdentityCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudfrontconn
+	params := &cloudfront.CreateCloudFrontOriginAccessIdentityInput{
+		CloudFrontOriginAccessIdentityConfig: expandOriginAccessIdentityConfig(d),
+	}
+
+	resp, err := conn.CreateCloudFrontOriginAccessIdentity(params)
+	if err != nil {
+		return err
+	}
+	d.SetId(*resp.CloudFrontOriginAccessIdentity.Id)
+	return resourceAwsCloudFrontOriginAccessIdentityRead(d, meta)
+}
+
+func resourceAwsCloudFrontOriginAccessIdentityRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudfrontconn
+	params := &cloudfront.GetCloudFrontOriginAccessIdentityInput{
+		Id: aws.String(d.Id()),
+	}
+
+	resp, err := conn.GetCloudFrontOriginAccessIdentity(params)
+	if err != nil {
+		return err
+	}
+
+	// Update attributes from OriginAccessIdentityConfig
+	flattenOriginAccessIdentityConfig(d, resp.CloudFrontOriginAccessIdentity.CloudFrontOriginAccessIdentityConfig)
+	// Update other attributes outside of OriginAccessIdentityConfig
+	d.SetId(*resp.CloudFrontOriginAccessIdentity.Id)
+	d.Set("etag", resp.ETag)
+	d.Set("s3_canonical_user_id", resp.CloudFrontOriginAccessIdentity.S3CanonicalUserId)
+	d.Set("cloudfront_access_identity_path", fmt.Sprintf("origin-access-identity/cloudfront/%s", *resp.CloudFrontOriginAccessIdentity.Id))
+	d.Set("iam_arn", fmt.Sprintf("arn:%s:iam::cloudfront:user/CloudFront Origin Access Identity %s",
+		meta.(*AWSClient).partition, *resp.CloudFrontOriginAccessIdentity.Id))
+	return nil
+}
+
+func resourceAwsCloudFrontOriginAccessIdentityUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudfrontconn
+	params := &cloudfront.UpdateCloudFrontOriginAccessIdentityInput{
+		Id: aws.String(d.Id()),
+		CloudFrontOriginAccessIdentityConfig: expandOriginAccessIdentityConfig(d),
+		IfMatch: aws.String(d.Get("etag").(string)),
+	}
+	_, err := conn.UpdateCloudFrontOriginAccessIdentity(params)
+	if err != nil {
+		return err
+	}
+
+	return resourceAwsCloudFrontOriginAccessIdentityRead(d, meta)
+}
+
+func resourceAwsCloudFrontOriginAccessIdentityDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudfrontconn
+	params := &cloudfront.DeleteCloudFrontOriginAccessIdentityInput{
+		Id:      aws.String(d.Id()),
+		IfMatch: aws.String(d.Get("etag").(string)),
+	}
+
+	_, err := conn.DeleteCloudFrontOriginAccessIdentity(params)
+	if err != nil {
+		return err
+	}
+
+	// Done
+	d.SetId("")
+	return nil
+}
+
+func expandOriginAccessIdentityConfig(d *schema.ResourceData) *cloudfront.OriginAccessIdentityConfig {
+	originAccessIdentityConfig := &cloudfront.OriginAccessIdentityConfig{
+		Comment: aws.String(d.Get("comment").(string)),
+	}
+	// This sets CallerReference if it's still pending computation (i.e. a new resource)
+	if v, ok := d.GetOk("caller_reference"); !ok {
+		originAccessIdentityConfig.CallerReference = aws.String(time.Now().Format(time.RFC3339Nano))
+	} else {
+		originAccessIdentityConfig.CallerReference = aws.String(v.(string))
+	}
+	return originAccessIdentityConfig
+}
+
+func flattenOriginAccessIdentityConfig(d *schema.ResourceData, originAccessIdentityConfig *cloudfront.OriginAccessIdentityConfig) {
+	if originAccessIdentityConfig.Comment != nil {
+		d.Set("comment", originAccessIdentityConfig.Comment)
+	}
+	d.Set("caller_reference", originAccessIdentityConfig.CallerReference)
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudtrail.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudtrail.go
new file mode 100644
index 000000000..8c59a5d94
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudtrail.go
@@ -0,0 +1,329 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/cloudtrail"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsCloudTrail() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsCloudTrailCreate,
+		Read:   resourceAwsCloudTrailRead,
+		Update: resourceAwsCloudTrailUpdate,
+		Delete: resourceAwsCloudTrailDelete,
+		Importer: &schema.ResourceImporter{
+			State: schema.ImportStatePassthrough,
+		},
+
+		Schema: map[string]*schema.Schema{
+			"name": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"enable_logging": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				Default:  true,
+			},
+			"s3_bucket_name": {
+				Type:     schema.TypeString,
+				Required: true,
+			},
+			"s3_key_prefix": {
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+			"cloud_watch_logs_role_arn": {
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+			"cloud_watch_logs_group_arn": {
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+			"include_global_service_events": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				Default:  true,
+			},
+			"is_multi_region_trail": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				Default:  false,
+			},
+			"sns_topic_name": {
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+			"enable_log_file_validation": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				Default:  false,
+			},
+			"kms_key_id": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				ValidateFunc: validateArn,
+			},
+			"home_region": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"arn": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"tags": tagsSchema(),
+		},
+	}
+}
+
+func resourceAwsCloudTrailCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudtrailconn
+
+	input := cloudtrail.CreateTrailInput{
+		Name:         aws.String(d.Get("name").(string)),
+		S3BucketName: aws.String(d.Get("s3_bucket_name").(string)),
+	}
+
+	if v, ok := d.GetOk("cloud_watch_logs_group_arn"); ok {
+		input.CloudWatchLogsLogGroupArn = aws.String(v.(string))
+	}
+	if v, ok := d.GetOk("cloud_watch_logs_role_arn"); ok {
+		input.CloudWatchLogsRoleArn = aws.String(v.(string))
+	}
+	if v, ok := d.GetOk("include_global_service_events"); ok {
+		input.IncludeGlobalServiceEvents = aws.Bool(v.(bool))
+	}
+	if v, ok := d.GetOk("is_multi_region_trail"); ok {
+		input.IsMultiRegionTrail = aws.Bool(v.(bool))
+	}
+	if v, ok := d.GetOk("enable_log_file_validation"); ok {
+		input.EnableLogFileValidation = aws.Bool(v.(bool))
+	}
+	if v, ok := d.GetOk("kms_key_id"); ok {
+		input.KmsKeyId = aws.String(v.(string))
+	}
+	if v, ok := d.GetOk("s3_key_prefix"); ok {
+		input.S3KeyPrefix = aws.String(v.(string))
+	}
+	if v, ok := d.GetOk("sns_topic_name"); ok {
+		input.SnsTopicName = aws.String(v.(string))
+	}
+
+	t, err := conn.CreateTrail(&input)
+	if err != nil {
+		return err
+	}
+
+	log.Printf("[DEBUG] CloudTrail created: %s", t)
+
+	d.Set("arn", t.TrailARN)
+	d.SetId(*t.Name)
+
+	// AWS CloudTrail creates new trails with logging disabled, so logging has
+	// to be turned on explicitly when enable_logging is true.
+	if v, ok := d.GetOk("enable_logging"); ok && v.(bool) {
+		err := cloudTrailSetLogging(conn, v.(bool), d.Id())
+		if err != nil {
+			return err
+		}
+	}
+
+	return resourceAwsCloudTrailUpdate(d, meta)
+}
+
+func resourceAwsCloudTrailRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudtrailconn
+
+	input := cloudtrail.DescribeTrailsInput{
+		TrailNameList: []*string{
+			aws.String(d.Id()),
+		},
+	}
+	resp, err := conn.DescribeTrails(&input)
+	if err != nil {
+		return err
+	}
+
+	// CloudTrail does not return a NotFound error in the event that the Trail
+	// you're looking for is not found. Instead, it's simply not in the list.
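+	// The loop below therefore scans the list for a matching name and treats
+	// a miss as a deleted trail, removing it from state.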
+	var trail *cloudtrail.Trail
+	for _, c := range resp.TrailList {
+		if d.Id() == *c.Name {
+			trail = c
+		}
+	}
+
+	if trail == nil {
+		log.Printf("[WARN] CloudTrail (%s) not found", d.Id())
+		d.SetId("")
+		return nil
+	}
+
+	log.Printf("[DEBUG] CloudTrail received: %s", trail)
+
+	d.Set("name", trail.Name)
+	d.Set("s3_bucket_name", trail.S3BucketName)
+	d.Set("s3_key_prefix", trail.S3KeyPrefix)
+	d.Set("cloud_watch_logs_role_arn", trail.CloudWatchLogsRoleArn)
+	d.Set("cloud_watch_logs_group_arn", trail.CloudWatchLogsLogGroupArn)
+	d.Set("include_global_service_events", trail.IncludeGlobalServiceEvents)
+	d.Set("is_multi_region_trail", trail.IsMultiRegionTrail)
+	d.Set("sns_topic_name", trail.SnsTopicName)
+	d.Set("enable_log_file_validation", trail.LogFileValidationEnabled)
+
+	// TODO: Make it possible to use KMS Key names, not just ARNs
+	// In order to test it properly this PR needs to be merged first:
+	// https://github.com/hashicorp/terraform/pull/3928
+	d.Set("kms_key_id", trail.KmsKeyId)
+
+	d.Set("arn", trail.TrailARN)
+	d.Set("home_region", trail.HomeRegion)
+
+	// Get tags
+	req := &cloudtrail.ListTagsInput{
+		ResourceIdList: []*string{trail.TrailARN},
+	}
+
+	tagsOut, err := conn.ListTags(req)
+	if err != nil {
+		return err
+	}
+	log.Printf("[DEBUG] Received CloudTrail tags: %s", tagsOut)
+
+	var tags []*cloudtrail.Tag
+	if tagsOut.ResourceTagList != nil && len(tagsOut.ResourceTagList) > 0 {
+		tags = tagsOut.ResourceTagList[0].TagsList
+	}
+
+	if err := d.Set("tags", tagsToMapCloudtrail(tags)); err != nil {
+		return err
+	}
+
+	logstatus, err := cloudTrailGetLoggingStatus(conn, trail.Name)
+	if err != nil {
+		return err
+	}
+	d.Set("enable_logging", logstatus)
+
+	return nil
+}
+
+func resourceAwsCloudTrailUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudtrailconn
+
+	input := cloudtrail.UpdateTrailInput{
+		Name: aws.String(d.Id()),
+	}
+
+	if d.HasChange("s3_bucket_name") {
+		input.S3BucketName = aws.String(d.Get("s3_bucket_name").(string))
+	}
+	if d.HasChange("s3_key_prefix") {
+		input.S3KeyPrefix = aws.String(d.Get("s3_key_prefix").(string))
+	}
+	if d.HasChange("cloud_watch_logs_role_arn") {
+		input.CloudWatchLogsRoleArn = aws.String(d.Get("cloud_watch_logs_role_arn").(string))
+	}
+	if d.HasChange("cloud_watch_logs_group_arn") {
+		input.CloudWatchLogsLogGroupArn = aws.String(d.Get("cloud_watch_logs_group_arn").(string))
+	}
+	if d.HasChange("include_global_service_events") {
+		input.IncludeGlobalServiceEvents = aws.Bool(d.Get("include_global_service_events").(bool))
+	}
+	if d.HasChange("is_multi_region_trail") {
+		input.IsMultiRegionTrail = aws.Bool(d.Get("is_multi_region_trail").(bool))
+	}
+	if d.HasChange("enable_log_file_validation") {
+		input.EnableLogFileValidation = aws.Bool(d.Get("enable_log_file_validation").(bool))
+	}
+	if d.HasChange("kms_key_id") {
+		input.KmsKeyId = aws.String(d.Get("kms_key_id").(string))
+	}
+	if d.HasChange("sns_topic_name") {
+		input.SnsTopicName = aws.String(d.Get("sns_topic_name").(string))
+	}
+
+	log.Printf("[DEBUG] Updating CloudTrail: %s", input)
+	t, err := conn.UpdateTrail(&input)
+	if err != nil {
+		return err
+	}
+
+	if d.HasChange("tags") {
+		err := setTagsCloudtrail(conn, d)
+		if err != nil {
+			return err
+		}
+	}
+
+	if d.HasChange("enable_logging") {
+		log.Printf("[DEBUG] Updating logging on CloudTrail: %s", input)
+		err := cloudTrailSetLogging(conn, d.Get("enable_logging").(bool), *input.Name)
+		if err != nil {
+			return err
+		}
+	}
+
+	log.Printf("[DEBUG] CloudTrail updated: %s", t)
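+
+	// Re-read the trail so that computed attributes and the results of the
+	// tagging/logging calls above are reflected in state.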
+	return resourceAwsCloudTrailRead(d, meta)
+}
+
+func resourceAwsCloudTrailDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudtrailconn
+
+	log.Printf("[DEBUG] Deleting CloudTrail: %q", d.Id())
+	_, err := conn.DeleteTrail(&cloudtrail.DeleteTrailInput{
+		Name: aws.String(d.Id()),
+	})
+
+	return err
+}
+
+func cloudTrailGetLoggingStatus(conn *cloudtrail.CloudTrail, id *string) (bool, error) {
+	GetTrailStatusOpts := &cloudtrail.GetTrailStatusInput{
+		Name: id,
+	}
+	resp, err := conn.GetTrailStatus(GetTrailStatusOpts)
+	if err != nil {
+		return false, fmt.Errorf("Error retrieving logging status of CloudTrail (%s): %s", *id, err)
+	}
+
+	return *resp.IsLogging, err
+}
+
+func cloudTrailSetLogging(conn *cloudtrail.CloudTrail, enabled bool, id string) error {
+	if enabled {
+		log.Printf(
+			"[DEBUG] Starting logging on CloudTrail (%s)",
+			id)
+		StartLoggingOpts := &cloudtrail.StartLoggingInput{
+			Name: aws.String(id),
+		}
+		if _, err := conn.StartLogging(StartLoggingOpts); err != nil {
+			return fmt.Errorf(
+				"Error starting logging on CloudTrail (%s): %s",
+				id, err)
+		}
+	} else {
+		log.Printf(
+			"[DEBUG] Stopping logging on CloudTrail (%s)",
+			id)
+		StopLoggingOpts := &cloudtrail.StopLoggingInput{
+			Name: aws.String(id),
+		}
+		if _, err := conn.StopLogging(StopLoggingOpts); err != nil {
+			return fmt.Errorf(
+				"Error stopping logging on CloudTrail (%s): %s",
+				id, err)
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_event_rule.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_event_rule.go
new file mode 100644
index 000000000..e079a56c7
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_event_rule.go
@@ -0,0 +1,287 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"regexp"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	events "github.com/aws/aws-sdk-go/service/cloudwatchevents"
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsCloudWatchEventRule() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsCloudWatchEventRuleCreate,
+		Read:   resourceAwsCloudWatchEventRuleRead,
+		Update: resourceAwsCloudWatchEventRuleUpdate,
+		Delete: resourceAwsCloudWatchEventRuleDelete,
+		Importer: &schema.ResourceImporter{
+			State: schema.ImportStatePassthrough,
+		},
+
+		Schema: map[string]*schema.Schema{
+			"name": &schema.Schema{
+				Type:         schema.TypeString,
+				Required:     true,
+				ForceNew:     true,
+				ValidateFunc: validateCloudWatchEventRuleName,
+			},
+			"schedule_expression": &schema.Schema{
+				Type:         schema.TypeString,
+				Optional:     true,
+				ValidateFunc: validateMaxLength(256),
+			},
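+			// event_pattern is stored in its JSON-normalized form (see the
+			// StateFunc below) so that semantically identical documents do
+			// not show up as spurious diffs.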
+			"event_pattern": &schema.Schema{
+				Type:         schema.TypeString,
+				Optional:     true,
+				ValidateFunc: validateEventPatternValue(2048),
+				StateFunc: func(v interface{}) string {
+					json, _ := normalizeJsonString(v)
+					return json
+				},
+			},
+			"description": &schema.Schema{
+				Type:         schema.TypeString,
+				Optional:     true,
+				ValidateFunc: validateMaxLength(512),
+			},
+			"role_arn": &schema.Schema{
+				Type:         schema.TypeString,
+				Optional:     true,
+				ValidateFunc: validateMaxLength(1600),
+			},
+			"is_enabled": &schema.Schema{
+				Type:     schema.TypeBool,
+				Optional: true,
+				Default:  true,
+			},
+			"arn": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceAwsCloudWatchEventRuleCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudwatcheventsconn
+
+	input, err := buildPutRuleInputStruct(d)
+	if err != nil {
+		return errwrap.Wrapf("Creating CloudWatch Event Rule failed: {{err}}", err)
+	}
+	log.Printf("[DEBUG] Creating CloudWatch Event Rule: %s", input)
+
+	// IAM Roles take some time to propagate
+	var out *events.PutRuleOutput
+	err = resource.Retry(30*time.Second, func() *resource.RetryError {
+		var err error
+		out, err = conn.PutRule(input)
+		pattern := regexp.MustCompile("cannot be assumed by principal '[a-z]+\\.amazonaws\\.com'\\.$")
+		if err != nil {
+			if awsErr, ok := err.(awserr.Error); ok {
+				if awsErr.Code() == "ValidationException" && pattern.MatchString(awsErr.Message()) {
+					log.Printf("[DEBUG] Retrying creation of CloudWatch Event Rule %q", *input.Name)
+					return resource.RetryableError(err)
+				}
+			}
+			return resource.NonRetryableError(err)
+		}
+		return nil
+	})
+	if err != nil {
+		return errwrap.Wrapf("Creating CloudWatch Event Rule failed: {{err}}", err)
+	}
+
+	d.Set("arn", out.RuleArn)
+	d.SetId(d.Get("name").(string))
+
+	log.Printf("[INFO] CloudWatch Event Rule %q created", *out.RuleArn)
+
+	return resourceAwsCloudWatchEventRuleUpdate(d, meta)
+}
+
+func resourceAwsCloudWatchEventRuleRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudwatcheventsconn
+
+	input := events.DescribeRuleInput{
+		Name: aws.String(d.Id()),
+	}
+	log.Printf("[DEBUG] Reading CloudWatch Event Rule: %s", input)
+	out, err := conn.DescribeRule(&input)
+	if awsErr, ok := err.(awserr.Error); ok {
+		if awsErr.Code() == "ResourceNotFoundException" {
+			log.Printf("[WARN] Removing CloudWatch Event Rule %q because it's gone.", d.Id())
+			d.SetId("")
+			return nil
+		}
+	}
+	if err != nil {
+		return err
+	}
+	log.Printf("[DEBUG] Found Event Rule: %s", out)
+
+	d.Set("arn", out.Arn)
+	d.Set("description", out.Description)
+	if out.EventPattern != nil {
+		pattern, err := normalizeJsonString(*out.EventPattern)
+		if err != nil {
+			return errwrap.Wrapf("event pattern contains invalid JSON: {{err}}", err)
+		}
+		d.Set("event_pattern", pattern)
+	}
+	d.Set("name", out.Name)
+	d.Set("role_arn", out.RoleArn)
+	d.Set("schedule_expression", out.ScheduleExpression)
+
+	boolState, err := getBooleanStateFromString(*out.State)
+	if err != nil {
+		return err
+	}
+	log.Printf("[DEBUG] Setting boolean state: %t", boolState)
+	d.Set("is_enabled", boolState)
+
+	return nil
+}
+
+func resourceAwsCloudWatchEventRuleUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudwatcheventsconn
+
+	if d.HasChange("is_enabled") && d.Get("is_enabled").(bool) {
+		log.Printf("[DEBUG] Enabling CloudWatch Event Rule %q", d.Id())
+		_, err := conn.EnableRule(&events.EnableRuleInput{
+			Name: aws.String(d.Id()),
+		})
+		if err != nil {
+			return err
+		}
+		log.Printf("[DEBUG] CloudWatch Event Rule (%q) enabled", d.Id())
+	}
+
+	input, err := buildPutRuleInputStruct(d)
+	if err != nil {
+		return errwrap.Wrapf("Updating CloudWatch Event Rule failed: {{err}}", err)
+	}
+	log.Printf("[DEBUG] Updating CloudWatch Event Rule: %s", input)
+
+	// IAM Roles take some time to propagate
+	err = resource.Retry(30*time.Second, func() *resource.RetryError {
+		_, err := conn.PutRule(input)
+		pattern := regexp.MustCompile("cannot be assumed by principal '[a-z]+\\.amazonaws\\.com'\\.$")
+		if err != nil {
+			if awsErr, ok := err.(awserr.Error); ok {
+				if awsErr.Code() == "ValidationException" && pattern.MatchString(awsErr.Message()) {
+					log.Printf("[DEBUG] Retrying update of CloudWatch Event Rule %q", *input.Name)
+					return resource.RetryableError(err)
+				}
+			}
+			return resource.NonRetryableError(err)
+		}
+		return nil
+	})
+	if err != nil {
+		return errwrap.Wrapf("Updating CloudWatch Event Rule failed: {{err}}", err)
+	}
+
+	if d.HasChange("is_enabled") && !d.Get("is_enabled").(bool) {
+		log.Printf("[DEBUG] Disabling CloudWatch Event Rule %q", d.Id())
+		_, err := conn.DisableRule(&events.DisableRuleInput{
+			Name: aws.String(d.Id()),
+		})
+		if err != nil {
+			return err
+		}
+		log.Printf("[DEBUG] CloudWatch Event Rule (%q) disabled", d.Id())
+	}
+
+	return resourceAwsCloudWatchEventRuleRead(d, meta)
+}
+
+func resourceAwsCloudWatchEventRuleDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudwatcheventsconn
+
+	log.Printf("[INFO] Deleting CloudWatch Event Rule: %s", d.Id())
+	_, err := conn.DeleteRule(&events.DeleteRuleInput{
+		Name: aws.String(d.Id()),
+	})
+	if err != nil {
+		return fmt.Errorf("Error deleting CloudWatch Event Rule: %s", err)
+	}
+	log.Println("[INFO] CloudWatch Event Rule deleted")
+
+	d.SetId("")
+
+	return nil
+}
+
+func buildPutRuleInputStruct(d *schema.ResourceData) (*events.PutRuleInput, error) {
+	input := events.PutRuleInput{
+		Name: aws.String(d.Get("name").(string)),
+	}
+	if v, ok := d.GetOk("description"); ok {
+		input.Description = aws.String(v.(string))
+	}
+	if v, ok := d.GetOk("event_pattern"); ok {
+		pattern, err := normalizeJsonString(v)
+		if err != nil {
+			return nil, errwrap.Wrapf("event pattern contains invalid JSON: {{err}}", err)
+		}
+		input.EventPattern = aws.String(pattern)
+	}
+	if v, ok := d.GetOk("role_arn"); ok {
+		input.RoleArn = aws.String(v.(string))
+	}
+	if v, ok := d.GetOk("schedule_expression"); ok {
+		input.ScheduleExpression = aws.String(v.(string))
+	}
+
+	input.State = aws.String(getStringStateFromBoolean(d.Get("is_enabled").(bool)))
+
+	return &input, nil
+}
+
+// State is represented as (ENABLED|DISABLED) in the API
+func getBooleanStateFromString(state string) (bool, error) {
+	if state == "ENABLED" {
+		return true, nil
+	} else if state == "DISABLED" {
+		return false, nil
+	}
+	// We don't just blindly trust AWS as they tend to return
+	// unexpected values in similar cases (different casing etc.)
+	return false, fmt.Errorf("Failed converting state %q into boolean", state)
+}
+
+// State is represented as (ENABLED|DISABLED) in the API
+func getStringStateFromBoolean(isEnabled bool) string {
+	if isEnabled {
+		return "ENABLED"
+	}
+	return "DISABLED"
+}
+
+func validateEventPatternValue(length int) schema.SchemaValidateFunc {
+	return func(v interface{}, k string) (ws []string, errors []error) {
+		json, err := normalizeJsonString(v)
+		if err != nil {
+			errors = append(errors, fmt.Errorf("%q contains invalid JSON: %s", k, err))
+
+			// Invalid JSON? Return immediately, there is no need to collect
+			// other errors.
+			return
+		}
+
+		// Check whether the normalized JSON is within the given length.
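+		// The limit is enforced against the normalized form, so whitespace
+		// in the raw configuration value does not count toward the length.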
+		if len(json) > length {
+			errors = append(errors, fmt.Errorf(
+				"%q cannot be longer than %d characters: %q", k, length, json))
+		}
+		return
+	}
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_event_target.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_event_target.go
new file mode 100644
index 000000000..afa5e84fd
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_event_target.go
@@ -0,0 +1,291 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"regexp"
+
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	events "github.com/aws/aws-sdk-go/service/cloudwatchevents"
+	"github.com/hashicorp/terraform/helper/validation"
+)
+
+func resourceAwsCloudWatchEventTarget() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsCloudWatchEventTargetCreate,
+		Read:   resourceAwsCloudWatchEventTargetRead,
+		Update: resourceAwsCloudWatchEventTargetUpdate,
+		Delete: resourceAwsCloudWatchEventTargetDelete,
+
+		Schema: map[string]*schema.Schema{
+			"rule": {
+				Type:         schema.TypeString,
+				Required:     true,
+				ForceNew:     true,
+				ValidateFunc: validateCloudWatchEventRuleName,
+			},
+
+			"target_id": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				Computed:     true,
+				ForceNew:     true,
+				ValidateFunc: validateCloudWatchEventTargetId,
+			},
+
+			"arn": {
+				Type:     schema.TypeString,
+				Required: true,
+			},
+
+			"input": {
+				Type:          schema.TypeString,
+				Optional:      true,
+				ConflictsWith: []string{"input_path"},
+				// We could be normalizing the JSON here,
+				// but for built-in targets input may not be JSON
+			},
+
+			"input_path": {
+				Type:          schema.TypeString,
+				Optional:      true,
+				ConflictsWith: []string{"input"},
+			},
+
+			"role_arn": {
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+
+			"run_command_targets": {
+				Type:     schema.TypeList,
+				Optional: true,
+				MaxItems: 5,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"key": {
+							Type:         schema.TypeString,
+							Required:     true,
+							ValidateFunc: validation.StringLenBetween(1, 128),
+						},
+						"values": {
+							Type:     schema.TypeList,
+							Required: true,
+							Elem:     &schema.Schema{Type: schema.TypeString},
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
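+// When no target_id is supplied, Create generates one with
+// resource.UniqueId() and stores it, since this resource's own ID is the
+// concatenation "<rule>-<target_id>".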
+func resourceAwsCloudWatchEventTargetCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudwatcheventsconn
+
+	rule := d.Get("rule").(string)
+
+	var targetId string
+	if v, ok := d.GetOk("target_id"); ok {
+		targetId = v.(string)
+	} else {
+		targetId = resource.UniqueId()
+		d.Set("target_id", targetId)
+	}
+
+	input := buildPutTargetInputStruct(d)
+
+	log.Printf("[DEBUG] Creating CloudWatch Event Target: %s", input)
+	out, err := conn.PutTargets(input)
+	if err != nil {
+		return fmt.Errorf("Creating CloudWatch Event Target failed: %s", err)
+	}
+
+	if len(out.FailedEntries) > 0 {
+		return fmt.Errorf("Creating CloudWatch Event Target failed: %s",
+			out.FailedEntries)
+	}
+
+	id := rule + "-" + targetId
+	d.SetId(id)
+
+	log.Printf("[INFO] CloudWatch Event Target %q created", d.Id())
+
+	return resourceAwsCloudWatchEventTargetRead(d, meta)
+}
+
+func resourceAwsCloudWatchEventTargetRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudwatcheventsconn
+
+	t, err := findEventTargetById(
+		d.Get("target_id").(string),
+		d.Get("rule").(string),
+		nil, conn)
+	if err != nil {
+		if regexp.MustCompile(" not found$").MatchString(err.Error()) {
+			log.Printf("[WARN] Removing CloudWatch Event Target %q because it's gone.", d.Id())
+			d.SetId("")
+			return nil
+		}
+		if awsErr, ok := err.(awserr.Error); ok {
+			// This should never happen, but it's useful
+			// for recovering from https://github.com/hashicorp/terraform/issues/5389
+			if awsErr.Code() == "ValidationException" {
+				log.Printf("[WARN] Removing CloudWatch Event Target %q because it never existed.", d.Id())
+				d.SetId("")
+				return nil
+			}
+
+			if awsErr.Code() == "ResourceNotFoundException" {
+				log.Printf("[WARN] CloudWatch Event Target (%q) not found. Removing it from state.", d.Id())
+				d.SetId("")
+				return nil
+			}
+
+		}
+		return err
+	}
+	log.Printf("[DEBUG] Found Event Target: %s", t)
+
+	d.Set("arn", t.Arn)
+	d.Set("target_id", t.Id)
+	d.Set("input", t.Input)
+	d.Set("input_path", t.InputPath)
+	d.Set("role_arn", t.RoleArn)
+
+	if t.RunCommandParameters != nil {
+		if err := d.Set("run_command_targets", flattenAwsCloudWatchEventTargetRunParameters(t.RunCommandParameters)); err != nil {
+			return fmt.Errorf("[DEBUG] Error setting run_command_targets error: %#v", err)
+		}
+	}
+
+	return nil
+}
+
+func findEventTargetById(id, rule string, nextToken *string, conn *events.CloudWatchEvents) (
+	*events.Target, error) {
+	input := events.ListTargetsByRuleInput{
+		Rule:      aws.String(rule),
+		NextToken: nextToken,
+		Limit:     aws.Int64(100), // Set limit to allowed maximum to prevent API throttling
+	}
+	log.Printf("[DEBUG] Reading CloudWatch Event Target: %s", input)
+	out, err := conn.ListTargetsByRule(&input)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, t := range out.Targets {
+		if *t.Id == id {
+			return t, nil
+		}
+	}
+
+	if out.NextToken != nil {
+		// Recurse with the token from this response, not the one we were
+		// called with, so pagination actually advances.
+		return findEventTargetById(id, rule, out.NextToken, conn)
+	}
+
+	return nil, fmt.Errorf("CloudWatch Event Target %q (%q) not found", id, rule)
+}
+
+func resourceAwsCloudWatchEventTargetUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudwatcheventsconn
+
+	input := buildPutTargetInputStruct(d)
+
+	log.Printf("[DEBUG] Updating CloudWatch Event Target: %s", input)
+	_, err := conn.PutTargets(input)
+	if err != nil {
+		return fmt.Errorf("Updating CloudWatch Event Target failed: %s", err)
+	}
+
+	return resourceAwsCloudWatchEventTargetRead(d, meta)
+}
+
+func resourceAwsCloudWatchEventTargetDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudwatcheventsconn
+
+	input := events.RemoveTargetsInput{
+		Ids:  []*string{aws.String(d.Get("target_id").(string))},
+		Rule: aws.String(d.Get("rule").(string)),
+	}
+	log.Printf("[INFO] Deleting CloudWatch Event Target: %s", input)
+	_, err := conn.RemoveTargets(&input)
+	if err != nil {
+		return fmt.Errorf("Error deleting CloudWatch Event Target: %s", err)
+	}
+	log.Println("[INFO] CloudWatch Event Target deleted")
+
+	d.SetId("")
+
+	return nil
+}
+
+func buildPutTargetInputStruct(d *schema.ResourceData) *events.PutTargetsInput {
+	e := &events.Target{
+		Arn: aws.String(d.Get("arn").(string)),
+		Id:  aws.String(d.Get("target_id").(string)),
+	}
+
+	if v, ok := d.GetOk("input"); ok {
+		e.Input = aws.String(v.(string))
+	}
+	if v, ok := d.GetOk("input_path"); ok {
+		e.InputPath = aws.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("role_arn"); ok {
+		e.RoleArn = aws.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("run_command_targets"); ok {
+		e.RunCommandParameters = expandAwsCloudWatchEventTargetRunParameters(v.([]interface{}))
+	}
+
+	input := events.PutTargetsInput{
+		Rule:    aws.String(d.Get("rule").(string)),
+		Targets: []*events.Target{e},
+	}
+
+	return &input
+}
+
+func expandAwsCloudWatchEventTargetRunParameters(config []interface{}) *events.RunCommandParameters {
+	commands := make([]*events.RunCommandTarget, 0)
+
+	for _, c := range config {
+		param := c.(map[string]interface{})
+		command := &events.RunCommandTarget{
+			Key:    aws.String(param["key"].(string)),
+			Values: expandStringList(param["values"].([]interface{})),
+		}
+
+		commands = append(commands, command)
+	}
+
+	command := &events.RunCommandParameters{
+		RunCommandTargets: commands,
+	}
+
+	return command
+}
+
+func flattenAwsCloudWatchEventTargetRunParameters(runCommand *events.RunCommandParameters) []map[string]interface{} {
+	result := make([]map[string]interface{}, 0)
+
+	for _, x := range runCommand.RunCommandTargets {
+		config := make(map[string]interface{})
+
+		config["key"] = *x.Key
+		config["values"] = flattenStringList(x.Values)
+
+		result = append(result, config)
+	}
+
+	return result
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_destination.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_destination.go
new file mode 100644
index 000000000..172630648
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_destination.go
@@ -0,0 +1,150 @@
+package aws
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsCloudWatchLogDestination() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsCloudWatchLogDestinationPut,
+		Update: resourceAwsCloudWatchLogDestinationPut,
+		Read:   resourceAwsCloudWatchLogDestinationRead,
+		Delete: resourceAwsCloudWatchLogDestinationDelete,
+
+		Importer: &schema.ResourceImporter{
+			State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+				d.Set("name", d.Id())
+				return []*schema.ResourceData{d}, nil
+			},
+		},
+
+		Schema: map[string]*schema.Schema{
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"role_arn": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+			},
+
+			"target_arn": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+			},
+
+			"arn": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceAwsCloudWatchLogDestinationPut(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).cloudwatchlogsconn
+
+	name := d.Get("name").(string)
+	role_arn := d.Get("role_arn").(string)
+	target_arn := d.Get("target_arn").(string)
+
+	params := &cloudwatchlogs.PutDestinationInput{
+		DestinationName: aws.String(name),
+		RoleArn:         aws.String(role_arn),
+		TargetArn:       aws.String(target_arn),
+	}
+
+	return resource.Retry(3*time.Minute, func() *resource.RetryError {
+		resp, err := conn.PutDestination(params)
+
+		if err == nil {
+			d.SetId(name)
+			d.Set("arn", *resp.Destination.Arn)
+			// Success — stop retrying.
+			return nil
+		}
+
+		awsErr, ok := err.(awserr.Error)
+		if !ok {
+			return resource.RetryableError(err)
+		}
+
+		if awsErr.Code() == "InvalidParameterException" {
+			if strings.Contains(awsErr.Message(), "Could not deliver test message to specified") {
+				return resource.RetryableError(err)
+			}
+			return resource.NonRetryableError(err)
+		}
+
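+		// Any other AWS error code is treated as terminal.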
return resource.NonRetryableError(err) + }) +} + +func resourceAwsCloudWatchLogDestinationRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + name := d.Get("name").(string) + destination, exists, err := lookupCloudWatchLogDestination(conn, name, nil) + if err != nil { + return err + } + + if !exists { + d.SetId("") + return nil + } + + d.SetId(name) + d.Set("arn", destination.Arn) + d.Set("role_arn", destination.RoleArn) + d.Set("target_arn", destination.TargetArn) + + return nil +} + +func resourceAwsCloudWatchLogDestinationDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + + name := d.Get("name").(string) + + params := &cloudwatchlogs.DeleteDestinationInput{ + DestinationName: aws.String(name), + } + _, err := conn.DeleteDestination(params) + if err != nil { + return fmt.Errorf("Error deleting Destination with name %s", name) + } + d.SetId("") + return nil +} + +func lookupCloudWatchLogDestination(conn *cloudwatchlogs.CloudWatchLogs, + name string, nextToken *string) (*cloudwatchlogs.Destination, bool, error) { + input := &cloudwatchlogs.DescribeDestinationsInput{ + DestinationNamePrefix: aws.String(name), + NextToken: nextToken, + } + resp, err := conn.DescribeDestinations(input) + if err != nil { + return nil, true, err + } + + for _, destination := range resp.Destinations { + if *destination.DestinationName == name { + return destination, true, nil + } + } + + if resp.NextToken != nil { + return lookupCloudWatchLogDestination(conn, name, resp.NextToken) + } + + return nil, false, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_destination_policy.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_destination_policy.go new file mode 100644 index 000000000..704dacf45 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_destination_policy.go @@ -0,0 +1,88 @@ +package aws + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" +) + +func resourceAwsCloudWatchLogDestinationPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCloudWatchLogDestinationPolicyPut, + Update: resourceAwsCloudWatchLogDestinationPolicyPut, + Read: resourceAwsCloudWatchLogDestinationPolicyRead, + Delete: resourceAwsCloudWatchLogDestinationPolicyDelete, + + Importer: &schema.ResourceImporter{ + State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("destination_name", d.Id()) + return []*schema.ResourceData{d}, nil + }, + }, + + Schema: map[string]*schema.Schema{ + "destination_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "access_policy": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceAwsCloudWatchLogDestinationPolicyPut(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + + destination_name := d.Get("destination_name").(string) + access_policy := d.Get("access_policy").(string) + + params := &cloudwatchlogs.PutDestinationPolicyInput{ + DestinationName: aws.String(destination_name), + AccessPolicy: aws.String(access_policy), + } + + _, err := conn.PutDestinationPolicy(params) + + if err != nil { + return fmt.Errorf("Error creating 
DestinationPolicy with destination_name %s: %#v", destination_name, err) + } + + d.SetId(destination_name) + return resourceAwsCloudWatchLogDestinationPolicyRead(d, meta) +} + +func resourceAwsCloudWatchLogDestinationPolicyRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + destination_name := d.Get("destination_name").(string) + destination, exists, err := lookupCloudWatchLogDestination(conn, destination_name, nil) + if err != nil { + return err + } + + if !exists { + d.SetId("") + return nil + } + + if destination.AccessPolicy != nil { + d.SetId(destination_name) + d.Set("access_policy", *destination.AccessPolicy) + } else { + d.SetId("") + } + + return nil +} + +func resourceAwsCloudWatchLogDestinationPolicyDelete(d *schema.ResourceData, meta interface{}) error { + d.SetId("") + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_group.go new file mode 100644 index 000000000..a4ca7b753 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_group.go @@ -0,0 +1,260 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/hashicorp/errwrap" +) + +func resourceAwsCloudWatchLogGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCloudWatchLogGroupCreate, + Read: resourceAwsCloudWatchLogGroupRead, + Update: resourceAwsCloudWatchLogGroupUpdate, + Delete: resourceAwsCloudWatchLogGroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: validateLogGroupName, + }, + "name_prefix": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateLogGroupNamePrefix, + }, + + "retention_in_days": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + }, + + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsCloudWatchLogGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + + var logGroupName string + if v, ok := d.GetOk("name"); ok { + logGroupName = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + logGroupName = resource.PrefixedUniqueId(v.(string)) + } else { + logGroupName = resource.UniqueId() + } + + log.Printf("[DEBUG] Creating CloudWatch Log Group: %s", logGroupName) + + _, err := conn.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ + LogGroupName: aws.String(logGroupName), + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceAlreadyExistsException" { + return fmt.Errorf("Creating CloudWatch Log Group failed: %s: The CloudWatch Log Group '%s' already exists.", err, d.Get("name").(string)) + } + return fmt.Errorf("Creating CloudWatch Log Group failed: %s '%s'", err, d.Get("name")) + } + + d.SetId(logGroupName) + + log.Println("[INFO] CloudWatch Log Group created") + + return resourceAwsCloudWatchLogGroupUpdate(d, meta) +} + +func 
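+ // NOTE: Create above resolves the log group name from one of three
+ // sources: an explicit "name", a "name_prefix" expanded via
+ // resource.PrefixedUniqueId, or a fully generated resource.UniqueId.
+ // A hedged HCL sketch (hypothetical values):
+ //
+ //   resource "aws_cloudwatch_log_group" "example" {
+ //     name_prefix       = "app-" # expands to e.g. "app-20170905..."
+ //     retention_in_days = 14
+ //   }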
resourceAwsCloudWatchLogGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + log.Printf("[DEBUG] Reading CloudWatch Log Group: %q", d.Get("name").(string)) + lg, exists, err := lookupCloudWatchLogGroup(conn, d.Id(), nil) + if err != nil { + return err + } + + if !exists { + log.Printf("[DEBUG] CloudWatch Group %q Not Found", d.Id()) + d.SetId("") + return nil + } + + log.Printf("[DEBUG] Found Log Group: %#v", *lg) + + d.Set("arn", lg.Arn) + d.Set("name", lg.LogGroupName) + + if lg.RetentionInDays != nil { + d.Set("retention_in_days", lg.RetentionInDays) + } + + if !meta.(*AWSClient).IsChinaCloud() && !meta.(*AWSClient).IsGovCloud() { + tags, err := flattenCloudWatchTags(d, conn) + if err != nil { + return err + } + d.Set("tags", tags) + } + + return nil +} + +func lookupCloudWatchLogGroup(conn *cloudwatchlogs.CloudWatchLogs, + name string, nextToken *string) (*cloudwatchlogs.LogGroup, bool, error) { + input := &cloudwatchlogs.DescribeLogGroupsInput{ + LogGroupNamePrefix: aws.String(name), + NextToken: nextToken, + } + resp, err := conn.DescribeLogGroups(input) + if err != nil { + return nil, true, err + } + + for _, lg := range resp.LogGroups { + if *lg.LogGroupName == name { + return lg, true, nil + } + } + + if resp.NextToken != nil { + return lookupCloudWatchLogGroup(conn, name, resp.NextToken) + } + + return nil, false, nil +} + +func resourceAwsCloudWatchLogGroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + + name := d.Get("name").(string) + log.Printf("[DEBUG] Updating CloudWatch Log Group: %q", name) + + if d.HasChange("retention_in_days") { + var err error + + if v, ok := d.GetOk("retention_in_days"); ok { + input := cloudwatchlogs.PutRetentionPolicyInput{ + LogGroupName: aws.String(name), + RetentionInDays: aws.Int64(int64(v.(int))), + } + log.Printf("[DEBUG] Setting retention for CloudWatch Log Group: %q: %s", name, input) + _, err = conn.PutRetentionPolicy(&input) + } else { + log.Printf("[DEBUG] Deleting retention for CloudWatch Log Group: %q", name) + _, err = conn.DeleteRetentionPolicy(&cloudwatchlogs.DeleteRetentionPolicyInput{ + LogGroupName: aws.String(name), + }) + } + + if err != nil { + return err + } + } + + restricted := meta.(*AWSClient).IsChinaCloud() || meta.(*AWSClient).IsGovCloud() + + if !restricted && d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffCloudWatchTags(o, n) + + if len(remove) > 0 { + log.Printf("[DEBUG] Removing tags from %s", name) + _, err := conn.UntagLogGroup(&cloudwatchlogs.UntagLogGroupInput{ + LogGroupName: aws.String(name), + Tags: remove, + }) + if err != nil { + return err + } + } + + if len(create) > 0 { + log.Printf("[DEBUG] Creating tags on %s", name) + _, err := conn.TagLogGroup(&cloudwatchlogs.TagLogGroupInput{ + LogGroupName: aws.String(name), + Tags: create, + }) + if err != nil { + return err + } + } + } + + return resourceAwsCloudWatchLogGroupRead(d, meta) +} + +func diffCloudWatchTags(oldTags map[string]interface{}, newTags map[string]interface{}) (map[string]*string, []*string) { + create := make(map[string]*string) + for k, v := range newTags { + create[k] = aws.String(v.(string)) + } + + var remove []*string + for t, _ := range oldTags { + _, ok := create[t] + if !ok { + remove = append(remove, aws.String(t)) + } + } + + return create, remove +} + +func resourceAwsCloudWatchLogGroupDelete(d 
*schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + log.Printf("[INFO] Deleting CloudWatch Log Group: %s", d.Id()) + _, err := conn.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ + LogGroupName: aws.String(d.Get("name").(string)), + }) + if err != nil { + return fmt.Errorf("Error deleting CloudWatch Log Group: %s", err) + } + log.Println("[INFO] CloudWatch Log Group deleted") + + d.SetId("") + + return nil +} + +func flattenCloudWatchTags(d *schema.ResourceData, conn *cloudwatchlogs.CloudWatchLogs) (map[string]interface{}, error) { + tagsOutput, err := conn.ListTagsLogGroup(&cloudwatchlogs.ListTagsLogGroupInput{ + LogGroupName: aws.String(d.Get("name").(string)), + }) + if err != nil { + return nil, errwrap.Wrapf("Error Getting CloudWatch Logs Tag List: {{err}}", err) + } + if tagsOutput != nil { + output := make(map[string]interface{}, len(tagsOutput.Tags)) + + for i, v := range tagsOutput.Tags { + output[i] = *v + } + + return output, nil + } + + return make(map[string]interface{}), nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_metric_filter.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_metric_filter.go new file mode 100644 index 000000000..943472f85 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_metric_filter.go @@ -0,0 +1,187 @@ +package aws + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" +) + +func resourceAwsCloudWatchLogMetricFilter() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCloudWatchLogMetricFilterUpdate, + Read: resourceAwsCloudWatchLogMetricFilterRead, + Update: resourceAwsCloudWatchLogMetricFilterUpdate, + Delete: resourceAwsCloudWatchLogMetricFilterDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateLogMetricFilterName, + }, + + "pattern": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validateMaxLength(512), + StateFunc: func(v interface{}) string { + s, ok := v.(string) + if !ok { + return "" + } + return strings.TrimSpace(s) + }, + }, + + "log_group_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateLogGroupName, + }, + + "metric_transformation": &schema.Schema{ + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validateLogMetricFilterTransformationName, + }, + "namespace": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validateLogMetricFilterTransformationName, + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validateMaxLength(100), + }, + }, + }, + }, + }, + } +} + +func resourceAwsCloudWatchLogMetricFilterUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + + input := cloudwatchlogs.PutMetricFilterInput{ + FilterName: aws.String(d.Get("name").(string)), + FilterPattern: 
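+ // NOTE: the pattern is trimmed here to stay consistent with the
+ // schema's StateFunc above, so the value stored in state matches what
+ // is sent to the API. A hedged HCL sketch of a filter this resource
+ // accepts (hypothetical names and values):
+ //
+ //   resource "aws_cloudwatch_log_metric_filter" "example" {
+ //     name           = "error-count"
+ //     pattern        = "ERROR"
+ //     log_group_name = "/app/example"
+ //     metric_transformation {
+ //       name      = "ErrorCount"
+ //       namespace = "Example/App"
+ //       value     = "1"
+ //     }
+ //   }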
aws.String(strings.TrimSpace(d.Get("pattern").(string))), + LogGroupName: aws.String(d.Get("log_group_name").(string)), + } + + transformations := d.Get("metric_transformation").([]interface{}) + o := transformations[0].(map[string]interface{}) + input.MetricTransformations = expandCloudWachLogMetricTransformations(o) + + log.Printf("[DEBUG] Creating/Updating CloudWatch Log Metric Filter: %s", input) + _, err := conn.PutMetricFilter(&input) + if err != nil { + return fmt.Errorf("Creating/Updating CloudWatch Log Metric Filter failed: %s", err) + } + + d.SetId(d.Get("name").(string)) + + log.Println("[INFO] CloudWatch Log Metric Filter created/updated") + + return resourceAwsCloudWatchLogMetricFilterRead(d, meta) +} + +func resourceAwsCloudWatchLogMetricFilterRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + + mf, err := lookupCloudWatchLogMetricFilter(conn, d.Get("name").(string), + d.Get("log_group_name").(string), nil) + if err != nil { + if _, ok := err.(*resource.NotFoundError); ok { + log.Printf("[WARN] Removing CloudWatch Log Metric Filter as it is gone") + d.SetId("") + return nil + } + + return fmt.Errorf("Failed reading CloudWatch Log Metric Filter: %s", err) + } + + log.Printf("[DEBUG] Found CloudWatch Log Metric Filter: %s", mf) + + d.Set("name", mf.FilterName) + d.Set("pattern", mf.FilterPattern) + d.Set("metric_transformation", flattenCloudWachLogMetricTransformations(mf.MetricTransformations)) + + return nil +} + +func lookupCloudWatchLogMetricFilter(conn *cloudwatchlogs.CloudWatchLogs, + name, logGroupName string, nextToken *string) (*cloudwatchlogs.MetricFilter, error) { + + input := cloudwatchlogs.DescribeMetricFiltersInput{ + FilterNamePrefix: aws.String(name), + LogGroupName: aws.String(logGroupName), + NextToken: nextToken, + } + log.Printf("[DEBUG] Reading CloudWatch Log Metric Filter: %s", input) + resp, err := conn.DescribeMetricFilters(&input) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceNotFoundException" { + return nil, &resource.NotFoundError{ + Message: fmt.Sprintf("CloudWatch Log Metric Filter %q / %q not found via"+ + " initial DescribeMetricFilters call", name, logGroupName), + LastError: err, + LastRequest: input, + } + } + + return nil, fmt.Errorf("Failed describing CloudWatch Log Metric Filter: %s", err) + } + + for _, mf := range resp.MetricFilters { + if *mf.FilterName == name { + return mf, nil + } + } + + if resp.NextToken != nil { + return lookupCloudWatchLogMetricFilter(conn, name, logGroupName, resp.NextToken) + } + + return nil, &resource.NotFoundError{ + Message: fmt.Sprintf("CloudWatch Log Metric Filter %q / %q not found "+ + "in given results from DescribeMetricFilters", name, logGroupName), + LastResponse: resp, + LastRequest: input, + } +} + +func resourceAwsCloudWatchLogMetricFilterDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + + input := cloudwatchlogs.DeleteMetricFilterInput{ + FilterName: aws.String(d.Get("name").(string)), + LogGroupName: aws.String(d.Get("log_group_name").(string)), + } + log.Printf("[INFO] Deleting CloudWatch Log Metric Filter: %s", d.Id()) + _, err := conn.DeleteMetricFilter(&input) + if err != nil { + return fmt.Errorf("Error deleting CloudWatch Log Metric Filter: %s", err) + } + log.Println("[INFO] CloudWatch Log Metric Filter deleted") + + d.SetId("") + + return nil +} diff --git 
a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_stream.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_stream.go new file mode 100644 index 000000000..6557787a7 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_stream.go @@ -0,0 +1,133 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsCloudWatchLogStream() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCloudWatchLogStreamCreate, + Read: resourceAwsCloudWatchLogStreamRead, + Delete: resourceAwsCloudWatchLogStreamDelete, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateCloudWatchLogStreamName, + }, + + "log_group_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsCloudWatchLogStreamCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + + log.Printf("[DEBUG] Creating CloudWatch Log Stream: %s", d.Get("name").(string)) + _, err := conn.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{ + LogGroupName: aws.String(d.Get("log_group_name").(string)), + LogStreamName: aws.String(d.Get("name").(string)), + }) + if err != nil { + return errwrap.Wrapf("Creating CloudWatch Log Stream failed: {{err}}", err) + } + + d.SetId(d.Get("name").(string)) + + return resourceAwsCloudWatchLogStreamRead(d, meta) +} + +func resourceAwsCloudWatchLogStreamRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + + ls, exists, err := lookupCloudWatchLogStream(conn, d.Id(), d.Get("log_group_name").(string), nil) + if err != nil { + return err + } + + if !exists { + log.Printf("[DEBUG] CloudWatch Stream %q Not Found. 
Removing from state", d.Id()) + d.SetId("") + return nil + } + + d.Set("arn", ls.Arn) + d.Set("name", ls.LogStreamName) + + return nil +} + +func resourceAwsCloudWatchLogStreamDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + + log.Printf("[INFO] Deleting CloudWatch Log Stream: %s", d.Id()) + params := &cloudwatchlogs.DeleteLogStreamInput{ + LogGroupName: aws.String(d.Get("log_group_name").(string)), + LogStreamName: aws.String(d.Id()), + } + _, err := conn.DeleteLogStream(params) + if err != nil { + return errwrap.Wrapf("Error deleting CloudWatch Log Stream: {{err}}", err) + } + + return nil +} + +func lookupCloudWatchLogStream(conn *cloudwatchlogs.CloudWatchLogs, + name string, logStreamName string, nextToken *string) (*cloudwatchlogs.LogStream, bool, error) { + input := &cloudwatchlogs.DescribeLogStreamsInput{ + LogStreamNamePrefix: aws.String(name), + LogGroupName: aws.String(logStreamName), + NextToken: nextToken, + } + resp, err := conn.DescribeLogStreams(input) + if err != nil { + return nil, true, err + } + + for _, ls := range resp.LogStreams { + if *ls.LogStreamName == name { + return ls, true, nil + } + } + + if resp.NextToken != nil { + return lookupCloudWatchLogStream(conn, name, logStreamName, resp.NextToken) + } + + return nil, false, nil +} + +func validateCloudWatchLogStreamName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if regexp.MustCompile(`:`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "colons not allowed in %q:", k)) + } + if len(value) < 1 || len(value) > 512 { + errors = append(errors, fmt.Errorf( + "%q must be between 1 and 512 characters: %q", k, value)) + } + + return + +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_subscription_filter.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_subscription_filter.go new file mode 100644 index 000000000..250403143 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_log_subscription_filter.go @@ -0,0 +1,180 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsCloudwatchLogSubscriptionFilter() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCloudwatchLogSubscriptionFilterCreate, + Read: resourceAwsCloudwatchLogSubscriptionFilterRead, + Update: resourceAwsCloudwatchLogSubscriptionFilterUpdate, + Delete: resourceAwsCloudwatchLogSubscriptionFilterDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "destination_arn": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "filter_pattern": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + "log_group_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "role_arn": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + } +} + +func resourceAwsCloudwatchLogSubscriptionFilterCreate(d *schema.ResourceData, meta interface{}) error { + conn := 
meta.(*AWSClient).cloudwatchlogsconn + params := getAwsCloudWatchLogsSubscriptionFilterInput(d) + log.Printf("[DEBUG] Creating SubscriptionFilter %#v", params) + + return resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.PutSubscriptionFilter(&params) + + if err == nil { + d.SetId(cloudwatchLogsSubscriptionFilterId(d.Get("log_group_name").(string))) + log.Printf("[DEBUG] Cloudwatch logs subscription %q created", d.Id()) + } + + awsErr, ok := err.(awserr.Error) + if !ok { + return resource.RetryableError(err) + } + + if awsErr.Code() == "InvalidParameterException" { + log.Printf("[DEBUG] Caught message: %q, code: %q: Retrying", awsErr.Message(), awsErr.Code()) + if strings.Contains(awsErr.Message(), "Could not deliver test message to specified") { + return resource.RetryableError(err) + } + if strings.Contains(awsErr.Message(), "Could not execute the lambda function") { + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + + return resource.NonRetryableError(err) + }) +} + +func resourceAwsCloudwatchLogSubscriptionFilterUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + + params := getAwsCloudWatchLogsSubscriptionFilterInput(d) + + log.Printf("[DEBUG] Update SubscriptionFilter %#v", params) + _, err := conn.PutSubscriptionFilter(&params) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + return fmt.Errorf("[WARN] Error updating SubscriptionFilter (%s) for LogGroup (%s), message: \"%s\", code: \"%s\"", + d.Get("name").(string), d.Get("log_group_name").(string), awsErr.Message(), awsErr.Code()) + } + return err + } + + d.SetId(cloudwatchLogsSubscriptionFilterId(d.Get("log_group_name").(string))) + return resourceAwsCloudwatchLogSubscriptionFilterRead(d, meta) +} + +func getAwsCloudWatchLogsSubscriptionFilterInput(d *schema.ResourceData) cloudwatchlogs.PutSubscriptionFilterInput { + name := d.Get("name").(string) + destination_arn := d.Get("destination_arn").(string) + filter_pattern := d.Get("filter_pattern").(string) + log_group_name := d.Get("log_group_name").(string) + + params := cloudwatchlogs.PutSubscriptionFilterInput{ + FilterName: aws.String(name), + DestinationArn: aws.String(destination_arn), + FilterPattern: aws.String(filter_pattern), + LogGroupName: aws.String(log_group_name), + } + + if _, ok := d.GetOk("role_arn"); ok { + params.RoleArn = aws.String(d.Get("role_arn").(string)) + } + + return params +} + +func resourceAwsCloudwatchLogSubscriptionFilterRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + + log_group_name := d.Get("log_group_name").(string) + name := d.Get("name").(string) // "name" is a required field in the schema + + req := &cloudwatchlogs.DescribeSubscriptionFiltersInput{ + LogGroupName: aws.String(log_group_name), + FilterNamePrefix: aws.String(name), + } + + resp, err := conn.DescribeSubscriptionFilters(req) + if err != nil { + return fmt.Errorf("Error reading SubscriptionFilters for log group %s with name prefix %s: %#v", log_group_name, name, err) + } + + for _, subscriptionFilter := range resp.SubscriptionFilters { + // Compare the exact filter name; FilterNamePrefix above only narrows + // the listing, so a prefix match alone is not enough. + if *subscriptionFilter.FilterName == name { + d.SetId(cloudwatchLogsSubscriptionFilterId(log_group_name)) + return nil // OK, matching subscription filter found + } + } + + log.Printf("[DEBUG] Subscription Filter %q Not Found", name) + d.SetId("") + return nil +} + +func resourceAwsCloudwatchLogSubscriptionFilterDelete(d *schema.ResourceData, meta
interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + log.Printf("[INFO] Deleting CloudWatch Log Group Subscription: %s", d.Id()) + log_group_name := d.Get("log_group_name").(string) + name := d.Get("name").(string) + + params := &cloudwatchlogs.DeleteSubscriptionFilterInput{ + FilterName: aws.String(name), // Required + LogGroupName: aws.String(log_group_name), // Required + } + _, err := conn.DeleteSubscriptionFilter(params) + if err != nil { + return fmt.Errorf( + "Error deleting Subscription Filter from log group: %s with name filter name %s", log_group_name, name) + } + d.SetId("") + return nil +} + +func cloudwatchLogsSubscriptionFilterId(log_group_name string) string { + var buf bytes.Buffer + + buf.WriteString(fmt.Sprintf("%s-", log_group_name)) // only one filter allowed per log_group_name at the moment + + return fmt.Sprintf("cwlsf-%d", hashcode.String(buf.String())) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_metric_alarm.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_metric_alarm.go new file mode 100644 index 000000000..8eef4ebee --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_metric_alarm.go @@ -0,0 +1,338 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/hashicorp/terraform/helper/validation" +) + +func resourceAwsCloudWatchMetricAlarm() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCloudWatchMetricAlarmCreate, + Read: resourceAwsCloudWatchMetricAlarmRead, + Update: resourceAwsCloudWatchMetricAlarmUpdate, + Delete: resourceAwsCloudWatchMetricAlarmDelete, + SchemaVersion: 1, + MigrateState: resourceAwsCloudWatchMetricAlarmMigrateState, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "alarm_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "comparison_operator": { + Type: schema.TypeString, + Required: true, + }, + "evaluation_periods": { + Type: schema.TypeInt, + Required: true, + }, + "metric_name": { + Type: schema.TypeString, + Required: true, + }, + "namespace": { + Type: schema.TypeString, + Required: true, + }, + "period": { + Type: schema.TypeInt, + Required: true, + }, + "statistic": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"extended_statistic"}, + }, + "threshold": { + Type: schema.TypeFloat, + Required: true, + }, + "actions_enabled": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "alarm_actions": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "alarm_description": { + Type: schema.TypeString, + Optional: true, + }, + "dimensions": { + Type: schema.TypeMap, + Optional: true, + }, + "insufficient_data_actions": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "ok_actions": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "unit": { + Type: schema.TypeString, + Optional: true, + }, + "extended_statistic": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"statistic"}, + }, + "treat_missing_data": { + Type: 
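+ // NOTE: "treat_missing_data" takes breaching, notBreaching, ignore or
+ // missing (validated just below), with "missing" as the schema
+ // default. A hedged HCL sketch of a minimal alarm (hypothetical
+ // values):
+ //
+ //   resource "aws_cloudwatch_metric_alarm" "example" {
+ //     alarm_name          = "cpu-high"
+ //     comparison_operator = "GreaterThanThreshold"
+ //     evaluation_periods  = 2
+ //     metric_name         = "CPUUtilization"
+ //     namespace           = "AWS/EC2"
+ //     period              = 300
+ //     statistic           = "Average"
+ //     threshold           = 80
+ //     treat_missing_data  = "missing"
+ //   }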
schema.TypeString, + Optional: true, + Default: "missing", + ValidateFunc: validation.StringInSlice([]string{"breaching", "notBreaching", "ignore", "missing"}, true), + }, + "evaluate_low_sample_count_percentiles": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"evaluate", "ignore"}, true), + }, + }, + } +} + +func resourceAwsCloudWatchMetricAlarmCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchconn + + _, statisticOk := d.GetOk("statistic") + _, extendedStatisticOk := d.GetOk("extended_statistic") + + if !statisticOk && !extendedStatisticOk { + return fmt.Errorf("One of `statistic` or `extended_statistic` must be set for a cloudwatch metric alarm") + } + + params := getAwsCloudWatchPutMetricAlarmInput(d) + + log.Printf("[DEBUG] Creating CloudWatch Metric Alarm: %#v", params) + _, err := conn.PutMetricAlarm(&params) + if err != nil { + return fmt.Errorf("Creating metric alarm failed: %s", err) + } + d.SetId(d.Get("alarm_name").(string)) + log.Println("[INFO] CloudWatch Metric Alarm created") + + return resourceAwsCloudWatchMetricAlarmRead(d, meta) +} + +func resourceAwsCloudWatchMetricAlarmRead(d *schema.ResourceData, meta interface{}) error { + a, err := getAwsCloudWatchMetricAlarm(d, meta) + if err != nil { + return err + } + if a == nil { + d.SetId("") + return nil + } + + log.Printf("[DEBUG] Reading CloudWatch Metric Alarm: %s", d.Get("alarm_name")) + + d.Set("actions_enabled", a.ActionsEnabled) + + if err := d.Set("alarm_actions", _strArrPtrToList(a.AlarmActions)); err != nil { + log.Printf("[WARN] Error setting Alarm Actions: %s", err) + } + d.Set("alarm_description", a.AlarmDescription) + d.Set("alarm_name", a.AlarmName) + d.Set("comparison_operator", a.ComparisonOperator) + if err := d.Set("dimensions", flattenDimensions(a.Dimensions)); err != nil { + return err + } + d.Set("evaluation_periods", a.EvaluationPeriods) + + if err := d.Set("insufficient_data_actions", _strArrPtrToList(a.InsufficientDataActions)); err != nil { + log.Printf("[WARN] Error setting Insufficient Data Actions: %s", err) + } + d.Set("metric_name", a.MetricName) + d.Set("namespace", a.Namespace) + + if err := d.Set("ok_actions", _strArrPtrToList(a.OKActions)); err != nil { + log.Printf("[WARN] Error setting OK Actions: %s", err) + } + d.Set("period", a.Period) + d.Set("statistic", a.Statistic) + d.Set("threshold", a.Threshold) + d.Set("unit", a.Unit) + d.Set("extended_statistic", a.ExtendedStatistic) + d.Set("treat_missing_data", a.TreatMissingData) + d.Set("evaluate_low_sample_count_percentiles", a.EvaluateLowSampleCountPercentile) + + return nil +} + +func resourceAwsCloudWatchMetricAlarmUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchconn + params := getAwsCloudWatchPutMetricAlarmInput(d) + + log.Printf("[DEBUG] Updating CloudWatch Metric Alarm: %#v", params) + _, err := conn.PutMetricAlarm(&params) + if err != nil { + return fmt.Errorf("Updating metric alarm failed: %s", err) + } + log.Println("[INFO] CloudWatch Metric Alarm updated") + + return resourceAwsCloudWatchMetricAlarmRead(d, meta) +} + +func resourceAwsCloudWatchMetricAlarmDelete(d *schema.ResourceData, meta interface{}) error { + p, err := getAwsCloudWatchMetricAlarm(d, meta) + if err != nil { + return err + } + if p == nil { + log.Printf("[DEBUG] CloudWatch Metric Alarm %s is already gone", d.Id()) + return nil + } + + log.Printf("[INFO] Deleting CloudWatch Metric Alarm: %s", d.Id()) + + conn :=
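+ // NOTE: Delete re-reads the alarm first (via getAwsCloudWatchMetricAlarm
+ // below) so a vanished alarm becomes a no-op instead of an API error.
+ // DeleteAlarms itself is batch-shaped, though only this resource's ID
+ // is ever passed; a hedged sketch with a hypothetical name:
+ //
+ //   _, err := conn.DeleteAlarms(&cloudwatch.DeleteAlarmsInput{
+ //       AlarmNames: []*string{aws.String("cpu-high")},
+ //   })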
meta.(*AWSClient).cloudwatchconn + params := cloudwatch.DeleteAlarmsInput{ + AlarmNames: []*string{aws.String(d.Id())}, + } + + if _, err := conn.DeleteAlarms(&params); err != nil { + return fmt.Errorf("Error deleting CloudWatch Metric Alarm: %s", err) + } + log.Println("[INFO] CloudWatch Metric Alarm deleted") + + d.SetId("") + return nil +} + +func getAwsCloudWatchPutMetricAlarmInput(d *schema.ResourceData) cloudwatch.PutMetricAlarmInput { + params := cloudwatch.PutMetricAlarmInput{ + AlarmName: aws.String(d.Get("alarm_name").(string)), + ComparisonOperator: aws.String(d.Get("comparison_operator").(string)), + EvaluationPeriods: aws.Int64(int64(d.Get("evaluation_periods").(int))), + MetricName: aws.String(d.Get("metric_name").(string)), + Namespace: aws.String(d.Get("namespace").(string)), + Period: aws.Int64(int64(d.Get("period").(int))), + Threshold: aws.Float64(d.Get("threshold").(float64)), + TreatMissingData: aws.String(d.Get("treat_missing_data").(string)), + } + + if v := d.Get("actions_enabled"); v != nil { + params.ActionsEnabled = aws.Bool(v.(bool)) + } + + if v, ok := d.GetOk("alarm_description"); ok { + params.AlarmDescription = aws.String(v.(string)) + } + + if v, ok := d.GetOk("unit"); ok { + params.Unit = aws.String(v.(string)) + } + + if v, ok := d.GetOk("statistic"); ok { + params.Statistic = aws.String(v.(string)) + } + + if v, ok := d.GetOk("extended_statistic"); ok { + params.ExtendedStatistic = aws.String(v.(string)) + } + + if v, ok := d.GetOk("evaluate_low_sample_count_percentiles"); ok { + params.EvaluateLowSampleCountPercentile = aws.String(v.(string)) + } + + var alarmActions []*string + if v := d.Get("alarm_actions"); v != nil { + for _, v := range v.(*schema.Set).List() { + str := v.(string) + alarmActions = append(alarmActions, aws.String(str)) + } + params.AlarmActions = alarmActions + } + + var insufficientDataActions []*string + if v := d.Get("insufficient_data_actions"); v != nil { + for _, v := range v.(*schema.Set).List() { + str := v.(string) + insufficientDataActions = append(insufficientDataActions, aws.String(str)) + } + params.InsufficientDataActions = insufficientDataActions + } + + var okActions []*string + if v := d.Get("ok_actions"); v != nil { + for _, v := range v.(*schema.Set).List() { + str := v.(string) + okActions = append(okActions, aws.String(str)) + } + params.OKActions = okActions + } + + a := d.Get("dimensions").(map[string]interface{}) + dimensions := make([]*cloudwatch.Dimension, 0, len(a)) + for k, v := range a { + dimensions = append(dimensions, &cloudwatch.Dimension{ + Name: aws.String(k), + Value: aws.String(v.(string)), + }) + } + params.Dimensions = dimensions + + return params +} + +func getAwsCloudWatchMetricAlarm(d *schema.ResourceData, meta interface{}) (*cloudwatch.MetricAlarm, error) { + conn := meta.(*AWSClient).cloudwatchconn + + params := cloudwatch.DescribeAlarmsInput{ + AlarmNames: []*string{aws.String(d.Id())}, + } + + resp, err := conn.DescribeAlarms(&params) + if err != nil { + return nil, err + } + + // Find it and return it + for idx, ma := range resp.MetricAlarms { + if *ma.AlarmName == d.Id() { + return resp.MetricAlarms[idx], nil + } + } + + return nil, nil +} + +func _strArrPtrToList(strArrPtr []*string) []string { + var result []string + for _, elem := range strArrPtr { + result = append(result, *elem) + } + return result +} + +func flattenDimensions(dims []*cloudwatch.Dimension) map[string]interface{} { + flatDims := make(map[string]interface{}) + for _, d := range dims { + flatDims[*d.Name] = *d.Value + } +
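+ // e.g. []*cloudwatch.Dimension{{Name: aws.String("InstanceId"),
+ // Value: aws.String("i-abc123")}} flattens to
+ // map[string]interface{}{"InstanceId": "i-abc123"} (hypothetical values).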
return flatDims +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_metric_alarm_migrate.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_metric_alarm_migrate.go new file mode 100644 index 000000000..0ebd7f80d --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cloudwatch_metric_alarm_migrate.go @@ -0,0 +1,33 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/terraform" +) + +func resourceAwsCloudWatchMetricAlarmMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + switch v { + case 0: + log.Println("[INFO] Found AWS CloudWatch Metric Alarm State v0; migrating to v1") + return migrateCloudWatchMetricAlarmStateV0toV1(is) + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateCloudWatchMetricAlarmStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + is.Attributes["treat_missing_data"] = "missing" + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codebuild_project.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codebuild_project.go new file mode 100644 index 000000000..bbd3523a3 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codebuild_project.go @@ -0,0 +1,746 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "regexp" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/codebuild" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsCodeBuildProject() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCodeBuildProjectCreate, + Read: resourceAwsCodeBuildProjectRead, + Update: resourceAwsCodeBuildProjectUpdate, + Delete: resourceAwsCodeBuildProjectDelete, + + Schema: map[string]*schema.Schema{ + "artifacts": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + }, + "location": { + Type: schema.TypeString, + Optional: true, + }, + "namespace_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateAwsCodeBuildArifactsNamespaceType, + }, + "packaging": { + Type: schema.TypeString, + Optional: true, + }, + "path": { + Type: schema.TypeString, + Optional: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAwsCodeBuildArifactsType, + }, + }, + }, + Set: resourceAwsCodeBuildProjectArtifactsHash, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateAwsCodeBuildProjectDescription, + }, + "encryption_key": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "environment": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "compute_type": { + Type: schema.TypeString, + Required: true, + 
ValidateFunc: validateAwsCodeBuildEnvironmentComputeType, + }, + "environment_variable": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "image": { + Type: schema.TypeString, + Required: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAwsCodeBuildEnvironmentType, + }, + }, + }, + Set: resourceAwsCodeBuildProjectEnvironmentHash, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateAwsCodeBuildProjectName, + }, + "service_role": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "source": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auth": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource": { + Type: schema.TypeString, + Optional: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAwsCodeBuildSourceAuthType, + }, + }, + }, + Optional: true, + }, + "buildspec": { + Type: schema.TypeString, + Optional: true, + }, + "location": { + Type: schema.TypeString, + Optional: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAwsCodeBuildSourceType, + }, + }, + }, + Required: true, + MaxItems: 1, + }, + "timeout": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validateAwsCodeBuildTimeout, + Removed: "This field has been removed. Please use build_timeout instead", + }, + "build_timeout": { + Type: schema.TypeInt, + Optional: true, + Default: "60", + ValidateFunc: validateAwsCodeBuildTimeout, + }, + "tags": tagsSchema(), + }, + } +} + +func resourceAwsCodeBuildProjectCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codebuildconn + + projectEnv := expandProjectEnvironment(d) + projectSource := expandProjectSource(d) + projectArtifacts := expandProjectArtifacts(d) + + params := &codebuild.CreateProjectInput{ + Environment: projectEnv, + Name: aws.String(d.Get("name").(string)), + Source: &projectSource, + Artifacts: &projectArtifacts, + } + + if v, ok := d.GetOk("description"); ok { + params.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("encryption_key"); ok { + params.EncryptionKey = aws.String(v.(string)) + } + + if v, ok := d.GetOk("service_role"); ok { + params.ServiceRole = aws.String(v.(string)) + } + + if v, ok := d.GetOk("build_timeout"); ok { + params.TimeoutInMinutes = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("tags"); ok { + params.Tags = tagsFromMapCodeBuild(v.(map[string]interface{})) + } + + var resp *codebuild.CreateProjectOutput + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + var err error + + resp, err = conn.CreateProject(params) + + if err != nil { + return resource.RetryableError(err) + } + + return resource.NonRetryableError(err) + }) + + if err != nil { + return fmt.Errorf("[ERROR] Error creating CodeBuild project: %s", err) + } + + d.SetId(*resp.Project.Arn) + + return resourceAwsCodeBuildProjectUpdate(d, meta) +} + +func expandProjectArtifacts(d *schema.ResourceData) codebuild.ProjectArtifacts { + configs := d.Get("artifacts").(*schema.Set).List() + data := configs[0].(map[string]interface{}) + + projectArtifacts := 
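+ // NOTE: the expansion below reads the single "artifacts" element and
+ // copies only non-empty fields; unset strings in a schema set element
+ // surface as "" rather than nil, so these guards keep zero values out
+ // of the API call. Shape being built (hypothetical values):
+ //
+ //   codebuild.ProjectArtifacts{
+ //       Type:     aws.String("S3"),
+ //       Location: aws.String("my-artifact-bucket"),
+ //   }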
codebuild.ProjectArtifacts{ + Type: aws.String(data["type"].(string)), + } + + if data["location"].(string) != "" { + projectArtifacts.Location = aws.String(data["location"].(string)) + } + + if data["name"].(string) != "" { + projectArtifacts.Name = aws.String(data["name"].(string)) + } + + if data["namespace_type"].(string) != "" { + projectArtifacts.NamespaceType = aws.String(data["namespace_type"].(string)) + } + + if data["packaging"].(string) != "" { + projectArtifacts.Packaging = aws.String(data["packaging"].(string)) + } + + if data["path"].(string) != "" { + projectArtifacts.Path = aws.String(data["path"].(string)) + } + + return projectArtifacts +} + +func expandProjectEnvironment(d *schema.ResourceData) *codebuild.ProjectEnvironment { + configs := d.Get("environment").(*schema.Set).List() + projectEnv := &codebuild.ProjectEnvironment{} + + envConfig := configs[0].(map[string]interface{}) + + if v := envConfig["compute_type"]; v != nil { + projectEnv.ComputeType = aws.String(v.(string)) + } + + if v := envConfig["image"]; v != nil { + projectEnv.Image = aws.String(v.(string)) + } + + if v := envConfig["type"]; v != nil { + projectEnv.Type = aws.String(v.(string)) + } + + if v := envConfig["environment_variable"]; v != nil { + envVariables := v.([]interface{}) + if len(envVariables) > 0 { + projectEnvironmentVariables := make([]*codebuild.EnvironmentVariable, 0, len(envVariables)) + + for _, envVariablesConfig := range envVariables { + config := envVariablesConfig.(map[string]interface{}) + + projectEnvironmentVar := &codebuild.EnvironmentVariable{} + + if v := config["name"].(string); v != "" { + projectEnvironmentVar.Name = &v + } + + if v := config["value"].(string); v != "" { + projectEnvironmentVar.Value = &v + } + + projectEnvironmentVariables = append(projectEnvironmentVariables, projectEnvironmentVar) + } + + projectEnv.EnvironmentVariables = projectEnvironmentVariables + } + } + + return projectEnv +} + +func expandProjectSource(d *schema.ResourceData) codebuild.ProjectSource { + configs := d.Get("source").(*schema.Set).List() + projectSource := codebuild.ProjectSource{} + + for _, configRaw := range configs { + data := configRaw.(map[string]interface{}) + + sourceType := data["type"].(string) + location := data["location"].(string) + buildspec := data["buildspec"].(string) + + projectSource = codebuild.ProjectSource{ + Type: &sourceType, + Location: &location, + Buildspec: &buildspec, + } + + if v, ok := data["auth"]; ok { + if len(v.(*schema.Set).List()) > 0 { + auth := v.(*schema.Set).List()[0].(map[string]interface{}) + + projectSource.Auth = &codebuild.SourceAuth{ + Type: aws.String(auth["type"].(string)), + Resource: aws.String(auth["resource"].(string)), + } + } + } + } + + return projectSource +} + +func resourceAwsCodeBuildProjectRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codebuildconn + + resp, err := conn.BatchGetProjects(&codebuild.BatchGetProjectsInput{ + Names: []*string{ + aws.String(d.Id()), + }, + }) + + if err != nil { + return fmt.Errorf("[ERROR] Error retrieving Projects: %q", err) + } + + // if nothing was found, then return no state + if len(resp.Projects) == 0 { + log.Printf("[INFO]: No projects were found, removing from state") + d.SetId("") + return nil + } + + project := resp.Projects[0] + + if err := d.Set("artifacts", flattenAwsCodebuildProjectArtifacts(project.Artifacts)); err != nil { + return err + } + + if err := d.Set("environment", schema.NewSet(resourceAwsCodeBuildProjectEnvironmentHash,
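+ // NOTE: "environment" is a TypeSet with a custom hash function, so the
+ // read path rebuilds the set with that same hash; handing d.Set a
+ // plain slice here could yield spurious diffs whenever the stored
+ // element hashes differently from the freshly flattened one.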
flattenAwsCodebuildProjectEnvironment(project.Environment))); err != nil { + return err + } + + if err := d.Set("source", flattenAwsCodebuildProjectSource(project.Source)); err != nil { + return err + } + + d.Set("description", project.Description) + d.Set("encryption_key", project.EncryptionKey) + d.Set("name", project.Name) + d.Set("service_role", project.ServiceRole) + d.Set("build_timeout", project.TimeoutInMinutes) + + if err := d.Set("tags", tagsToMapCodeBuild(project.Tags)); err != nil { + return err + } + + return nil +} + +func resourceAwsCodeBuildProjectUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codebuildconn + + params := &codebuild.UpdateProjectInput{ + Name: aws.String(d.Get("name").(string)), + } + + if d.HasChange("environment") { + projectEnv := expandProjectEnvironment(d) + params.Environment = projectEnv + } + + if d.HasChange("source") { + projectSource := expandProjectSource(d) + params.Source = &projectSource + } + + if d.HasChange("artifacts") { + projectArtifacts := expandProjectArtifacts(d) + params.Artifacts = &projectArtifacts + } + + if d.HasChange("description") { + params.Description = aws.String(d.Get("description").(string)) + } + + if d.HasChange("encryption_key") { + params.EncryptionKey = aws.String(d.Get("encryption_key").(string)) + } + + if d.HasChange("service_role") { + params.ServiceRole = aws.String(d.Get("service_role").(string)) + } + + if d.HasChange("build_timeout") { + params.TimeoutInMinutes = aws.Int64(int64(d.Get("build_timeout").(int))) + } + + // The documentation clearly says "The replacement set of tags for this build project." + // Because it's a replacement set, tags must be sent with every update or the + // existing ones are removed. + params.Tags = tagsFromMapCodeBuild(d.Get("tags").(map[string]interface{})) + + _, err := conn.UpdateProject(params) + + if err != nil { + return fmt.Errorf( + "[ERROR] Error updating CodeBuild project (%s): %s", + d.Id(), err) + } + + return resourceAwsCodeBuildProjectRead(d, meta) +} + +func resourceAwsCodeBuildProjectDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codebuildconn + + _, err := conn.DeleteProject(&codebuild.DeleteProjectInput{ + Name: aws.String(d.Id()), + }) + + if err != nil { + return err + } + + d.SetId("") + + return nil +} + +func flattenAwsCodebuildProjectArtifacts(artifacts *codebuild.ProjectArtifacts) *schema.Set { + + artifactSet := schema.Set{ + F: resourceAwsCodeBuildProjectArtifactsHash, + } + + values := map[string]interface{}{} + + values["type"] = *artifacts.Type + + if artifacts.Location != nil { + values["location"] = *artifacts.Location + } + + if artifacts.Name != nil { + values["name"] = *artifacts.Name + } + + if artifacts.NamespaceType != nil { + values["namespace_type"] = *artifacts.NamespaceType + } + + if artifacts.Packaging != nil { + values["packaging"] = *artifacts.Packaging + } + + if artifacts.Path != nil { + values["path"] = *artifacts.Path + } + + artifactSet.Add(values) + + return &artifactSet +} + +func flattenAwsCodebuildProjectEnvironment(environment *codebuild.ProjectEnvironment) []interface{} { + envConfig := map[string]interface{}{} + + envConfig["type"] = *environment.Type + envConfig["compute_type"] = *environment.ComputeType + envConfig["image"] = *environment.Image + + if environment.EnvironmentVariables != nil { + envConfig["environment_variable"] = environmentVariablesToMap(environment.EnvironmentVariables) + } + + return []interface{}{envConfig} + +} + +func
flattenAwsCodebuildProjectSource(source *codebuild.ProjectSource) *schema.Set { + + sourceSet := schema.Set{ + F: resourceAwsCodeBuildProjectSourceHash, + } + + authSet := schema.Set{ + F: resourceAwsCodeBuildProjectSourceAuthHash, + } + + sourceConfig := map[string]interface{}{} + + sourceConfig["type"] = *source.Type + + if source.Auth != nil { + authSet.Add(sourceAuthToMap(source.Auth)) + sourceConfig["auth"] = &authSet + } + + if source.Buildspec != nil { + sourceConfig["buildspec"] = *source.Buildspec + } + + if source.Location != nil { + sourceConfig["location"] = *source.Location + } + + sourceSet.Add(sourceConfig) + + return &sourceSet + +} + +func resourceAwsCodeBuildProjectArtifactsHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + + artifactType := m["type"].(string) + + buf.WriteString(fmt.Sprintf("%s-", artifactType)) + + return hashcode.String(buf.String()) +} + +func resourceAwsCodeBuildProjectEnvironmentHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + + environmentType := m["type"].(string) + computeType := m["compute_type"].(string) + image := m["image"].(string) + environmentVariables := m["environment_variable"].([]interface{}) + buf.WriteString(fmt.Sprintf("%s-", environmentType)) + buf.WriteString(fmt.Sprintf("%s-", computeType)) + buf.WriteString(fmt.Sprintf("%s-", image)) + for _, e := range environmentVariables { + if e != nil { // Old statefiles might have nil values in them + ev := e.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s:%s-", ev["name"].(string), ev["value"].(string))) + } + } + + return hashcode.String(buf.String()) +} + +func resourceAwsCodeBuildProjectSourceHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + + sourceType := m["type"].(string) + buildspec := m["buildspec"].(string) + location := m["location"].(string) + + buf.WriteString(fmt.Sprintf("%s-", sourceType)) + buf.WriteString(fmt.Sprintf("%s-", buildspec)) + buf.WriteString(fmt.Sprintf("%s-", location)) + + return hashcode.String(buf.String()) +} + +func resourceAwsCodeBuildProjectSourceAuthHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + + buf.WriteString(fmt.Sprintf("%s-", m["type"].(string))) + + if m["resource"] != nil { + buf.WriteString(fmt.Sprintf("%s-", m["resource"].(string))) + } + + return hashcode.String(buf.String()) +} + +func environmentVariablesToMap(environmentVariables []*codebuild.EnvironmentVariable) []interface{} { + + envVariables := []interface{}{} + if len(environmentVariables) > 0 { + for _, env := range environmentVariables { + item := map[string]interface{}{} + item["name"] = *env.Name + item["value"] = *env.Value + envVariables = append(envVariables, item) + } + } + + return envVariables +} + +func sourceAuthToMap(sourceAuth *codebuild.SourceAuth) map[string]interface{} { + + auth := map[string]interface{}{} + auth["type"] = *sourceAuth.Type + + if sourceAuth.Resource != nil { + auth["resource"] = *sourceAuth.Resource + } + + return auth +} + +func validateAwsCodeBuildArifactsType(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + types := map[string]bool{ + "CODEPIPELINE": true, + "NO_ARTIFACTS": true, + "S3": true, + } + + if !types[value] { + errors = append(errors, fmt.Errorf("CodeBuild: Arifacts Type can only be CODEPIPELINE / NO_ARTIFACTS / S3")) + } + return +} + +func validateAwsCodeBuildArifactsNamespaceType(v interface{}, k string) (ws []string, errors []error) { + value := 
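+ // NOTE: the validators in this file follow the helper/schema
+ // convention of returning (warnings []string, errors []error); any
+ // appended error fails validation for that attribute. Minimal sketch
+ // of the shape (hypothetical validator):
+ //
+ //   func validateExample(v interface{}, k string) (ws []string, errors []error) {
+ //       if v.(string) == "" {
+ //           errors = append(errors, fmt.Errorf("%q must not be empty", k))
+ //       }
+ //       return
+ //   }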
v.(string) + types := map[string]bool{ + "NONE": true, + "BUILD_ID": true, + } + + if !types[value] { + errors = append(errors, fmt.Errorf("CodeBuild: Arifacts Namespace Type can only be NONE / BUILD_ID")) + } + return +} + +func validateAwsCodeBuildProjectName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[A-Za-z0-9]`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "first character of %q must be a letter or number", value)) + } + + if !regexp.MustCompile(`^[A-Za-z0-9\-_]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only alphanumeric characters, hyphens and underscores allowed in %q", value)) + } + + if len(value) > 255 { + errors = append(errors, fmt.Errorf( + "%q cannot be greater than 255 characters", value)) + } + + return +} + +func validateAwsCodeBuildProjectDescription(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 255 { + errors = append(errors, fmt.Errorf("%q cannot be greater than 255 characters", value)) + } + return +} + +func validateAwsCodeBuildEnvironmentComputeType(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + types := map[string]bool{ + "BUILD_GENERAL1_SMALL": true, + "BUILD_GENERAL1_MEDIUM": true, + "BUILD_GENERAL1_LARGE": true, + } + + if !types[value] { + errors = append(errors, fmt.Errorf("CodeBuild: Environment Compute Type can only be BUILD_GENERAL1_SMALL / BUILD_GENERAL1_MEDIUM / BUILD_GENERAL1_LARGE")) + } + return +} + +func validateAwsCodeBuildEnvironmentType(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + types := map[string]bool{ + "LINUX_CONTAINER": true, + } + + if !types[value] { + errors = append(errors, fmt.Errorf("CodeBuild: Environment Type can only be LINUX_CONTAINER")) + } + return +} + +func validateAwsCodeBuildSourceType(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + types := map[string]bool{ + "CODECOMMIT": true, + "CODEPIPELINE": true, + "GITHUB": true, + "S3": true, + } + + if !types[value] { + errors = append(errors, fmt.Errorf("CodeBuild: Source Type can only be CODECOMMIT / CODEPIPELINE / GITHUB / S3")) + } + return +} + +func validateAwsCodeBuildSourceAuthType(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + types := map[string]bool{ + "OAUTH": true, + } + + if !types[value] { + errors = append(errors, fmt.Errorf("CodeBuild: Source Auth Type can only be OAUTH")) + } + return +} + +func validateAwsCodeBuildTimeout(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + + if value < 5 || value > 480 { + errors = append(errors, fmt.Errorf("%q must be between 5 and 480 minutes (8 hours)", k)) + } + return +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codebuild_project_migrate.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codebuild_project_migrate.go new file mode 100644 index 000000000..97d7a9ff2 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codebuild_project_migrate.go @@ -0,0 +1,36 @@ +package aws + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/terraform" +) + +func resourceAwsCodebuildMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + switch v { + case 0: + log.Println("[INFO] Found AWS Codebuild State
v0; migrating to v1") + return migrateCodebuildStateV0toV1(is) + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateCodebuildStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + if is.Attributes["timeout"] != "" { + is.Attributes["build_timeout"] = strings.TrimSpace(is.Attributes["timeout"]) + } + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codecommit_repository.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codecommit_repository.go new file mode 100644 index 000000000..a477c274e --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codecommit_repository.go @@ -0,0 +1,202 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/codecommit" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsCodeCommitRepository() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCodeCommitRepositoryCreate, + Update: resourceAwsCodeCommitRepositoryUpdate, + Read: resourceAwsCodeCommitRepositoryRead, + Delete: resourceAwsCodeCommitRepositoryDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "repository_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 100 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 100 characters", k)) + } + return + }, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 1000 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 1000 characters", k)) + } + return + }, + }, + + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "repository_id": { + Type: schema.TypeString, + Computed: true, + }, + + "clone_url_http": { + Type: schema.TypeString, + Computed: true, + }, + + "clone_url_ssh": { + Type: schema.TypeString, + Computed: true, + }, + + "default_branch": { + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func resourceAwsCodeCommitRepositoryCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codecommitconn + + input := &codecommit.CreateRepositoryInput{ + RepositoryName: aws.String(d.Get("repository_name").(string)), + RepositoryDescription: aws.String(d.Get("description").(string)), + } + + out, err := conn.CreateRepository(input) + if err != nil { + return fmt.Errorf("Error creating CodeCommit Repository: %s", err) + } + + d.SetId(d.Get("repository_name").(string)) + d.Set("repository_id", out.RepositoryMetadata.RepositoryId) + d.Set("arn", out.RepositoryMetadata.Arn) + d.Set("clone_url_http", out.RepositoryMetadata.CloneUrlHttp) + d.Set("clone_url_ssh", out.RepositoryMetadata.CloneUrlSsh) + + return resourceAwsCodeCommitRepositoryUpdate(d, meta) +} + +func resourceAwsCodeCommitRepositoryUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codecommitconn 
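+	// The two mutable attributes are pushed through separate API calls;
+	// the default branch is only updated when it is both set and changed,
+	// since a default branch cannot be set on a repository that has no
+	// branches yet (see resourceAwsCodeCommitUpdateDefaultBranch below).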
+ + if _, ok := d.GetOk("default_branch"); ok { + if d.HasChange("default_branch") { + if err := resourceAwsCodeCommitUpdateDefaultBranch(conn, d); err != nil { + return err + } + } + } + + if d.HasChange("description") { + if err := resourceAwsCodeCommitUpdateDescription(conn, d); err != nil { + return err + } + } + + return resourceAwsCodeCommitRepositoryRead(d, meta) +} + +func resourceAwsCodeCommitRepositoryRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codecommitconn + + input := &codecommit.GetRepositoryInput{ + RepositoryName: aws.String(d.Id()), + } + + out, err := conn.GetRepository(input) + if err != nil { + return fmt.Errorf("Error reading CodeCommit Repository: %s", err.Error()) + } + + d.Set("repository_id", out.RepositoryMetadata.RepositoryId) + d.Set("arn", out.RepositoryMetadata.Arn) + d.Set("clone_url_http", out.RepositoryMetadata.CloneUrlHttp) + d.Set("clone_url_ssh", out.RepositoryMetadata.CloneUrlSsh) + d.Set("description", out.RepositoryMetadata.RepositoryDescription) + d.Set("repository_name", out.RepositoryMetadata.RepositoryName) + + if _, ok := d.GetOk("default_branch"); ok { + if out.RepositoryMetadata.DefaultBranch != nil { + d.Set("default_branch", out.RepositoryMetadata.DefaultBranch) + } + } + + return nil +} + +func resourceAwsCodeCommitRepositoryDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codecommitconn + + log.Printf("[DEBUG] CodeCommit Delete Repository: %s", d.Id()) + _, err := conn.DeleteRepository(&codecommit.DeleteRepositoryInput{ + RepositoryName: aws.String(d.Id()), + }) + if err != nil { + return fmt.Errorf("Error deleting CodeCommit Repository: %s", err.Error()) + } + + return nil +} + +func resourceAwsCodeCommitUpdateDescription(conn *codecommit.CodeCommit, d *schema.ResourceData) error { + branchInput := &codecommit.UpdateRepositoryDescriptionInput{ + RepositoryName: aws.String(d.Id()), + RepositoryDescription: aws.String(d.Get("description").(string)), + } + + _, err := conn.UpdateRepositoryDescription(branchInput) + if err != nil { + return fmt.Errorf("Error Updating Repository Description for CodeCommit Repository: %s", err.Error()) + } + + return nil +} + +func resourceAwsCodeCommitUpdateDefaultBranch(conn *codecommit.CodeCommit, d *schema.ResourceData) error { + input := &codecommit.ListBranchesInput{ + RepositoryName: aws.String(d.Id()), + } + + out, err := conn.ListBranches(input) + if err != nil { + return fmt.Errorf("Error reading CodeCommit Repository branches: %s", err.Error()) + } + + if len(out.Branches) == 0 { + log.Printf("[WARN] Not setting Default Branch CodeCommit Repository that has no branches: %s", d.Id()) + return nil + } + + branchInput := &codecommit.UpdateDefaultBranchInput{ + RepositoryName: aws.String(d.Id()), + DefaultBranchName: aws.String(d.Get("default_branch").(string)), + } + + _, err = conn.UpdateDefaultBranch(branchInput) + if err != nil { + return fmt.Errorf("Error Updating Default Branch for CodeCommit Repository: %s", err.Error()) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codecommit_trigger.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codecommit_trigger.go new file mode 100644 index 000000000..c21d55a9c --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codecommit_trigger.go @@ -0,0 +1,163 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + 
"github.com/aws/aws-sdk-go/service/codecommit" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsCodeCommitTrigger() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCodeCommitTriggerCreate, + Read: resourceAwsCodeCommitTriggerRead, + Delete: resourceAwsCodeCommitTriggerDelete, + + Schema: map[string]*schema.Schema{ + "repository_name": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "configuration_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "trigger": &schema.Schema{ + Type: schema.TypeSet, + ForceNew: true, + Required: true, + MaxItems: 10, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "destination_arn": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "custom_data": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "branches": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "events": &schema.Schema{ + Type: schema.TypeList, + Required: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + } +} + +func resourceAwsCodeCommitTriggerCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codecommitconn + + // Expand the "trigger" set to aws-sdk-go compat []*codecommit.RepositoryTrigger + triggers := expandAwsCodeCommitTriggers(d.Get("trigger").(*schema.Set).List()) + + input := &codecommit.PutRepositoryTriggersInput{ + RepositoryName: aws.String(d.Get("repository_name").(string)), + Triggers: triggers, + } + + resp, err := conn.PutRepositoryTriggers(input) + if err != nil { + return fmt.Errorf("Error creating CodeCommit Trigger: %s", err) + } + + log.Printf("[INFO] Code Commit Trigger Created %s input %s", resp, input) + + d.SetId(d.Get("repository_name").(string)) + d.Set("configuration_id", resp.ConfigurationId) + + return resourceAwsCodeCommitTriggerRead(d, meta) +} + +func resourceAwsCodeCommitTriggerRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codecommitconn + + input := &codecommit.GetRepositoryTriggersInput{ + RepositoryName: aws.String(d.Id()), + } + + resp, err := conn.GetRepositoryTriggers(input) + if err != nil { + return fmt.Errorf("Error reading CodeCommit Trigger: %s", err.Error()) + } + + log.Printf("[DEBUG] CodeCommit Trigger: %s", resp) + + return nil +} + +func resourceAwsCodeCommitTriggerDelete(d *schema.ResourceData, meta interface{}) error { + + conn := meta.(*AWSClient).codecommitconn + + log.Printf("[DEBUG] Deleting Trigger: %q", d.Id()) + + input := &codecommit.PutRepositoryTriggersInput{ + RepositoryName: aws.String(d.Get("repository_name").(string)), + Triggers: []*codecommit.RepositoryTrigger{}, + } + + _, err := conn.PutRepositoryTriggers(input) + + if err != nil { + return err + } + + return nil +} + +func expandAwsCodeCommitTriggers(configured []interface{}) []*codecommit.RepositoryTrigger { + triggers := make([]*codecommit.RepositoryTrigger, 0, len(configured)) + // Loop over our configured triggers and create + // an array of aws-sdk-go compatabile objects + for _, lRaw := range configured { + data := lRaw.(map[string]interface{}) + t := &codecommit.RepositoryTrigger{ + CustomData: aws.String(data["custom_data"].(string)), + DestinationArn: 
aws.String(data["destination_arn"].(string)), + Name: aws.String(data["name"].(string)), + } + + branches := make([]*string, len(data["branches"].([]interface{}))) + for i, vv := range data["branches"].([]interface{}) { + str := vv.(string) + branches[i] = aws.String(str) + } + t.Branches = branches + + events := make([]*string, len(data["events"].([]interface{}))) + for i, vv := range data["events"].([]interface{}) { + str := vv.(string) + events[i] = aws.String(str) + } + t.Events = events + + triggers = append(triggers, t) + } + return triggers +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codedeploy_app.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codedeploy_app.go new file mode 100644 index 000000000..706bd7afa --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codedeploy_app.go @@ -0,0 +1,127 @@ +package aws + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/codedeploy" +) + +func resourceAwsCodeDeployApp() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCodeDeployAppCreate, + Read: resourceAwsCodeDeployAppRead, + Update: resourceAwsCodeDeployUpdate, + Delete: resourceAwsCodeDeployAppDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + // The unique ID is set by AWS on create. + "unique_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + } +} + +func resourceAwsCodeDeployAppCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + application := d.Get("name").(string) + log.Printf("[DEBUG] Creating CodeDeploy application %s", application) + + resp, err := conn.CreateApplication(&codedeploy.CreateApplicationInput{ + ApplicationName: aws.String(application), + }) + if err != nil { + return err + } + log.Printf("[DEBUG] CodeDeploy application %s created", *resp.ApplicationId) + + // Despite giving the application a unique ID, AWS doesn't actually use + // it in API calls. Use it and the app name to identify the resource in + // the state file. This allows us to reliably detect both when the TF + // config file changes and when the user deletes the app without removing + // it first from the TF config. 
+ d.SetId(fmt.Sprintf("%s:%s", *resp.ApplicationId, application)) + + return resourceAwsCodeDeployAppRead(d, meta) +} + +func resourceAwsCodeDeployAppRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + _, application := resourceAwsCodeDeployAppParseId(d.Id()) + log.Printf("[DEBUG] Reading CodeDeploy application %s", application) + resp, err := conn.GetApplication(&codedeploy.GetApplicationInput{ + ApplicationName: aws.String(application), + }) + if err != nil { + if codedeployerr, ok := err.(awserr.Error); ok && codedeployerr.Code() == "ApplicationDoesNotExistException" { + d.SetId("") + return nil + } else { + log.Printf("[ERROR] Error finding CodeDeploy application: %s", err) + return err + } + } + + d.Set("name", resp.Application.ApplicationName) + + return nil +} + +func resourceAwsCodeDeployUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + o, n := d.GetChange("name") + + _, err := conn.UpdateApplication(&codedeploy.UpdateApplicationInput{ + ApplicationName: aws.String(o.(string)), + NewApplicationName: aws.String(n.(string)), + }) + if err != nil { + return err + } + log.Printf("[DEBUG] CodeDeploy application %s updated", n) + + d.Set("name", n) + + return nil +} + +func resourceAwsCodeDeployAppDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + _, err := conn.DeleteApplication(&codedeploy.DeleteApplicationInput{ + ApplicationName: aws.String(d.Get("name").(string)), + }) + if err != nil { + if cderr, ok := err.(awserr.Error); ok && cderr.Code() == "InvalidApplicationNameException" { + d.SetId("") + return nil + } else { + log.Printf("[ERROR] Error deleting CodeDeploy application: %s", err) + return err + } + } + + return nil +} + +func resourceAwsCodeDeployAppParseId(id string) (string, string) { + parts := strings.SplitN(id, ":", 2) + return parts[0], parts[1] +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codedeploy_deployment_config.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codedeploy_deployment_config.go new file mode 100644 index 000000000..10130dc76 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codedeploy_deployment_config.go @@ -0,0 +1,152 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/codedeploy" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsCodeDeployDeploymentConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCodeDeployDeploymentConfigCreate, + Read: resourceAwsCodeDeployDeploymentConfigRead, + Delete: resourceAwsCodeDeployDeploymentConfigDelete, + + Schema: map[string]*schema.Schema{ + "deployment_config_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "minimum_healthy_hosts": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateMinimumHealtyHostsType, + }, + + "value": { + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, + + "deployment_config_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsCodeDeployDeploymentConfigCreate(d *schema.ResourceData, meta interface{}) error { + 
conn := meta.(*AWSClient).codedeployconn + + input := &codedeploy.CreateDeploymentConfigInput{ + DeploymentConfigName: aws.String(d.Get("deployment_config_name").(string)), + MinimumHealthyHosts: expandAwsCodeDeployConfigMinimumHealthHosts(d), + } + + _, err := conn.CreateDeploymentConfig(input) + if err != nil { + return err + } + + d.SetId(d.Get("deployment_config_name").(string)) + + return resourceAwsCodeDeployDeploymentConfigRead(d, meta) +} + +func resourceAwsCodeDeployDeploymentConfigRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + input := &codedeploy.GetDeploymentConfigInput{ + DeploymentConfigName: aws.String(d.Id()), + } + + resp, err := conn.GetDeploymentConfig(input) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if "DeploymentConfigDoesNotExistException" == awsErr.Code() { + log.Printf("[DEBUG] CodeDeploy Deployment Config (%s) not found", d.Id()) + d.SetId("") + return nil + } + } + return err + } + + if resp.DeploymentConfigInfo == nil { + return fmt.Errorf("[ERROR] Cannot find DeploymentConfig %q", d.Id()) + } + + if err := d.Set("minimum_healthy_hosts", flattenAwsCodeDeployConfigMinimumHealthHosts(resp.DeploymentConfigInfo.MinimumHealthyHosts)); err != nil { + return err + } + d.Set("deployment_config_id", resp.DeploymentConfigInfo.DeploymentConfigId) + d.Set("deployment_config_name", resp.DeploymentConfigInfo.DeploymentConfigName) + + return nil +} + +func resourceAwsCodeDeployDeploymentConfigDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + input := &codedeploy.DeleteDeploymentConfigInput{ + DeploymentConfigName: aws.String(d.Id()), + } + + _, err := conn.DeleteDeploymentConfig(input) + if err != nil { + return err + } + + return nil +} + +func expandAwsCodeDeployConfigMinimumHealthHosts(d *schema.ResourceData) *codedeploy.MinimumHealthyHosts { + hosts := d.Get("minimum_healthy_hosts").([]interface{}) + host := hosts[0].(map[string]interface{}) + + minimumHealthyHost := codedeploy.MinimumHealthyHosts{ + Type: aws.String(host["type"].(string)), + Value: aws.Int64(int64(host["value"].(int))), + } + + return &minimumHealthyHost +} + +func flattenAwsCodeDeployConfigMinimumHealthHosts(hosts *codedeploy.MinimumHealthyHosts) []map[string]interface{} { + result := make([]map[string]interface{}, 0) + + item := make(map[string]interface{}) + + item["type"] = *hosts.Type + item["value"] = *hosts.Value + + result = append(result, item) + + return result +} + +func validateMinimumHealtyHostsType(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "FLEET_PERCENT" && value != "HOST_COUNT" { + errors = append(errors, fmt.Errorf( + "%q must be one of \"FLEET_PERCENT\" or \"HOST_COUNT\"", k)) + } + return +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codedeploy_deployment_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codedeploy_deployment_group.go new file mode 100644 index 000000000..4a6d17211 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codedeploy_deployment_group.go @@ -0,0 +1,690 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "regexp" + "sort" + "time" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + 
"github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/codedeploy" +) + +func resourceAwsCodeDeployDeploymentGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCodeDeployDeploymentGroupCreate, + Read: resourceAwsCodeDeployDeploymentGroupRead, + Update: resourceAwsCodeDeployDeploymentGroupUpdate, + Delete: resourceAwsCodeDeployDeploymentGroupDelete, + + Schema: map[string]*schema.Schema{ + "app_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 100 { + errors = append(errors, fmt.Errorf( + "%q cannot exceed 100 characters", k)) + } + return + }, + }, + + "deployment_group_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 100 { + errors = append(errors, fmt.Errorf( + "%q cannot exceed 100 characters", k)) + } + return + }, + }, + + "service_role_arn": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "alarm_configuration": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "alarms": &schema.Schema{ + Type: schema.TypeSet, + MaxItems: 10, + Optional: true, + Set: schema.HashString, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "enabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + + "ignore_poll_alarm_failure": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, + + "auto_rollback_configuration": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + + "events": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Set: schema.HashString, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + + "autoscaling_groups": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "deployment_config_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "CodeDeployDefault.OneAtATime", + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 100 { + errors = append(errors, fmt.Errorf( + "%q cannot exceed 100 characters", k)) + } + return + }, + }, + + "ec2_tag_filter": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateTagFilters, + }, + + "value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + Set: resourceAwsCodeDeployTagFilterHash, + }, + + "on_premises_instance_tag_filter": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateTagFilters, + }, + + "value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + Set: 
resourceAwsCodeDeployTagFilterHash, + }, + + "trigger_configuration": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "trigger_events": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Set: schema.HashString, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateTriggerEvent, + }, + }, + + "trigger_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "trigger_target_arn": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + Set: resourceAwsCodeDeployTriggerConfigHash, + }, + }, + } +} + +func resourceAwsCodeDeployDeploymentGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + application := d.Get("app_name").(string) + deploymentGroup := d.Get("deployment_group_name").(string) + + input := codedeploy.CreateDeploymentGroupInput{ + ApplicationName: aws.String(application), + DeploymentGroupName: aws.String(deploymentGroup), + ServiceRoleArn: aws.String(d.Get("service_role_arn").(string)), + } + if attr, ok := d.GetOk("deployment_config_name"); ok { + input.DeploymentConfigName = aws.String(attr.(string)) + } + if attr, ok := d.GetOk("autoscaling_groups"); ok { + input.AutoScalingGroups = expandStringList(attr.(*schema.Set).List()) + } + if attr, ok := d.GetOk("on_premises_instance_tag_filter"); ok { + onPremFilters := buildOnPremTagFilters(attr.(*schema.Set).List()) + input.OnPremisesInstanceTagFilters = onPremFilters + } + if attr, ok := d.GetOk("ec2_tag_filter"); ok { + ec2TagFilters := buildEC2TagFilters(attr.(*schema.Set).List()) + input.Ec2TagFilters = ec2TagFilters + } + if attr, ok := d.GetOk("trigger_configuration"); ok { + triggerConfigs := buildTriggerConfigs(attr.(*schema.Set).List()) + input.TriggerConfigurations = triggerConfigs + } + + if attr, ok := d.GetOk("auto_rollback_configuration"); ok { + input.AutoRollbackConfiguration = buildAutoRollbackConfig(attr.([]interface{})) + } + + if attr, ok := d.GetOk("alarm_configuration"); ok { + input.AlarmConfiguration = buildAlarmConfig(attr.([]interface{})) + } + + // Retry to handle IAM role eventual consistency. 
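+	// A freshly created service role may not be visible to CodeDeploy yet
+	// (InvalidRoleException), and an SNS topic ARN referenced by a trigger
+	// may not validate immediately (InvalidTriggerConfigException); both
+	// cases are retried for up to five minutes below.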
+ var resp *codedeploy.CreateDeploymentGroupOutput + var err error + err = resource.Retry(5*time.Minute, func() *resource.RetryError { + resp, err = conn.CreateDeploymentGroup(&input) + if err != nil { + retry := false + codedeployErr, ok := err.(awserr.Error) + if !ok { + return resource.NonRetryableError(err) + } + if codedeployErr.Code() == "InvalidRoleException" { + retry = true + } + if codedeployErr.Code() == "InvalidTriggerConfigException" { + r := regexp.MustCompile("^Topic ARN .+ is not valid$") + if r.MatchString(codedeployErr.Message()) { + retry = true + } + } + if retry { + log.Printf("[DEBUG] Trying to create deployment group again: %q", + codedeployErr.Message()) + return resource.RetryableError(err) + } + + return resource.NonRetryableError(err) + } + return nil + }) + if err != nil { + return err + } + + d.SetId(*resp.DeploymentGroupId) + + return resourceAwsCodeDeployDeploymentGroupRead(d, meta) +} + +func resourceAwsCodeDeployDeploymentGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + log.Printf("[DEBUG] Reading CodeDeploy DeploymentGroup %s", d.Id()) + resp, err := conn.GetDeploymentGroup(&codedeploy.GetDeploymentGroupInput{ + ApplicationName: aws.String(d.Get("app_name").(string)), + DeploymentGroupName: aws.String(d.Get("deployment_group_name").(string)), + }) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "DeploymentGroupDoesNotExistException" { + log.Printf("[INFO] CodeDeployment DeploymentGroup %s not found", d.Get("deployment_group_name").(string)) + d.SetId("") + return nil + } + + return err + } + + d.Set("app_name", resp.DeploymentGroupInfo.ApplicationName) + d.Set("autoscaling_groups", resp.DeploymentGroupInfo.AutoScalingGroups) + d.Set("deployment_config_name", resp.DeploymentGroupInfo.DeploymentConfigName) + d.Set("deployment_group_name", resp.DeploymentGroupInfo.DeploymentGroupName) + d.Set("service_role_arn", resp.DeploymentGroupInfo.ServiceRoleArn) + if err := d.Set("ec2_tag_filter", ec2TagFiltersToMap(resp.DeploymentGroupInfo.Ec2TagFilters)); err != nil { + return err + } + if err := d.Set("on_premises_instance_tag_filter", onPremisesTagFiltersToMap(resp.DeploymentGroupInfo.OnPremisesInstanceTagFilters)); err != nil { + return err + } + if err := d.Set("trigger_configuration", triggerConfigsToMap(resp.DeploymentGroupInfo.TriggerConfigurations)); err != nil { + return err + } + + if err := d.Set("auto_rollback_configuration", autoRollbackConfigToMap(resp.DeploymentGroupInfo.AutoRollbackConfiguration)); err != nil { + return err + } + + if err := d.Set("alarm_configuration", alarmConfigToMap(resp.DeploymentGroupInfo.AlarmConfiguration)); err != nil { + return err + } + + return nil +} + +func resourceAwsCodeDeployDeploymentGroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + input := codedeploy.UpdateDeploymentGroupInput{ + ApplicationName: aws.String(d.Get("app_name").(string)), + CurrentDeploymentGroupName: aws.String(d.Get("deployment_group_name").(string)), + ServiceRoleArn: aws.String(d.Get("service_role_arn").(string)), + } + + if d.HasChange("autoscaling_groups") { + _, n := d.GetChange("autoscaling_groups") + input.AutoScalingGroups = expandStringList(n.(*schema.Set).List()) + } + if d.HasChange("deployment_config_name") { + _, n := d.GetChange("deployment_config_name") + input.DeploymentConfigName = aws.String(n.(string)) + } + if d.HasChange("deployment_group_name") { + _, n := 
d.GetChange("deployment_group_name") + input.NewDeploymentGroupName = aws.String(n.(string)) + } + + // TagFilters aren't like tags. They don't append. They simply replace. + if d.HasChange("on_premises_instance_tag_filter") { + _, n := d.GetChange("on_premises_instance_tag_filter") + onPremFilters := buildOnPremTagFilters(n.(*schema.Set).List()) + input.OnPremisesInstanceTagFilters = onPremFilters + } + if d.HasChange("ec2_tag_filter") { + _, n := d.GetChange("ec2_tag_filter") + ec2Filters := buildEC2TagFilters(n.(*schema.Set).List()) + input.Ec2TagFilters = ec2Filters + } + if d.HasChange("trigger_configuration") { + _, n := d.GetChange("trigger_configuration") + triggerConfigs := buildTriggerConfigs(n.(*schema.Set).List()) + input.TriggerConfigurations = triggerConfigs + } + + if d.HasChange("auto_rollback_configuration") { + _, n := d.GetChange("auto_rollback_configuration") + input.AutoRollbackConfiguration = buildAutoRollbackConfig(n.([]interface{})) + } + + if d.HasChange("alarm_configuration") { + _, n := d.GetChange("alarm_configuration") + input.AlarmConfiguration = buildAlarmConfig(n.([]interface{})) + } + + log.Printf("[DEBUG] Updating CodeDeploy DeploymentGroup %s", d.Id()) + // Retry to handle IAM role eventual consistency. + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.UpdateDeploymentGroup(&input) + if err != nil { + retry := false + codedeployErr, ok := err.(awserr.Error) + if !ok { + return resource.NonRetryableError(err) + } + if codedeployErr.Code() == "InvalidRoleException" { + retry = true + } + if codedeployErr.Code() == "InvalidTriggerConfigException" { + r := regexp.MustCompile("^Topic ARN .+ is not valid$") + if r.MatchString(codedeployErr.Message()) { + retry = true + } + } + if retry { + log.Printf("[DEBUG] Retrying Code Deployment Group Update: %q", + codedeployErr.Message()) + return resource.RetryableError(err) + } + + return resource.NonRetryableError(err) + } + return nil + }) + + if err != nil { + return err + } + + return resourceAwsCodeDeployDeploymentGroupRead(d, meta) +} + +func resourceAwsCodeDeployDeploymentGroupDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + log.Printf("[DEBUG] Deleting CodeDeploy DeploymentGroup %s", d.Id()) + _, err := conn.DeleteDeploymentGroup(&codedeploy.DeleteDeploymentGroupInput{ + ApplicationName: aws.String(d.Get("app_name").(string)), + DeploymentGroupName: aws.String(d.Get("deployment_group_name").(string)), + }) + if err != nil { + return err + } + + d.SetId("") + + return nil +} + +// buildOnPremTagFilters converts raw schema lists into a list of +// codedeploy.TagFilters. +func buildOnPremTagFilters(configured []interface{}) []*codedeploy.TagFilter { + filters := make([]*codedeploy.TagFilter, 0) + for _, raw := range configured { + var filter codedeploy.TagFilter + m := raw.(map[string]interface{}) + + if v, ok := m["key"]; ok { + filter.Key = aws.String(v.(string)) + } + if v, ok := m["type"]; ok { + filter.Type = aws.String(v.(string)) + } + if v, ok := m["value"]; ok { + filter.Value = aws.String(v.(string)) + } + + filters = append(filters, &filter) + } + + return filters +} + +// buildEC2TagFilters converts raw schema lists into a list of +// codedeploy.EC2TagFilters. 
+func buildEC2TagFilters(configured []interface{}) []*codedeploy.EC2TagFilter { + filters := make([]*codedeploy.EC2TagFilter, 0) + for _, raw := range configured { + var filter codedeploy.EC2TagFilter + m := raw.(map[string]interface{}) + + filter.Key = aws.String(m["key"].(string)) + filter.Type = aws.String(m["type"].(string)) + filter.Value = aws.String(m["value"].(string)) + + filters = append(filters, &filter) + } + + return filters +} + +// buildTriggerConfigs converts a raw schema list into a list of +// codedeploy.TriggerConfig. +func buildTriggerConfigs(configured []interface{}) []*codedeploy.TriggerConfig { + configs := make([]*codedeploy.TriggerConfig, 0, len(configured)) + for _, raw := range configured { + var config codedeploy.TriggerConfig + m := raw.(map[string]interface{}) + + config.TriggerEvents = expandStringSet(m["trigger_events"].(*schema.Set)) + config.TriggerName = aws.String(m["trigger_name"].(string)) + config.TriggerTargetArn = aws.String(m["trigger_target_arn"].(string)) + + configs = append(configs, &config) + } + return configs +} + +// buildAutoRollbackConfig converts a raw schema list containing a map[string]interface{} +// into a single codedeploy.AutoRollbackConfiguration +func buildAutoRollbackConfig(configured []interface{}) *codedeploy.AutoRollbackConfiguration { + result := &codedeploy.AutoRollbackConfiguration{} + + if len(configured) == 1 { + config := configured[0].(map[string]interface{}) + result.Enabled = aws.Bool(config["enabled"].(bool)) + result.Events = expandStringSet(config["events"].(*schema.Set)) + } else { // delete the configuration + result.Enabled = aws.Bool(false) + result.Events = make([]*string, 0) + } + + return result +} + +// buildAlarmConfig converts a raw schema list containing a map[string]interface{} +// into a single codedeploy.AlarmConfiguration +func buildAlarmConfig(configured []interface{}) *codedeploy.AlarmConfiguration { + result := &codedeploy.AlarmConfiguration{} + + if len(configured) == 1 { + config := configured[0].(map[string]interface{}) + names := expandStringSet(config["alarms"].(*schema.Set)) + alarms := make([]*codedeploy.Alarm, 0, len(names)) + + for _, name := range names { + alarm := &codedeploy.Alarm{ + Name: name, + } + alarms = append(alarms, alarm) + } + + result.Alarms = alarms + result.Enabled = aws.Bool(config["enabled"].(bool)) + result.IgnorePollAlarmFailure = aws.Bool(config["ignore_poll_alarm_failure"].(bool)) + } else { // delete the configuration + result.Alarms = make([]*codedeploy.Alarm, 0) + result.Enabled = aws.Bool(false) + result.IgnorePollAlarmFailure = aws.Bool(false) + } + + return result +} + +// ec2TagFiltersToMap converts lists of tag filters into a []map[string]string. +func ec2TagFiltersToMap(list []*codedeploy.EC2TagFilter) []map[string]string { + result := make([]map[string]string, 0, len(list)) + for _, tf := range list { + l := make(map[string]string) + if tf.Key != nil && *tf.Key != "" { + l["key"] = *tf.Key + } + if tf.Value != nil && *tf.Value != "" { + l["value"] = *tf.Value + } + if tf.Type != nil && *tf.Type != "" { + l["type"] = *tf.Type + } + result = append(result, l) + } + return result +} + +// onPremisesTagFiltersToMap converts lists of on-prem tag filters into a []map[string]string. 
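+// Nil and empty fields are omitted from each map so the resulting set hash
+// lines up with the hash of the user's configured filters.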
+func onPremisesTagFiltersToMap(list []*codedeploy.TagFilter) []map[string]string { + result := make([]map[string]string, 0, len(list)) + for _, tf := range list { + l := make(map[string]string) + if tf.Key != nil && *tf.Key != "" { + l["key"] = *tf.Key + } + if tf.Value != nil && *tf.Value != "" { + l["value"] = *tf.Value + } + if tf.Type != nil && *tf.Type != "" { + l["type"] = *tf.Type + } + result = append(result, l) + } + return result +} + +// triggerConfigsToMap converts a list of []*codedeploy.TriggerConfig into a []map[string]interface{} +func triggerConfigsToMap(list []*codedeploy.TriggerConfig) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(list)) + for _, tc := range list { + item := make(map[string]interface{}) + item["trigger_events"] = schema.NewSet(schema.HashString, flattenStringList(tc.TriggerEvents)) + item["trigger_name"] = *tc.TriggerName + item["trigger_target_arn"] = *tc.TriggerTargetArn + result = append(result, item) + } + return result +} + +// autoRollbackConfigToMap converts a codedeploy.AutoRollbackConfiguration +// into a []map[string]interface{} list containing a single item +func autoRollbackConfigToMap(config *codedeploy.AutoRollbackConfiguration) []map[string]interface{} { + result := make([]map[string]interface{}, 0, 1) + + // only create configurations that are enabled or temporarily disabled (retaining events) + // otherwise empty configurations will be created + if config != nil && (*config.Enabled == true || len(config.Events) > 0) { + item := make(map[string]interface{}) + item["enabled"] = *config.Enabled + item["events"] = schema.NewSet(schema.HashString, flattenStringList(config.Events)) + result = append(result, item) + } + + return result +} + +// alarmConfigToMap converts a codedeploy.AlarmConfiguration +// into a []map[string]interface{} list containing a single item +func alarmConfigToMap(config *codedeploy.AlarmConfiguration) []map[string]interface{} { + result := make([]map[string]interface{}, 0, 1) + + // only create configurations that are enabled or temporarily disabled (retaining alarms) + // otherwise empty configurations will be created + if config != nil && (*config.Enabled == true || len(config.Alarms) > 0) { + names := make([]*string, 0, len(config.Alarms)) + for _, alarm := range config.Alarms { + names = append(names, alarm.Name) + } + + item := make(map[string]interface{}) + item["alarms"] = schema.NewSet(schema.HashString, flattenStringList(names)) + item["enabled"] = *config.Enabled + item["ignore_poll_alarm_failure"] = *config.IgnorePollAlarmFailure + + result = append(result, item) + } + + return result +} + +func resourceAwsCodeDeployTagFilterHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + + // Nothing's actually required in tag filters, so we must check the + // presence of all values before attempting a hash. 
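+	// e.g. {key: "Name", type: "KEY_AND_VALUE", value: "web"} hashes the
+	// string "Name-KEY_AND_VALUE-web-" (key, then type, then value).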
+ if v, ok := m["key"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + if v, ok := m["type"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + if v, ok := m["value"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + return hashcode.String(buf.String()) +} + +func resourceAwsCodeDeployTriggerConfigHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["trigger_name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["trigger_target_arn"].(string))) + + if triggerEvents, ok := m["trigger_events"]; ok { + names := triggerEvents.(*schema.Set).List() + strings := make([]string, len(names)) + for i, raw := range names { + strings[i] = raw.(string) + } + sort.Strings(strings) + + for _, s := range strings { + buf.WriteString(fmt.Sprintf("%s-", s)) + } + } + return hashcode.String(buf.String()) +} + +func validateTriggerEvent(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + triggerEvents := map[string]bool{ + "DeploymentStart": true, + "DeploymentStop": true, + "DeploymentSuccess": true, + "DeploymentFailure": true, + "DeploymentRollback": true, + "InstanceStart": true, + "InstanceSuccess": true, + "InstanceFailure": true, + } + + if !triggerEvents[value] { + errors = append(errors, fmt.Errorf("%q must be a valid event type value: %q", k, value)) + } + return +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codepipeline.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codepipeline.go new file mode 100644 index 000000000..29866cb19 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_codepipeline.go @@ -0,0 +1,509 @@ +package aws + +import ( + "fmt" + "log" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/codepipeline" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsCodePipeline() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCodePipelineCreate, + Read: resourceAwsCodePipelineRead, + Update: resourceAwsCodePipelineUpdate, + Delete: resourceAwsCodePipelineDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "role_arn": { + Type: schema.TypeString, + Required: true, + }, + + "artifact_store": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + }, + + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAwsCodePipelineArtifactStoreType, + }, + + "encryption_key": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + }, + + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAwsCodePipelineEncryptionKeyType, + }, + }, + }, + }, + }, + }, + }, + "stage": { + Type: schema.TypeList, + MinItems: 2, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "action": { + Type: schema.TypeList, + Required: true, + Elem: 
&schema.Resource{ + Schema: map[string]*schema.Schema{ + "configuration": { + Type: schema.TypeMap, + Optional: true, + }, + "category": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAwsCodePipelineStageActionCategory, + }, + "owner": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAwsCodePipelineStageActionOwner, + }, + "provider": { + Type: schema.TypeString, + Required: true, + }, + "version": { + Type: schema.TypeString, + Required: true, + }, + "input_artifacts": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "output_artifacts": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "role_arn": { + Type: schema.TypeString, + Optional: true, + }, + "run_order": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + } +} +func validateAwsCodePipelineEncryptionKeyType(v interface{}, k string) (ws []string, errors []error) { + if v.(string) != "KMS" { + errors = append(errors, fmt.Errorf("CodePipeline: encryption_key type can only be KMS")) + } + return +} + +func validateAwsCodePipelineArtifactStoreType(v interface{}, k string) (ws []string, errors []error) { + if v.(string) != "S3" { + errors = append(errors, fmt.Errorf("CodePipeline: artifact_store type can only be S3")) + } + return +} + +func validateAwsCodePipelineStageActionCategory(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + types := map[string]bool{ + "Source": true, + "Build": true, + "Deploy": true, + "Test": true, + "Invoke": true, + "Approval": true, + } + + if !types[value] { + errors = append(errors, fmt.Errorf("CodePipeline: category can only be one of Source | Build | Deploy | Test | Invoke | Approval")) + } + return +} + +func validateAwsCodePipelineStageActionOwner(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + types := map[string]bool{ + "AWS": true, + "ThirdParty": true, + "Custom": true, + } + + if !types[value] { + errors = append(errors, fmt.Errorf("CodePipeline: owner can only be one of AWS | ThirdParty | Custom")) + } + return +} + +func validateAwsCodePipelineStageActionConfiguration(v interface{}, k string) (ws []string, errors []error) { + for k := range v.(map[string]interface{}) { + if k == "OAuthToken" { + errors = append(errors, fmt.Errorf("CodePipeline: OAuthToken should be set as environment variable 'GITHUB_TOKEN'")) + } + } + return +} + +func resourceAwsCodePipelineCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codepipelineconn + params := &codepipeline.CreatePipelineInput{ + Pipeline: expandAwsCodePipeline(d), + } + + var resp *codepipeline.CreatePipelineOutput + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + var err error + + resp, err = conn.CreatePipeline(params) + + if err != nil { + return resource.RetryableError(err) + } + + return resource.NonRetryableError(err) + }) + if err != nil { + return fmt.Errorf("[ERROR] Error creating CodePipeline: %s", err) + } + if resp.Pipeline == nil { + return fmt.Errorf("[ERROR] Error creating CodePipeline: invalid response from AWS") + } + + d.SetId(*resp.Pipeline.Name) + return resourceAwsCodePipelineRead(d, meta) +} + +func expandAwsCodePipeline(d *schema.ResourceData) *codepipeline.PipelineDeclaration { + pipelineArtifactStore := expandAwsCodePipelineArtifactStore(d) + 
pipelineStages := expandAwsCodePipelineStages(d) + + pipeline := codepipeline.PipelineDeclaration{ + Name: aws.String(d.Get("name").(string)), + RoleArn: aws.String(d.Get("role_arn").(string)), + ArtifactStore: pipelineArtifactStore, + Stages: pipelineStages, + } + return &pipeline +} +func expandAwsCodePipelineArtifactStore(d *schema.ResourceData) *codepipeline.ArtifactStore { + configs := d.Get("artifact_store").([]interface{}) + data := configs[0].(map[string]interface{}) + pipelineArtifactStore := codepipeline.ArtifactStore{ + Location: aws.String(data["location"].(string)), + Type: aws.String(data["type"].(string)), + } + tek := data["encryption_key"].([]interface{}) + if len(tek) > 0 { + vk := tek[0].(map[string]interface{}) + ek := codepipeline.EncryptionKey{ + Type: aws.String(vk["type"].(string)), + Id: aws.String(vk["id"].(string)), + } + pipelineArtifactStore.EncryptionKey = &ek + } + return &pipelineArtifactStore +} + +func flattenAwsCodePipelineArtifactStore(artifactStore *codepipeline.ArtifactStore) []interface{} { + values := map[string]interface{}{} + values["type"] = *artifactStore.Type + values["location"] = *artifactStore.Location + if artifactStore.EncryptionKey != nil { + as := map[string]interface{}{ + "id": *artifactStore.EncryptionKey.Id, + "type": *artifactStore.EncryptionKey.Type, + } + values["encryption_key"] = []interface{}{as} + } + return []interface{}{values} +} + +func expandAwsCodePipelineStages(d *schema.ResourceData) []*codepipeline.StageDeclaration { + configs := d.Get("stage").([]interface{}) + pipelineStages := []*codepipeline.StageDeclaration{} + + for _, stage := range configs { + data := stage.(map[string]interface{}) + a := data["action"].([]interface{}) + actions := expandAwsCodePipelineActions(a) + pipelineStages = append(pipelineStages, &codepipeline.StageDeclaration{ + Name: aws.String(data["name"].(string)), + Actions: actions, + }) + } + return pipelineStages +} + +func flattenAwsCodePipelineStages(stages []*codepipeline.StageDeclaration) []interface{} { + stagesList := []interface{}{} + for _, stage := range stages { + values := map[string]interface{}{} + values["name"] = *stage.Name + values["action"] = flattenAwsCodePipelineStageActions(stage.Actions) + stagesList = append(stagesList, values) + } + return stagesList + +} + +func expandAwsCodePipelineActions(s []interface{}) []*codepipeline.ActionDeclaration { + actions := []*codepipeline.ActionDeclaration{} + for _, config := range s { + data := config.(map[string]interface{}) + + conf := expandAwsCodePipelineStageActionConfiguration(data["configuration"].(map[string]interface{})) + if data["provider"].(string) == "GitHub" { + githubToken := os.Getenv("GITHUB_TOKEN") + if githubToken != "" { + conf["OAuthToken"] = aws.String(githubToken) + } + + } + + action := codepipeline.ActionDeclaration{ + ActionTypeId: &codepipeline.ActionTypeId{ + Category: aws.String(data["category"].(string)), + Owner: aws.String(data["owner"].(string)), + + Provider: aws.String(data["provider"].(string)), + Version: aws.String(data["version"].(string)), + }, + Name: aws.String(data["name"].(string)), + Configuration: conf, + } + + oa := data["output_artifacts"].([]interface{}) + if len(oa) > 0 { + outputArtifacts := expandAwsCodePipelineActionsOutputArtifacts(oa) + action.OutputArtifacts = outputArtifacts + + } + ia := data["input_artifacts"].([]interface{}) + if len(ia) > 0 { + inputArtifacts := expandAwsCodePipelineActionsInputArtifacts(ia) + action.InputArtifacts = inputArtifacts + + } + ra := 
data["role_arn"].(string) + if ra != "" { + action.RoleArn = aws.String(ra) + } + ro := data["run_order"].(int) + if ro > 0 { + action.RunOrder = aws.Int64(int64(ro)) + } + actions = append(actions, &action) + } + return actions +} + +func flattenAwsCodePipelineStageActions(actions []*codepipeline.ActionDeclaration) []interface{} { + actionsList := []interface{}{} + for _, action := range actions { + values := map[string]interface{}{ + "category": *action.ActionTypeId.Category, + "owner": *action.ActionTypeId.Owner, + "provider": *action.ActionTypeId.Provider, + "version": *action.ActionTypeId.Version, + "name": *action.Name, + } + if action.Configuration != nil { + config := flattenAwsCodePipelineStageActionConfiguration(action.Configuration) + _, ok := config["OAuthToken"] + actionProvider := *action.ActionTypeId.Provider + if ok && actionProvider == "GitHub" { + delete(config, "OAuthToken") + } + values["configuration"] = config + } + + if len(action.OutputArtifacts) > 0 { + values["output_artifacts"] = flattenAwsCodePipelineActionsOutputArtifacts(action.OutputArtifacts) + } + + if len(action.InputArtifacts) > 0 { + values["input_artifacts"] = flattenAwsCodePipelineActionsInputArtifacts(action.InputArtifacts) + } + + if action.RoleArn != nil { + values["role_arn"] = *action.RoleArn + } + + if action.RunOrder != nil { + values["run_order"] = int(*action.RunOrder) + } + + actionsList = append(actionsList, values) + } + return actionsList +} + +func expandAwsCodePipelineStageActionConfiguration(config map[string]interface{}) map[string]*string { + m := map[string]*string{} + for k, v := range config { + s := v.(string) + m[k] = &s + } + return m +} + +func flattenAwsCodePipelineStageActionConfiguration(config map[string]*string) map[string]string { + m := map[string]string{} + for k, v := range config { + m[k] = *v + } + return m +} + +func expandAwsCodePipelineActionsOutputArtifacts(s []interface{}) []*codepipeline.OutputArtifact { + outputArtifacts := []*codepipeline.OutputArtifact{} + for _, artifact := range s { + outputArtifacts = append(outputArtifacts, &codepipeline.OutputArtifact{ + Name: aws.String(artifact.(string)), + }) + } + return outputArtifacts +} + +func flattenAwsCodePipelineActionsOutputArtifacts(artifacts []*codepipeline.OutputArtifact) []string { + values := []string{} + for _, artifact := range artifacts { + values = append(values, *artifact.Name) + } + return values +} + +func expandAwsCodePipelineActionsInputArtifacts(s []interface{}) []*codepipeline.InputArtifact { + outputArtifacts := []*codepipeline.InputArtifact{} + for _, artifact := range s { + outputArtifacts = append(outputArtifacts, &codepipeline.InputArtifact{ + Name: aws.String(artifact.(string)), + }) + } + return outputArtifacts +} + +func flattenAwsCodePipelineActionsInputArtifacts(artifacts []*codepipeline.InputArtifact) []string { + values := []string{} + for _, artifact := range artifacts { + values = append(values, *artifact.Name) + } + return values +} + +func resourceAwsCodePipelineRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codepipelineconn + resp, err := conn.GetPipeline(&codepipeline.GetPipelineInput{ + Name: aws.String(d.Id()), + }) + + if err != nil { + pipelineerr, ok := err.(awserr.Error) + if ok && pipelineerr.Code() == "PipelineNotFoundException" { + log.Printf("[INFO] Codepipeline %q not found", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("[ERROR] Error retreiving Pipeline: %q", err) + } + pipeline := resp.Pipeline + + if err := 
d.Set("artifact_store", flattenAwsCodePipelineArtifactStore(pipeline.ArtifactStore)); err != nil { + return err + } + + if err := d.Set("stage", flattenAwsCodePipelineStages(pipeline.Stages)); err != nil { + return err + } + + d.Set("name", pipeline.Name) + d.Set("role_arn", pipeline.RoleArn) + return nil +} + +func resourceAwsCodePipelineUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codepipelineconn + + pipeline := expandAwsCodePipeline(d) + params := &codepipeline.UpdatePipelineInput{ + Pipeline: pipeline, + } + _, err := conn.UpdatePipeline(params) + + if err != nil { + return fmt.Errorf( + "[ERROR] Error updating CodePipeline (%s): %s", + d.Id(), err) + } + + return resourceAwsCodePipelineRead(d, meta) +} + +func resourceAwsCodePipelineDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codepipelineconn + + _, err := conn.DeletePipeline(&codepipeline.DeletePipelineInput{ + Name: aws.String(d.Id()), + }) + + if err != nil { + return err + } + + d.SetId("") + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_identity_pool.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_identity_pool.go new file mode 100644 index 000000000..b85472cf9 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_cognito_identity_pool.go @@ -0,0 +1,238 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/cognitoidentity" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsCognitoIdentityPool() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCognitoIdentityPoolCreate, + Read: resourceAwsCognitoIdentityPoolRead, + Update: resourceAwsCognitoIdentityPoolUpdate, + Delete: resourceAwsCognitoIdentityPoolDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "identity_pool_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateCognitoIdentityPoolName, + }, + + "cognito_identity_providers": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateCognitoIdentityProvidersClientId, + }, + "provider_name": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateCognitoIdentityProvidersProviderName, + }, + "server_side_token_check": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, + + "developer_provider_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, // Forcing a new resource since it cannot be edited afterwards + ValidateFunc: validateCognitoProviderDeveloperName, + }, + + "allow_unauthenticated_identities": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "openid_connect_provider_arns": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateArn, + }, + }, + + "saml_provider_arns": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateArn, + }, + }, + + "supported_login_providers": { + Type: schema.TypeMap, + Optional: true, + Elem: 
&schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateCognitoSupportedLoginProviders, + }, + }, + }, + } +} + +func resourceAwsCognitoIdentityPoolCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cognitoconn + log.Print("[DEBUG] Creating Cognito Identity Pool") + + params := &cognitoidentity.CreateIdentityPoolInput{ + IdentityPoolName: aws.String(d.Get("identity_pool_name").(string)), + AllowUnauthenticatedIdentities: aws.Bool(d.Get("allow_unauthenticated_identities").(bool)), + } + + if v, ok := d.GetOk("developer_provider_name"); ok { + params.DeveloperProviderName = aws.String(v.(string)) + } + + if v, ok := d.GetOk("supported_login_providers"); ok { + params.SupportedLoginProviders = expandCognitoSupportedLoginProviders(v.(map[string]interface{})) + } + + if v, ok := d.GetOk("cognito_identity_providers"); ok { + params.CognitoIdentityProviders = expandCognitoIdentityProviders(v.(*schema.Set)) + } + + if v, ok := d.GetOk("saml_provider_arns"); ok { + params.SamlProviderARNs = expandStringList(v.([]interface{})) + } + + if v, ok := d.GetOk("openid_connect_provider_arns"); ok { + params.OpenIdConnectProviderARNs = expandStringList(v.([]interface{})) + } + + entity, err := conn.CreateIdentityPool(params) + if err != nil { + return fmt.Errorf("Error creating Cognito Identity Pool: %s", err) + } + + d.SetId(*entity.IdentityPoolId) + + return resourceAwsCognitoIdentityPoolRead(d, meta) +} + +func resourceAwsCognitoIdentityPoolRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cognitoconn + log.Printf("[DEBUG] Reading Cognito Identity Pool: %s", d.Id()) + + ip, err := conn.DescribeIdentityPool(&cognitoidentity.DescribeIdentityPoolInput{ + IdentityPoolId: aws.String(d.Id()), + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceNotFoundException" { + d.SetId("") + return nil + } + return err + } + + d.Set("identity_pool_name", ip.IdentityPoolName) + d.Set("allow_unauthenticated_identities", ip.AllowUnauthenticatedIdentities) + d.Set("developer_provider_name", ip.DeveloperProviderName) + + if ip.CognitoIdentityProviders != nil { + if err := d.Set("cognito_identity_providers", flattenCognitoIdentityProviders(ip.CognitoIdentityProviders)); err != nil { + return fmt.Errorf("[DEBUG] Error setting cognito_identity_providers error: %#v", err) + } + } + + if ip.OpenIdConnectProviderARNs != nil { + if err := d.Set("openid_connect_provider_arns", flattenStringList(ip.OpenIdConnectProviderARNs)); err != nil { + return fmt.Errorf("[DEBUG] Error setting openid_connect_provider_arns error: %#v", err) + } + } + + if ip.SamlProviderARNs != nil { + if err := d.Set("saml_provider_arns", flattenStringList(ip.SamlProviderARNs)); err != nil { + return fmt.Errorf("[DEBUG] Error setting saml_provider_arns error: %#v", err) + } + } + + if ip.SupportedLoginProviders != nil { + if err := d.Set("supported_login_providers", flattenCognitoSupportedLoginProviders(ip.SupportedLoginProviders)); err != nil { + return fmt.Errorf("[DEBUG] Error setting supported_login_providers error: %#v", err) + } + } + + return nil +} + +func resourceAwsCognitoIdentityPoolUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cognitoconn + log.Print("[DEBUG] Updating Cognito Identity Pool") + + params := &cognitoidentity.IdentityPool{ + IdentityPoolId: aws.String(d.Id()), + AllowUnauthenticatedIdentities: aws.Bool(d.Get("allow_unauthenticated_identities").(bool)), + IdentityPoolName: 
aws.String(d.Get("identity_pool_name").(string)), + } + + if d.HasChange("developer_provider_name") { + params.DeveloperProviderName = aws.String(d.Get("developer_provider_name").(string)) + } + + if d.HasChange("cognito_identity_providers") { + params.CognitoIdentityProviders = expandCognitoIdentityProviders(d.Get("cognito_identity_providers").(*schema.Set)) + } + + if d.HasChange("supported_login_providers") { + params.SupportedLoginProviders = expandCognitoSupportedLoginProviders(d.Get("supported_login_providers").(map[string]interface{})) + } + + if d.HasChange("openid_connect_provider_arns") { + params.OpenIdConnectProviderARNs = expandStringList(d.Get("openid_connect_provider_arns").([]interface{})) + } + + if d.HasChange("saml_provider_arns") { + params.SamlProviderARNs = expandStringList(d.Get("saml_provider_arns").([]interface{})) + } + + _, err := conn.UpdateIdentityPool(params) + if err != nil { + return fmt.Errorf("Error creating Cognito Identity Pool: %s", err) + } + + return resourceAwsCognitoIdentityPoolRead(d, meta) +} + +func resourceAwsCognitoIdentityPoolDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cognitoconn + log.Printf("[DEBUG] Deleting Cognito Identity Pool: %s", d.Id()) + + return resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteIdentityPool(&cognitoidentity.DeleteIdentityPoolInput{ + IdentityPoolId: aws.String(d.Id()), + }) + + if err == nil { + return nil + } + + return resource.NonRetryableError(err) + }) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_config_rule.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_config_rule.go new file mode 100644 index 000000000..cc18d1995 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_config_rule.go @@ -0,0 +1,311 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/configservice" +) + +func resourceAwsConfigConfigRule() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsConfigConfigRulePut, + Read: resourceAwsConfigConfigRuleRead, + Update: resourceAwsConfigConfigRulePut, + Delete: resourceAwsConfigConfigRuleDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateMaxLength(64), + }, + "rule_id": { + Type: schema.TypeString, + Computed: true, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateMaxLength(256), + }, + "input_parameters": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateJsonString, + }, + "maximum_execution_frequency": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateConfigExecutionFrequency, + }, + "scope": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "compliance_resource_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateMaxLength(256), + }, + "compliance_resource_types": { + Type: schema.TypeSet, + 
Optional: true, + MaxItems: 100, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateMaxLength(256), + }, + Set: schema.HashString, + }, + "tag_key": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateMaxLength(128), + }, + "tag_value": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateMaxLength(256), + }, + }, + }, + }, + "source": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "owner": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateConfigRuleSourceOwner, + }, + "source_detail": { + Type: schema.TypeSet, + Set: configRuleSourceDetailsHash, + Optional: true, + MaxItems: 25, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "event_source": { + Type: schema.TypeString, + Optional: true, + Default: "aws.config", + }, + "maximum_execution_frequency": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateConfigExecutionFrequency, + }, + "message_type": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "source_identifier": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateMaxLength(256), + }, + }, + }, + }, + }, + } +} + +func resourceAwsConfigConfigRulePut(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).configconn + + name := d.Get("name").(string) + ruleInput := configservice.ConfigRule{ + ConfigRuleName: aws.String(name), + Source: expandConfigRuleSource(d.Get("source").([]interface{})), + } + + scopes := d.Get("scope").([]interface{}) + if len(scopes) > 0 { + ruleInput.Scope = expandConfigRuleScope(scopes[0].(map[string]interface{})) + } + + if v, ok := d.GetOk("description"); ok { + ruleInput.Description = aws.String(v.(string)) + } + if v, ok := d.GetOk("input_parameters"); ok { + ruleInput.InputParameters = aws.String(v.(string)) + } + if v, ok := d.GetOk("maximum_execution_frequency"); ok { + ruleInput.MaximumExecutionFrequency = aws.String(v.(string)) + } + + input := configservice.PutConfigRuleInput{ + ConfigRule: &ruleInput, + } + log.Printf("[DEBUG] Creating AWSConfig config rule: %s", input) + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + _, err := conn.PutConfigRule(&input) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "InsufficientPermissionsException" { + // IAM is eventually consistent + return resource.RetryableError(err) + } + } + + return resource.NonRetryableError(fmt.Errorf("Failed to create AWSConfig rule: %s", err)) + } + + return nil + }) + if err != nil { + return err + } + + d.SetId(name) + + log.Printf("[DEBUG] AWSConfig config rule %q created", name) + + return resourceAwsConfigConfigRuleRead(d, meta) +} + +func resourceAwsConfigConfigRuleRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).configconn + + out, err := conn.DescribeConfigRules(&configservice.DescribeConfigRulesInput{ + ConfigRuleNames: []*string{aws.String(d.Id())}, + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoSuchConfigRuleException" { + log.Printf("[WARN] Config Rule %q is gone (NoSuchConfigRuleException)", d.Id()) + d.SetId("") + return nil + } + return err + } + + numberOfRules := len(out.ConfigRules) + if numberOfRules < 1 { + log.Printf("[WARN] Config Rule %q is gone (no rules found)", d.Id()) + d.SetId("") + return nil + } + + if numberOfRules > 1 { + return fmt.Errorf("Expected exactly 1 Config Rule, 
received %d: %#v", + numberOfRules, out.ConfigRules) + } + + log.Printf("[DEBUG] AWS Config config rule received: %s", out) + + rule := out.ConfigRules[0] + d.Set("arn", rule.ConfigRuleArn) + d.Set("rule_id", rule.ConfigRuleId) + d.Set("name", rule.ConfigRuleName) + d.Set("description", rule.Description) + d.Set("input_parameters", rule.InputParameters) + d.Set("maximum_execution_frequency", rule.MaximumExecutionFrequency) + + if rule.Scope != nil { + d.Set("scope", flattenConfigRuleScope(rule.Scope)) + } + + d.Set("source", flattenConfigRuleSource(rule.Source)) + + return nil +} + +func resourceAwsConfigConfigRuleDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).configconn + + name := d.Get("name").(string) + + log.Printf("[DEBUG] Deleting AWS Config config rule %q", name) + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteConfigRule(&configservice.DeleteConfigRuleInput{ + ConfigRuleName: aws.String(name), + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceInUseException" { + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) + if err != nil { + return fmt.Errorf("Deleting Config Rule failed: %s", err) + } + + conf := resource.StateChangeConf{ + Pending: []string{ + configservice.ConfigRuleStateActive, + configservice.ConfigRuleStateDeleting, + configservice.ConfigRuleStateDeletingResults, + configservice.ConfigRuleStateEvaluating, + }, + Target: []string{""}, + Timeout: 5 * time.Minute, + Refresh: func() (interface{}, string, error) { + out, err := conn.DescribeConfigRules(&configservice.DescribeConfigRulesInput{ + ConfigRuleNames: []*string{aws.String(d.Id())}, + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoSuchConfigRuleException" { + return 42, "", nil + } + return 42, "", fmt.Errorf("Failed to describe config rule %q: %s", d.Id(), err) + } + if len(out.ConfigRules) < 1 { + return 42, "", nil + } + rule := out.ConfigRules[0] + return out, *rule.ConfigRuleState, nil + }, + } + _, err = conf.WaitForState() + if err != nil { + return err + } + + log.Printf("[DEBUG] AWS Config config rule %q deleted", name) + + d.SetId("") + return nil +} + +func configRuleSourceDetailsHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + if v, ok := m["message_type"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + if v, ok := m["event_source"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + if v, ok := m["maximum_execution_frequency"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + return hashcode.String(buf.String()) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_configuration_recorder.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_configuration_recorder.go new file mode 100644 index 000000000..c635d63db --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_configuration_recorder.go @@ -0,0 +1,148 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/configservice" +) + +func resourceAwsConfigConfigurationRecorder() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsConfigConfigurationRecorderPut, + Read: 
resourceAwsConfigConfigurationRecorderRead, + Update: resourceAwsConfigConfigurationRecorderPut, + Delete: resourceAwsConfigConfigurationRecorderDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Default: "default", + ValidateFunc: validateMaxLength(256), + }, + "role_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, + }, + "recording_group": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "all_supported": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "include_global_resource_types": { + Type: schema.TypeBool, + Optional: true, + }, + "resource_types": { + Type: schema.TypeSet, + Set: schema.HashString, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + } +} + +func resourceAwsConfigConfigurationRecorderPut(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).configconn + + name := d.Get("name").(string) + recorder := configservice.ConfigurationRecorder{ + Name: aws.String(name), + RoleARN: aws.String(d.Get("role_arn").(string)), + } + + if g, ok := d.GetOk("recording_group"); ok { + recorder.RecordingGroup = expandConfigRecordingGroup(g.([]interface{})) + } + + input := configservice.PutConfigurationRecorderInput{ + ConfigurationRecorder: &recorder, + } + _, err := conn.PutConfigurationRecorder(&input) + if err != nil { + return fmt.Errorf("Creating Configuration Recorder failed: %s", err) + } + + d.SetId(name) + + return resourceAwsConfigConfigurationRecorderRead(d, meta) +} + +func resourceAwsConfigConfigurationRecorderRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).configconn + + input := configservice.DescribeConfigurationRecordersInput{ + ConfigurationRecorderNames: []*string{aws.String(d.Id())}, + } + out, err := conn.DescribeConfigurationRecorders(&input) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoSuchConfigurationRecorderException" { + log.Printf("[WARN] Configuration Recorder %q is gone (NoSuchConfigurationRecorderException)", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("Getting Configuration Recorder failed: %s", err) + } + + numberOfRecorders := len(out.ConfigurationRecorders) + if numberOfRecorders < 1 { + log.Printf("[WARN] Configuration Recorder %q is gone (no recorders found)", d.Id()) + d.SetId("") + return nil + } + + if numberOfRecorders > 1 { + return fmt.Errorf("Expected exactly 1 Configuration Recorder, received %d: %#v", + numberOfRecorders, out.ConfigurationRecorders) + } + + recorder := out.ConfigurationRecorders[0] + + d.Set("name", recorder.Name) + d.Set("role_arn", recorder.RoleARN) + + if recorder.RecordingGroup != nil { + flattened := flattenConfigRecordingGroup(recorder.RecordingGroup) + err = d.Set("recording_group", flattened) + if err != nil { + return fmt.Errorf("Failed to set recording_group: %s", err) + } + } + + return nil +} + +func resourceAwsConfigConfigurationRecorderDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).configconn + input := configservice.DeleteConfigurationRecorderInput{ + ConfigurationRecorderName: aws.String(d.Id()), + } + _, err := conn.DeleteConfigurationRecorder(&input) + if err != nil { + return fmt.Errorf("Deleting Configuration Recorder failed: %s", 
err) + } + + d.SetId("") + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_configuration_recorder_status.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_configuration_recorder_status.go new file mode 100644 index 000000000..a2ba85b5d --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_configuration_recorder_status.go @@ -0,0 +1,122 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/configservice" +) + +func resourceAwsConfigConfigurationRecorderStatus() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsConfigConfigurationRecorderStatusPut, + Read: resourceAwsConfigConfigurationRecorderStatusRead, + Update: resourceAwsConfigConfigurationRecorderStatusPut, + Delete: resourceAwsConfigConfigurationRecorderStatusDelete, + + Importer: &schema.ResourceImporter{ + State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("name", d.Id()) + return []*schema.ResourceData{d}, nil + }, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "is_enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + } +} + +func resourceAwsConfigConfigurationRecorderStatusPut(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).configconn + + name := d.Get("name").(string) + d.SetId(name) + + if d.HasChange("is_enabled") { + isEnabled := d.Get("is_enabled").(bool) + if isEnabled { + log.Printf("[DEBUG] Starting AWSConfig Configuration recorder %q", name) + startInput := configservice.StartConfigurationRecorderInput{ + ConfigurationRecorderName: aws.String(name), + } + _, err := conn.StartConfigurationRecorder(&startInput) + if err != nil { + return fmt.Errorf("Failed to start Configuration Recorder: %s", err) + } + } else { + log.Printf("[DEBUG] Stopping AWSConfig Configuration recorder %q", name) + stopInput := configservice.StopConfigurationRecorderInput{ + ConfigurationRecorderName: aws.String(name), + } + _, err := conn.StopConfigurationRecorder(&stopInput) + if err != nil { + return fmt.Errorf("Failed to stop Configuration Recorder: %s", err) + } + } + } + + return resourceAwsConfigConfigurationRecorderStatusRead(d, meta) +} + +func resourceAwsConfigConfigurationRecorderStatusRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).configconn + + name := d.Id() + statusInput := configservice.DescribeConfigurationRecorderStatusInput{ + ConfigurationRecorderNames: []*string{aws.String(name)}, + } + statusOut, err := conn.DescribeConfigurationRecorderStatus(&statusInput) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "NoSuchConfigurationRecorderException" { + log.Printf("[WARN] Configuration Recorder (status) %q is gone (NoSuchConfigurationRecorderException)", name) + d.SetId("") + return nil + } + } + return fmt.Errorf("Failed describing Configuration Recorder %q status: %s", + name, err) + } + + numberOfStatuses := len(statusOut.ConfigurationRecordersStatus) + if numberOfStatuses < 1 { + log.Printf("[WARN] Configuration Recorder (status) %q is gone (no recorders found)", name) + d.SetId("") + return nil + } + + if numberOfStatuses > 1 { + return fmt.Errorf("Expected exactly 1 Configuration Recorder 
(status), received %d: %#v", + numberOfStatuses, statusOut.ConfigurationRecordersStatus) + } + + d.Set("is_enabled", statusOut.ConfigurationRecordersStatus[0].Recording) + + return nil +} + +func resourceAwsConfigConfigurationRecorderStatusDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).configconn + input := configservice.StopConfigurationRecorderInput{ + ConfigurationRecorderName: aws.String(d.Get("name").(string)), + } + _, err := conn.StopConfigurationRecorder(&input) + if err != nil { + return fmt.Errorf("Stopping Configuration Recorder failed: %s", err) + } + + d.SetId("") + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_delivery_channel.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_delivery_channel.go new file mode 100644 index 000000000..e77836dc7 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_config_delivery_channel.go @@ -0,0 +1,171 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/configservice" +) + +func resourceAwsConfigDeliveryChannel() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsConfigDeliveryChannelPut, + Read: resourceAwsConfigDeliveryChannelRead, + Update: resourceAwsConfigDeliveryChannelPut, + Delete: resourceAwsConfigDeliveryChannelDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Default: "default", + ValidateFunc: validateMaxLength(256), + }, + "s3_bucket_name": { + Type: schema.TypeString, + Required: true, + }, + "s3_key_prefix": { + Type: schema.TypeString, + Optional: true, + }, + "sns_topic_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, + }, + "snapshot_delivery_properties": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "delivery_frequency": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateConfigExecutionFrequency, + }, + }, + }, + }, + }, + } +} + +func resourceAwsConfigDeliveryChannelPut(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).configconn + + name := d.Get("name").(string) + channel := configservice.DeliveryChannel{ + Name: aws.String(name), + S3BucketName: aws.String(d.Get("s3_bucket_name").(string)), + } + + if v, ok := d.GetOk("s3_key_prefix"); ok { + channel.S3KeyPrefix = aws.String(v.(string)) + } + if v, ok := d.GetOk("sns_topic_arn"); ok { + channel.SnsTopicARN = aws.String(v.(string)) + } + + if p, ok := d.GetOk("snapshot_delivery_properties"); ok { + propertiesBlocks := p.([]interface{}) + block := propertiesBlocks[0].(map[string]interface{}) + + if v, ok := block["delivery_frequency"]; ok { + channel.ConfigSnapshotDeliveryProperties = &configservice.ConfigSnapshotDeliveryProperties{ + DeliveryFrequency: aws.String(v.(string)), + } + } + } + + input := configservice.PutDeliveryChannelInput{DeliveryChannel: &channel} + + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + _, err := conn.PutDeliveryChannel(&input) + if err == nil { + return nil + } + + awsErr, ok := err.(awserr.Error) + if ok && 
awsErr.Code() == "InsufficientDeliveryPolicyException" { + return resource.RetryableError(err) + } + + return resource.NonRetryableError(err) + }) + if err != nil { + return fmt.Errorf("Creating Delivery Channel failed: %s", err) + } + + d.SetId(name) + + return resourceAwsConfigDeliveryChannelRead(d, meta) +} + +func resourceAwsConfigDeliveryChannelRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).configconn + + input := configservice.DescribeDeliveryChannelsInput{ + DeliveryChannelNames: []*string{aws.String(d.Id())}, + } + out, err := conn.DescribeDeliveryChannels(&input) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "NoSuchDeliveryChannelException" { + log.Printf("[WARN] Delivery Channel %q is gone (NoSuchDeliveryChannelException)", d.Id()) + d.SetId("") + return nil + } + } + return fmt.Errorf("Getting Delivery Channel failed: %s", err) + } + + if len(out.DeliveryChannels) < 1 { + log.Printf("[WARN] Delivery Channel %q is gone (no channels found)", d.Id()) + d.SetId("") + return nil + } + + if len(out.DeliveryChannels) > 1 { + return fmt.Errorf("Received %d delivery channels under %s (expected exactly 1): %s", + len(out.DeliveryChannels), d.Id(), out.DeliveryChannels) + } + + channel := out.DeliveryChannels[0] + + d.Set("name", channel.Name) + d.Set("s3_bucket_name", channel.S3BucketName) + d.Set("s3_key_prefix", channel.S3KeyPrefix) + d.Set("sns_topic_arn", channel.SnsTopicARN) + + if channel.ConfigSnapshotDeliveryProperties != nil { + d.Set("snapshot_delivery_properties", flattenConfigSnapshotDeliveryProperties(channel.ConfigSnapshotDeliveryProperties)) + } + + return nil +} + +func resourceAwsConfigDeliveryChannelDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).configconn + input := configservice.DeleteDeliveryChannelInput{ + DeliveryChannelName: aws.String(d.Id()), + } + _, err := conn.DeleteDeliveryChannel(&input) + if err != nil { + return fmt.Errorf("Unable to delete delivery channel: %s", err) + } + + d.SetId("") + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_customer_gateway.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_customer_gateway.go new file mode 100644 index 000000000..668f8a80c --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_customer_gateway.go @@ -0,0 +1,283 @@ +package aws + +import ( + "fmt" + "log" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsCustomerGateway() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCustomerGatewayCreate, + Read: resourceAwsCustomerGatewayRead, + Update: resourceAwsCustomerGatewayUpdate, + Delete: resourceAwsCustomerGatewayDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "bgp_asn": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "ip_address": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsCustomerGatewayCreate(d *schema.ResourceData, meta interface{}) error { + conn := 
meta.(*AWSClient).ec2conn + + ipAddress := d.Get("ip_address").(string) + vpnType := d.Get("type").(string) + bgpAsn := d.Get("bgp_asn").(int) + + alreadyExists, err := resourceAwsCustomerGatewayExists(vpnType, ipAddress, bgpAsn, conn) + if err != nil { + return err + } + + if alreadyExists { + return fmt.Errorf("An existing customer gateway for IpAddress: %s, VpnType: %s, BGP ASN: %d has been found", ipAddress, vpnType, bgpAsn) + } + + createOpts := &ec2.CreateCustomerGatewayInput{ + BgpAsn: aws.Int64(int64(bgpAsn)), + PublicIp: aws.String(ipAddress), + Type: aws.String(vpnType), + } + + // Create the Customer Gateway. + log.Printf("[DEBUG] Creating customer gateway") + resp, err := conn.CreateCustomerGateway(createOpts) + if err != nil { + return fmt.Errorf("Error creating customer gateway: %s", err) + } + + // Store the ID + customerGateway := resp.CustomerGateway + d.SetId(*customerGateway.CustomerGatewayId) + log.Printf("[INFO] Customer gateway ID: %s", *customerGateway.CustomerGatewayId) + + // Wait for the CustomerGateway to be available. + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"available"}, + Refresh: customerGatewayRefreshFunc(conn, *customerGateway.CustomerGatewayId), + Timeout: 10 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, stateErr := stateConf.WaitForState() + if stateErr != nil { + return fmt.Errorf( + "Error waiting for customer gateway (%s) to become ready: %s", + *customerGateway.CustomerGatewayId, stateErr) + } + + // Create tags. + if err := setTags(conn, d); err != nil { + return err + } + + return nil +} + +func customerGatewayRefreshFunc(conn *ec2.EC2, gatewayId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + gatewayFilter := &ec2.Filter{ + Name: aws.String("customer-gateway-id"), + Values: []*string{aws.String(gatewayId)}, + } + + resp, err := conn.DescribeCustomerGateways(&ec2.DescribeCustomerGatewaysInput{ + Filters: []*ec2.Filter{gatewayFilter}, + }) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidCustomerGatewayID.NotFound" { + resp = nil + } else { + log.Printf("Error on CustomerGatewayRefresh: %s", err) + return nil, "", err + } + } + + if resp == nil || len(resp.CustomerGateways) == 0 { + // handle consistency issues + return nil, "", nil + } + + gateway := resp.CustomerGateways[0] + return gateway, *gateway.State, nil + } +} + +func resourceAwsCustomerGatewayExists(vpnType, ipAddress string, bgpAsn int, conn *ec2.EC2) (bool, error) { + ipAddressFilter := &ec2.Filter{ + Name: aws.String("ip-address"), + Values: []*string{aws.String(ipAddress)}, + } + + typeFilter := &ec2.Filter{ + Name: aws.String("type"), + Values: []*string{aws.String(vpnType)}, + } + + bgp := strconv.Itoa(bgpAsn) + bgpAsnFilter := &ec2.Filter{ + Name: aws.String("bgp-asn"), + Values: []*string{aws.String(bgp)}, + } + + resp, err := conn.DescribeCustomerGateways(&ec2.DescribeCustomerGatewaysInput{ + Filters: []*ec2.Filter{ipAddressFilter, typeFilter, bgpAsnFilter}, + }) + if err != nil { + return false, err + } + + if len(resp.CustomerGateways) > 0 && *resp.CustomerGateways[0].State != "deleted" { + return true, nil + } + + return false, nil +} + +func resourceAwsCustomerGatewayRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + gatewayFilter := &ec2.Filter{ + Name: aws.String("customer-gateway-id"), + Values: []*string{aws.String(d.Id())}, + } + + resp, err := 
conn.DescribeCustomerGateways(&ec2.DescribeCustomerGatewaysInput{ + Filters: []*ec2.Filter{gatewayFilter}, + }) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidCustomerGatewayID.NotFound" { + d.SetId("") + return nil + } else { + log.Printf("[ERROR] Error finding CustomerGateway: %s", err) + return err + } + } + + if len(resp.CustomerGateways) != 1 { + return fmt.Errorf("[ERROR] Error finding CustomerGateway: %s", d.Id()) + } + + if *resp.CustomerGateways[0].State == "deleted" { + log.Printf("[INFO] Customer Gateway is in `deleted` state: %s", d.Id()) + d.SetId("") + return nil + } + + customerGateway := resp.CustomerGateways[0] + d.Set("ip_address", customerGateway.IpAddress) + d.Set("type", customerGateway.Type) + d.Set("tags", tagsToMap(customerGateway.Tags)) + + if *customerGateway.BgpAsn != "" { + val, err := strconv.ParseInt(*customerGateway.BgpAsn, 0, 0) + if err != nil { + return fmt.Errorf("error parsing bgp_asn: %s", err) + } + + d.Set("bgp_asn", int(val)) + } + + return nil +} + +func resourceAwsCustomerGatewayUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + // Update tags if required. + if err := setTags(conn, d); err != nil { + return err + } + + d.SetPartial("tags") + + return resourceAwsCustomerGatewayRead(d, meta) +} + +func resourceAwsCustomerGatewayDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + _, err := conn.DeleteCustomerGateway(&ec2.DeleteCustomerGatewayInput{ + CustomerGatewayId: aws.String(d.Id()), + }) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidCustomerGatewayID.NotFound" { + d.SetId("") + return nil + } else { + log.Printf("[ERROR] Error deleting CustomerGateway: %s", err) + return err + } + } + + gatewayFilter := &ec2.Filter{ + Name: aws.String("customer-gateway-id"), + Values: []*string{aws.String(d.Id())}, + } + + err = resource.Retry(5*time.Minute, func() *resource.RetryError { + resp, err := conn.DescribeCustomerGateways(&ec2.DescribeCustomerGatewaysInput{ + Filters: []*ec2.Filter{gatewayFilter}, + }) + + if err != nil { + if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "InvalidCustomerGatewayID.NotFound" { + return nil + } + return resource.NonRetryableError(err) + } + + if len(resp.CustomerGateways) != 1 { + return resource.RetryableError(fmt.Errorf("[ERROR] Error finding CustomerGateway for delete: %s", d.Id())) + } + + switch *resp.CustomerGateways[0].State { + case "pending", "available", "deleting": + return resource.RetryableError(fmt.Errorf("[DEBUG] Gateway (%s) in state (%s), retrying", d.Id(), *resp.CustomerGateways[0].State)) + case "deleted": + return nil + default: + return resource.RetryableError(fmt.Errorf("[DEBUG] Unrecognized state (%s) for Customer Gateway delete on (%s)", *resp.CustomerGateways[0].State, d.Id())) + } + }) + + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_event_subscription.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_event_subscription.go new file mode 100644 index 000000000..9e725ce2d --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_event_subscription.go @@ -0,0 +1,385 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/rds" + 
"github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsDbEventSubscription() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsDbEventSubscriptionCreate, + Read: resourceAwsDbEventSubscriptionRead, + Update: resourceAwsDbEventSubscriptionUpdate, + Delete: resourceAwsDbEventSubscriptionDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsDbEventSubscriptionImport, + }, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateDbEventSubscriptionName, + }, + "sns_topic": { + Type: schema.TypeString, + Required: true, + }, + "event_categories": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "source_ids": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + // ValidateFunc: validateDbEventSubscriptionSourceIds, + // requires source_type to be set, does not seem to be a way to validate this + }, + "source_type": { + Type: schema.TypeString, + Optional: true, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "customer_aws_id": { + Type: schema.TypeString, + Computed: true, + }, + "tags": tagsSchema(), + }, + } +} + +func resourceAwsDbEventSubscriptionCreate(d *schema.ResourceData, meta interface{}) error { + rdsconn := meta.(*AWSClient).rdsconn + name := d.Get("name").(string) + tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) + + sourceIdsSet := d.Get("source_ids").(*schema.Set) + sourceIds := make([]*string, sourceIdsSet.Len()) + for i, sourceId := range sourceIdsSet.List() { + sourceIds[i] = aws.String(sourceId.(string)) + } + + eventCategoriesSet := d.Get("event_categories").(*schema.Set) + eventCategories := make([]*string, eventCategoriesSet.Len()) + for i, eventCategory := range eventCategoriesSet.List() { + eventCategories[i] = aws.String(eventCategory.(string)) + } + + request := &rds.CreateEventSubscriptionInput{ + SubscriptionName: aws.String(name), + SnsTopicArn: aws.String(d.Get("sns_topic").(string)), + Enabled: aws.Bool(d.Get("enabled").(bool)), + SourceIds: sourceIds, + SourceType: aws.String(d.Get("source_type").(string)), + EventCategories: eventCategories, + Tags: tags, + } + + log.Println("[DEBUG] Create RDS Event Subscription:", request) + + _, err := rdsconn.CreateEventSubscription(request) + if err != nil { + return fmt.Errorf("Error creating RDS Event Subscription %s: %s", name, err) + } + + log.Println( + "[INFO] Waiting for RDS Event Subscription to be ready") + + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating"}, + Target: []string{"active"}, + Refresh: resourceAwsDbEventSubscriptionRefreshFunc(d, meta.(*AWSClient).rdsconn), + Timeout: 40 * time.Minute, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, // Wait 30 secs before starting + } + + // Wait, catching any errors + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Creating RDS Event Subscription %s failed: %s", d.Id(), err) + } + + return resourceAwsDbEventSubscriptionRead(d, meta) +} + +func resourceAwsDbEventSubscriptionRead(d *schema.ResourceData, meta interface{}) error { + sub, err := resourceAwsDbEventSubscriptionRetrieve(d.Get("name").(string), meta.(*AWSClient).rdsconn) + if err != nil { + return fmt.Errorf("Error retrieving RDS Event Subscription %s: %s", d.Id(), err) + } + if sub == 
nil { + d.SetId("") + return nil + } + + d.SetId(*sub.CustSubscriptionId) + if err := d.Set("name", sub.CustSubscriptionId); err != nil { + return err + } + if err := d.Set("sns_topic", sub.SnsTopicArn); err != nil { + return err + } + if err := d.Set("source_type", sub.SourceType); err != nil { + return err + } + if err := d.Set("enabled", sub.Enabled); err != nil { + return err + } + if err := d.Set("source_ids", flattenStringList(sub.SourceIdsList)); err != nil { + return err + } + if err := d.Set("event_categories", flattenStringList(sub.EventCategoriesList)); err != nil { + return err + } + if err := d.Set("customer_aws_id", sub.CustomerAwsId); err != nil { + return err + } + + // list tags for resource + // set tags + conn := meta.(*AWSClient).rdsconn + if arn, err := buildRDSEventSubscriptionARN(d.Get("customer_aws_id").(string), d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).region); err != nil { + log.Printf("[DEBUG] Error building ARN for RDS Event Subscription, not setting Tags for Event Subscription %s", *sub.CustSubscriptionId) + } else { + resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{ + ResourceName: aws.String(arn), + }) + + if err != nil { + log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn) + } + + var dt []*rds.Tag + if len(resp.TagList) > 0 { + dt = resp.TagList + } + d.Set("tags", tagsToMapRDS(dt)) + } + + return nil +} + +func resourceAwsDbEventSubscriptionRetrieve( + name string, rdsconn *rds.RDS) (*rds.EventSubscription, error) { + + request := &rds.DescribeEventSubscriptionsInput{ + SubscriptionName: aws.String(name), + } + + describeResp, err := rdsconn.DescribeEventSubscriptions(request) + if err != nil { + if rdserr, ok := err.(awserr.Error); ok && rdserr.Code() == "SubscriptionNotFound" { + log.Printf("[WARN] No RDS Event Subscription by name (%s) found", name) + return nil, nil + } + return nil, fmt.Errorf("Error reading RDS Event Subscription %s: %s", name, err) + } + + if len(describeResp.EventSubscriptionsList) != 1 { + return nil, fmt.Errorf("Unable to find RDS Event Subscription: %#v", describeResp.EventSubscriptionsList) + } + + return describeResp.EventSubscriptionsList[0], nil +} + +func resourceAwsDbEventSubscriptionUpdate(d *schema.ResourceData, meta interface{}) error { + rdsconn := meta.(*AWSClient).rdsconn + + d.Partial(true) + requestUpdate := false + + req := &rds.ModifyEventSubscriptionInput{ + SubscriptionName: aws.String(d.Id()), + } + + if d.HasChange("event_categories") { + eventCategoriesSet := d.Get("event_categories").(*schema.Set) + req.EventCategories = make([]*string, eventCategoriesSet.Len()) + for i, eventCategory := range eventCategoriesSet.List() { + req.EventCategories[i] = aws.String(eventCategory.(string)) + } + requestUpdate = true + } + + if d.HasChange("enabled") { + req.Enabled = aws.Bool(d.Get("enabled").(bool)) + requestUpdate = true + } + + if d.HasChange("sns_topic") { + req.SnsTopicArn = aws.String(d.Get("sns_topic").(string)) + requestUpdate = true + } + + if d.HasChange("source_type") { + req.SourceType = aws.String(d.Get("source_type").(string)) + requestUpdate = true + } + + log.Printf("[DEBUG] Send RDS Event Subscription modification request: %#v", requestUpdate) + if requestUpdate { + log.Printf("[DEBUG] RDS Event Subscription modification request: %#v", req) + _, err := rdsconn.ModifyEventSubscription(req) + if err != nil { + return fmt.Errorf("Modifying RDS Event Subscription %s failed: %s", d.Id(), err) + } + + log.Println( + "[INFO] Waiting for RDS Event Subscription 
modification to finish") + + stateConf := &resource.StateChangeConf{ + Pending: []string{"modifying"}, + Target: []string{"active"}, + Refresh: resourceAwsDbEventSubscriptionRefreshFunc(d, meta.(*AWSClient).rdsconn), + Timeout: 40 * time.Minute, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, // Wait 30 secs before starting + } + + // Wait, catching any errors + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Modifying RDS Event Subscription %s failed: %s", d.Id(), err) + } + d.SetPartial("event_categories") + d.SetPartial("enabled") + d.SetPartial("sns_topic") + d.SetPartial("source_type") + } + + if arn, err := buildRDSEventSubscriptionARN(d.Get("customer_aws_id").(string), d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).region); err == nil { + if err := setTagsRDS(rdsconn, d, arn); err != nil { + return err + } else { + d.SetPartial("tags") + } + } + + if d.HasChange("source_ids") { + o, n := d.GetChange("source_ids") + if o == nil { + o = new(schema.Set) + } + if n == nil { + n = new(schema.Set) + } + + os := o.(*schema.Set) + ns := n.(*schema.Set) + remove := expandStringList(os.Difference(ns).List()) + add := expandStringList(ns.Difference(os).List()) + + if len(remove) > 0 { + for _, removing := range remove { + log.Printf("[INFO] Removing %s as a Source Identifier from %q", *removing, d.Id()) + _, err := rdsconn.RemoveSourceIdentifierFromSubscription(&rds.RemoveSourceIdentifierFromSubscriptionInput{ + SourceIdentifier: removing, + SubscriptionName: aws.String(d.Id()), + }) + if err != nil { + return err + } + } + } + + if len(add) > 0 { + for _, adding := range add { + log.Printf("[INFO] Adding %s as a Source Identifier to %q", *adding, d.Id()) + _, err := rdsconn.AddSourceIdentifierToSubscription(&rds.AddSourceIdentifierToSubscriptionInput{ + SourceIdentifier: adding, + SubscriptionName: aws.String(d.Id()), + }) + if err != nil { + return err + } + } + } + d.SetPartial("source_ids") + } + + d.Partial(false) + + return nil +} + +func resourceAwsDbEventSubscriptionDelete(d *schema.ResourceData, meta interface{}) error { + rdsconn := meta.(*AWSClient).rdsconn + deleteOpts := rds.DeleteEventSubscriptionInput{ + SubscriptionName: aws.String(d.Id()), + } + + if _, err := rdsconn.DeleteEventSubscription(&deleteOpts); err != nil { + rdserr, ok := err.(awserr.Error) + if !ok { + return fmt.Errorf("Error deleting RDS Event Subscription %s: %s", d.Id(), err) + } + + if rdserr.Code() != "DBEventSubscriptionNotFoundFault" { + log.Printf("[WARN] RDS Event Subscription %s missing during delete", d.Id()) + return fmt.Errorf("Error deleting RDS Event Subscription %s: %s", d.Id(), err) + } + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"deleting"}, + Target: []string{}, + Refresh: resourceAwsDbEventSubscriptionRefreshFunc(d, meta.(*AWSClient).rdsconn), + Timeout: 40 * time.Minute, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, // Wait 30 secs before starting + } + _, err := stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error deleting RDS Event Subscription %s: %s", d.Id(), err) + } + return err +} + +func resourceAwsDbEventSubscriptionRefreshFunc( + d *schema.ResourceData, + rdsconn *rds.RDS) resource.StateRefreshFunc { + + return func() (interface{}, string, error) { + sub, err := resourceAwsDbEventSubscriptionRetrieve(d.Get("name").(string), rdsconn) + + if err != nil { + log.Printf("Error on retrieving DB Event Subscription when waiting: %s", err) + return nil, "", err + } + + if sub == nil { + return 
nil, "", nil + } + + if sub.Status != nil { + log.Printf("[DEBUG] DB Event Subscription status for %s: %s", d.Id(), *sub.Status) + } + + return sub, *sub.Status, nil + } +} + +func buildRDSEventSubscriptionARN(customerAwsId, subscriptionId, partition, region string) (string, error) { + if partition == "" { + return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS partition") + } + arn := fmt.Sprintf("arn:%s:rds:%s:%s:es:%s", partition, region, customerAwsId, subscriptionId) + return arn, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_instance.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_instance.go new file mode 100644 index 000000000..05621fb5d --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_instance.go @@ -0,0 +1,1152 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/rds" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsDbInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsDbInstanceCreate, + Read: resourceAwsDbInstanceRead, + Update: resourceAwsDbInstanceUpdate, + Delete: resourceAwsDbInstanceDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsDbInstanceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(40 * time.Minute), + Update: schema.DefaultTimeout(80 * time.Minute), + Delete: schema.DefaultTimeout(40 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "username": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "password": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + }, + + "engine": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + StateFunc: func(v interface{}) string { + value := v.(string) + return strings.ToLower(value) + }, + }, + + "engine_version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + DiffSuppressFunc: suppressAwsDbEngineVersionDiffs, + }, + + "character_set_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "storage_encrypted": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + + "allocated_storage": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "storage_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "identifier": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"identifier_prefix"}, + ValidateFunc: validateRdsIdentifier, + }, + "identifier_prefix": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateRdsIdentifierPrefix, + }, + + "instance_class": { + Type: schema.TypeString, + Required: true, + }, + + "availability_zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "backup_retention_period": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "backup_window": { + Type: schema.TypeString, + Optional: true, + Computed: true, + 
ValidateFunc: validateOnceADayWindowFormat, + }, + + "iops": { + Type: schema.TypeInt, + Optional: true, + }, + + "license_model": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "maintenance_window": { + Type: schema.TypeString, + Optional: true, + Computed: true, + StateFunc: func(v interface{}) string { + if v != nil { + value := v.(string) + return strings.ToLower(value) + } + return "" + }, + ValidateFunc: validateOnceAWeekWindowFormat, + }, + + "multi_az": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "port": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "publicly_accessible": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "vpc_security_group_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "security_group_names": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "final_snapshot_identifier": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + value := v.(string) + if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) { + es = append(es, fmt.Errorf( + "only alphanumeric characters and hyphens allowed in %q", k)) + } + if regexp.MustCompile(`--`).MatchString(value) { + es = append(es, fmt.Errorf("%q cannot contain two consecutive hyphens", k)) + } + if regexp.MustCompile(`-$`).MatchString(value) { + es = append(es, fmt.Errorf("%q cannot end in a hyphen", k)) + } + return + }, + }, + + "skip_final_snapshot": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "copy_tags_to_snapshot": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "db_subnet_group_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "parameter_group_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "address": { + Type: schema.TypeString, + Computed: true, + }, + + "endpoint": { + Type: schema.TypeString, + Computed: true, + }, + + "hosted_zone_id": { + Type: schema.TypeString, + Computed: true, + }, + + "status": { + Type: schema.TypeString, + Computed: true, + }, + + // apply_immediately is used to determine when the update modifications + // take place. 
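When disabled, modifications are applied during the next maintenance window.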
+ // See http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html + "apply_immediately": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "replicate_source_db": { + Type: schema.TypeString, + Optional: true, + }, + + "replicas": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "snapshot_identifier": { + Type: schema.TypeString, + Computed: false, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "auto_minor_version_upgrade": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + + "allow_major_version_upgrade": { + Type: schema.TypeBool, + Computed: false, + Optional: true, + }, + + "monitoring_role_arn": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "monitoring_interval": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + }, + + "option_group_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + + "timezone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "iam_database_authentication_enabled": { + Type: schema.TypeBool, + Optional: true, + }, + + "resource_id": { + Type: schema.TypeString, + Computed: true, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) + + var identifier string + if v, ok := d.GetOk("identifier"); ok { + identifier = v.(string) + } else { + if v, ok := d.GetOk("identifier_prefix"); ok { + identifier = resource.PrefixedUniqueId(v.(string)) + } else { + identifier = resource.UniqueId() + } + + // SQL Server identifier size is max 15 chars, so truncate + if engine := d.Get("engine").(string); engine != "" { + if strings.Contains(strings.ToLower(engine), "sqlserver") { + identifier = identifier[:15] + } + } + d.Set("identifier", identifier) + } + + if v, ok := d.GetOk("replicate_source_db"); ok { + opts := rds.CreateDBInstanceReadReplicaInput{ + SourceDBInstanceIdentifier: aws.String(v.(string)), + CopyTagsToSnapshot: aws.Bool(d.Get("copy_tags_to_snapshot").(bool)), + DBInstanceClass: aws.String(d.Get("instance_class").(string)), + DBInstanceIdentifier: aws.String(identifier), + PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)), + Tags: tags, + } + if attr, ok := d.GetOk("iops"); ok { + opts.Iops = aws.Int64(int64(attr.(int))) + } + + if attr, ok := d.GetOk("port"); ok { + opts.Port = aws.Int64(int64(attr.(int))) + } + + if attr, ok := d.GetOk("availability_zone"); ok { + opts.AvailabilityZone = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("storage_type"); ok { + opts.StorageType = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("db_subnet_group_name"); ok { + opts.DBSubnetGroupName = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("monitoring_role_arn"); ok { + opts.MonitoringRoleArn = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("monitoring_interval"); ok { + opts.MonitoringInterval = aws.Int64(int64(attr.(int))) + } + + if attr, ok := d.GetOk("option_group_name"); ok { + opts.OptionGroupName = aws.String(attr.(string)) + } + + log.Printf("[DEBUG] DB Instance Replica create configuration: %#v", opts) + _, err := 
conn.CreateDBInstanceReadReplica(&opts) + if err != nil { + return fmt.Errorf("Error creating DB Instance: %s", err) + } + } else if _, ok := d.GetOk("snapshot_identifier"); ok { + opts := rds.RestoreDBInstanceFromDBSnapshotInput{ + DBInstanceClass: aws.String(d.Get("instance_class").(string)), + DBInstanceIdentifier: aws.String(d.Get("identifier").(string)), + DBSnapshotIdentifier: aws.String(d.Get("snapshot_identifier").(string)), + AutoMinorVersionUpgrade: aws.Bool(d.Get("auto_minor_version_upgrade").(bool)), + PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)), + Tags: tags, + CopyTagsToSnapshot: aws.Bool(d.Get("copy_tags_to_snapshot").(bool)), + } + + if attr, ok := d.GetOk("name"); ok { + // "Note: This parameter [DBName] doesn't apply to the MySQL, PostgreSQL, or MariaDB engines." + // https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_RestoreDBInstanceFromDBSnapshot.html + switch strings.ToLower(d.Get("engine").(string)) { + case "mysql", "postgres", "mariadb": + // skip + default: + opts.DBName = aws.String(attr.(string)) + } + } + + if attr, ok := d.GetOk("availability_zone"); ok { + opts.AvailabilityZone = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("db_subnet_group_name"); ok { + opts.DBSubnetGroupName = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("engine"); ok { + opts.Engine = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("iops"); ok { + opts.Iops = aws.Int64(int64(attr.(int))) + } + + if attr, ok := d.GetOk("license_model"); ok { + opts.LicenseModel = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("multi_az"); ok { + opts.MultiAZ = aws.Bool(attr.(bool)) + } + + if attr, ok := d.GetOk("option_group_name"); ok { + opts.OptionGroupName = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("port"); ok { + opts.Port = aws.Int64(int64(attr.(int))) + } + + if attr, ok := d.GetOk("tde_credential_arn"); ok { + opts.TdeCredentialArn = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("storage_type"); ok { + opts.StorageType = aws.String(attr.(string)) + } + + log.Printf("[DEBUG] DB Instance restore from snapshot configuration: %s", opts) + _, err := conn.RestoreDBInstanceFromDBSnapshot(&opts) + if err != nil { + return fmt.Errorf("Error creating DB Instance: %s", err) + } + + var sgUpdate bool + var passwordUpdate bool + + if _, ok := d.GetOk("password"); ok { + passwordUpdate = true + } + + if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { + sgUpdate = true + } + if attr := d.Get("security_group_names").(*schema.Set); attr.Len() > 0 { + sgUpdate = true + } + if sgUpdate || passwordUpdate { + log.Printf("[INFO] DB is restoring from snapshot with default security settings; custom security settings will be applied once the instance is available") + + // wait for the instance to become available, then apply the security modifications + d.SetId(d.Get("identifier").(string)) + + log.Printf("[INFO] DB Instance ID: %s", d.Id()) + + log.Println( + "[INFO] Waiting for DB Instance to be available") + + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating", "backing-up", "modifying", "resetting-master-credentials", + "maintenance", "renaming", "rebooting", "upgrading"}, + Target: []string{"available"}, + Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta), + Timeout: d.Timeout(schema.TimeoutCreate), + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, // Wait 30 secs before starting + } + + // Wait, catching any errors + _, err := stateConf.WaitForState() + if err != nil { + return err + 
} + + err = resourceAwsDbInstanceUpdate(d, meta) + if err != nil { + return err + } + + } + } else { + if _, ok := d.GetOk("allocated_storage"); !ok { + return fmt.Errorf(`provider.aws: aws_db_instance: %s: "allocated_storage": required field is not set`, d.Get("name").(string)) + } + if _, ok := d.GetOk("engine"); !ok { + return fmt.Errorf(`provider.aws: aws_db_instance: %s: "engine": required field is not set`, d.Get("name").(string)) + } + if _, ok := d.GetOk("password"); !ok { + return fmt.Errorf(`provider.aws: aws_db_instance: %s: "password": required field is not set`, d.Get("name").(string)) + } + if _, ok := d.GetOk("username"); !ok { + return fmt.Errorf(`provider.aws: aws_db_instance: %s: "username": required field is not set`, d.Get("name").(string)) + } + opts := rds.CreateDBInstanceInput{ + AllocatedStorage: aws.Int64(int64(d.Get("allocated_storage").(int))), + DBName: aws.String(d.Get("name").(string)), + DBInstanceClass: aws.String(d.Get("instance_class").(string)), + DBInstanceIdentifier: aws.String(d.Get("identifier").(string)), + MasterUsername: aws.String(d.Get("username").(string)), + MasterUserPassword: aws.String(d.Get("password").(string)), + Engine: aws.String(d.Get("engine").(string)), + EngineVersion: aws.String(d.Get("engine_version").(string)), + StorageEncrypted: aws.Bool(d.Get("storage_encrypted").(bool)), + AutoMinorVersionUpgrade: aws.Bool(d.Get("auto_minor_version_upgrade").(bool)), + PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)), + Tags: tags, + CopyTagsToSnapshot: aws.Bool(d.Get("copy_tags_to_snapshot").(bool)), + } + + attr := d.Get("backup_retention_period") + opts.BackupRetentionPeriod = aws.Int64(int64(attr.(int))) + if attr, ok := d.GetOk("multi_az"); ok { + opts.MultiAZ = aws.Bool(attr.(bool)) + + } + + if attr, ok := d.GetOk("character_set_name"); ok { + opts.CharacterSetName = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("timezone"); ok { + opts.Timezone = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("maintenance_window"); ok { + opts.PreferredMaintenanceWindow = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("backup_window"); ok { + opts.PreferredBackupWindow = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("license_model"); ok { + opts.LicenseModel = aws.String(attr.(string)) + } + if attr, ok := d.GetOk("parameter_group_name"); ok { + opts.DBParameterGroupName = aws.String(attr.(string)) + } + + if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { + var s []*string + for _, v := range attr.List() { + s = append(s, aws.String(v.(string))) + } + opts.VpcSecurityGroupIds = s + } + + if attr := d.Get("security_group_names").(*schema.Set); attr.Len() > 0 { + var s []*string + for _, v := range attr.List() { + s = append(s, aws.String(v.(string))) + } + opts.DBSecurityGroups = s + } + if attr, ok := d.GetOk("storage_type"); ok { + opts.StorageType = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("db_subnet_group_name"); ok { + opts.DBSubnetGroupName = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("iops"); ok { + opts.Iops = aws.Int64(int64(attr.(int))) + } + + if attr, ok := d.GetOk("port"); ok { + opts.Port = aws.Int64(int64(attr.(int))) + } + + if attr, ok := d.GetOk("availability_zone"); ok { + opts.AvailabilityZone = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("monitoring_role_arn"); ok { + opts.MonitoringRoleArn = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("monitoring_interval"); ok { + opts.MonitoringInterval = 
aws.Int64(int64(attr.(int))) + } + + if attr, ok := d.GetOk("option_group_name"); ok { + opts.OptionGroupName = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("kms_key_id"); ok { + opts.KmsKeyId = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("iam_database_authentication_enabled"); ok { + opts.EnableIAMDatabaseAuthentication = aws.Bool(attr.(bool)) + } + + log.Printf("[DEBUG] DB Instance create configuration: %#v", opts) + var err error + err = resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err = conn.CreateDBInstance(&opts) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "InvalidParameterValue" && strings.Contains(awsErr.Message(), "ENHANCED_MONITORING") { + return resource.RetryableError(awsErr) + } + } + return resource.NonRetryableError(err) + } + return nil + }) + if err != nil { + return fmt.Errorf("Error creating DB Instance: %s", err) + } + } + + d.SetId(d.Get("identifier").(string)) + + log.Printf("[INFO] DB Instance ID: %s", d.Id()) + + log.Println( + "[INFO] Waiting for DB Instance to be available") + + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating", "backing-up", "modifying", "resetting-master-credentials", + "maintenance", "renaming", "rebooting", "upgrading", "configuring-enhanced-monitoring"}, + Target: []string{"available"}, + Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta), + Timeout: d.Timeout(schema.TimeoutCreate), + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, // Wait 30 secs before starting + } + + // Wait, catching any errors + _, err := stateConf.WaitForState() + if err != nil { + return err + } + + return resourceAwsDbInstanceRead(d, meta) +} + +func resourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error { + v, err := resourceAwsDbInstanceRetrieve(d, meta) + + if err != nil { + return err + } + if v == nil { + d.SetId("") + return nil + } + + d.Set("name", v.DBName) + d.Set("identifier", v.DBInstanceIdentifier) + d.Set("resource_id", v.DbiResourceId) + d.Set("username", v.MasterUsername) + d.Set("engine", v.Engine) + d.Set("engine_version", v.EngineVersion) + d.Set("allocated_storage", v.AllocatedStorage) + d.Set("iops", v.Iops) + d.Set("copy_tags_to_snapshot", v.CopyTagsToSnapshot) + d.Set("auto_minor_version_upgrade", v.AutoMinorVersionUpgrade) + d.Set("storage_type", v.StorageType) + d.Set("instance_class", v.DBInstanceClass) + d.Set("availability_zone", v.AvailabilityZone) + d.Set("backup_retention_period", v.BackupRetentionPeriod) + d.Set("backup_window", v.PreferredBackupWindow) + d.Set("license_model", v.LicenseModel) + d.Set("maintenance_window", v.PreferredMaintenanceWindow) + d.Set("publicly_accessible", v.PubliclyAccessible) + d.Set("multi_az", v.MultiAZ) + d.Set("kms_key_id", v.KmsKeyId) + d.Set("port", v.DbInstancePort) + d.Set("iam_database_authentication_enabled", v.IAMDatabaseAuthenticationEnabled) + if v.DBSubnetGroup != nil { + d.Set("db_subnet_group_name", v.DBSubnetGroup.DBSubnetGroupName) + } + + if v.CharacterSetName != nil { + d.Set("character_set_name", v.CharacterSetName) + } + + d.Set("timezone", v.Timezone) + + if len(v.DBParameterGroups) > 0 { + d.Set("parameter_group_name", v.DBParameterGroups[0].DBParameterGroupName) + } + + if v.Endpoint != nil { + d.Set("port", v.Endpoint.Port) + d.Set("address", v.Endpoint.Address) + d.Set("hosted_zone_id", v.Endpoint.HostedZoneId) + if v.Endpoint.Address != nil && v.Endpoint.Port != nil { + d.Set("endpoint", + fmt.Sprintf("%s:%d", *v.Endpoint.Address, 
*v.Endpoint.Port)) + } + } + + d.Set("status", v.DBInstanceStatus) + d.Set("storage_encrypted", v.StorageEncrypted) + if len(v.OptionGroupMemberships) > 0 { + d.Set("option_group_name", v.OptionGroupMemberships[0].OptionGroupName) + } + + if v.MonitoringInterval != nil { + d.Set("monitoring_interval", v.MonitoringInterval) + } + + if v.MonitoringRoleArn != nil { + d.Set("monitoring_role_arn", v.MonitoringRoleArn) + } + + // list tags for resource + // set tags + conn := meta.(*AWSClient).rdsconn + arn, err := buildRDSARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region) + if err != nil { + name := "" + if v.DBName != nil && *v.DBName != "" { + name = *v.DBName + } + log.Printf("[DEBUG] Error building ARN for DB Instance, not setting Tags for DB %s", name) + } else { + d.Set("arn", arn) + resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{ + ResourceName: aws.String(arn), + }) + + if err != nil { + log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn) + } + + var dt []*rds.Tag + if resp != nil && len(resp.TagList) > 0 { + dt = resp.TagList + } + d.Set("tags", tagsToMapRDS(dt)) + } + + // Create an empty schema.Set to hold all vpc security group ids + ids := &schema.Set{ + F: schema.HashString, + } + for _, v := range v.VpcSecurityGroups { + ids.Add(*v.VpcSecurityGroupId) + } + d.Set("vpc_security_group_ids", ids) + + // Create an empty schema.Set to hold all security group names + sgn := &schema.Set{ + F: schema.HashString, + } + for _, v := range v.DBSecurityGroups { + sgn.Add(*v.DBSecurityGroupName) + } + d.Set("security_group_names", sgn) + + // replica things + + var replicas []string + for _, v := range v.ReadReplicaDBInstanceIdentifiers { + replicas = append(replicas, *v) + } + if err := d.Set("replicas", replicas); err != nil { + return fmt.Errorf("Error setting replicas attribute: %#v, error: %s", replicas, err) + } + + d.Set("replicate_source_db", v.ReadReplicaSourceDBInstanceIdentifier) + + return nil +} + +func resourceAwsDbInstanceDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + + log.Printf("[DEBUG] DB Instance destroy: %v", d.Id()) + + opts := rds.DeleteDBInstanceInput{DBInstanceIdentifier: aws.String(d.Id())} + + skipFinalSnapshot := d.Get("skip_final_snapshot").(bool) + opts.SkipFinalSnapshot = aws.Bool(skipFinalSnapshot) + + if !skipFinalSnapshot { + if name, present := d.GetOk("final_snapshot_identifier"); present { + opts.FinalDBSnapshotIdentifier = aws.String(name.(string)) + } else { + return fmt.Errorf("final_snapshot_identifier is required when skip_final_snapshot is false") + } + } + + log.Printf("[DEBUG] DB Instance destroy configuration: %v", opts) + if _, err := conn.DeleteDBInstance(&opts); err != nil { + return err + } + + log.Println( + "[INFO] Waiting for DB Instance to be destroyed") + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating", "backing-up", + "modifying", "deleting", "available"}, + Target: []string{}, + Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta), + Timeout: d.Timeout(schema.TimeoutDelete), + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, // Wait 30 secs before starting + } + if _, err := stateConf.WaitForState(); err != nil { + return err + } + + return nil +} + +func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + + d.Partial(true) + + req := &rds.ModifyDBInstanceInput{ + ApplyImmediately:
aws.Bool(d.Get("apply_immediately").(bool)), + DBInstanceIdentifier: aws.String(d.Id()), + } + d.SetPartial("apply_immediately") + + if !d.Get("apply_immediately").(bool) { + log.Println("[INFO] Only settings updating, instance changes will be applied in next maintenance window") + } + + requestUpdate := false + if d.HasChange("allocated_storage") || d.HasChange("iops") { + d.SetPartial("allocated_storage") + d.SetPartial("iops") + req.Iops = aws.Int64(int64(d.Get("iops").(int))) + req.AllocatedStorage = aws.Int64(int64(d.Get("allocated_storage").(int))) + requestUpdate = true + } + if d.HasChange("allow_major_version_upgrade") { + d.SetPartial("allow_major_version_upgrade") + req.AllowMajorVersionUpgrade = aws.Bool(d.Get("allow_major_version_upgrade").(bool)) + requestUpdate = true + } + if d.HasChange("backup_retention_period") { + d.SetPartial("backup_retention_period") + req.BackupRetentionPeriod = aws.Int64(int64(d.Get("backup_retention_period").(int))) + requestUpdate = true + } + if d.HasChange("copy_tags_to_snapshot") { + d.SetPartial("copy_tags_to_snapshot") + req.CopyTagsToSnapshot = aws.Bool(d.Get("copy_tags_to_snapshot").(bool)) + requestUpdate = true + } + if d.HasChange("instance_class") { + d.SetPartial("instance_class") + req.DBInstanceClass = aws.String(d.Get("instance_class").(string)) + requestUpdate = true + } + if d.HasChange("parameter_group_name") { + d.SetPartial("parameter_group_name") + req.DBParameterGroupName = aws.String(d.Get("parameter_group_name").(string)) + requestUpdate = true + } + if d.HasChange("engine_version") { + d.SetPartial("engine_version") + req.EngineVersion = aws.String(d.Get("engine_version").(string)) + req.AllowMajorVersionUpgrade = aws.Bool(d.Get("allow_major_version_upgrade").(bool)) + requestUpdate = true + } + if d.HasChange("backup_window") { + d.SetPartial("backup_window") + req.PreferredBackupWindow = aws.String(d.Get("backup_window").(string)) + requestUpdate = true + } + if d.HasChange("maintenance_window") { + d.SetPartial("maintenance_window") + req.PreferredMaintenanceWindow = aws.String(d.Get("maintenance_window").(string)) + requestUpdate = true + } + if d.HasChange("password") { + d.SetPartial("password") + req.MasterUserPassword = aws.String(d.Get("password").(string)) + requestUpdate = true + } + if d.HasChange("multi_az") { + d.SetPartial("multi_az") + req.MultiAZ = aws.Bool(d.Get("multi_az").(bool)) + requestUpdate = true + } + if d.HasChange("publicly_accessible") { + d.SetPartial("publicly_accessible") + req.PubliclyAccessible = aws.Bool(d.Get("publicly_accessible").(bool)) + requestUpdate = true + } + if d.HasChange("storage_type") { + d.SetPartial("storage_type") + req.StorageType = aws.String(d.Get("storage_type").(string)) + requestUpdate = true + + if *req.StorageType == "io1" { + req.Iops = aws.Int64(int64(d.Get("iops").(int))) + } + } + if d.HasChange("auto_minor_version_upgrade") { + d.SetPartial("auto_minor_version_upgrade") + req.AutoMinorVersionUpgrade = aws.Bool(d.Get("auto_minor_version_upgrade").(bool)) + requestUpdate = true + } + + if d.HasChange("monitoring_role_arn") { + d.SetPartial("monitoring_role_arn") + req.MonitoringRoleArn = aws.String(d.Get("monitoring_role_arn").(string)) + requestUpdate = true + } + + if d.HasChange("monitoring_interval") { + d.SetPartial("monitoring_interval") + req.MonitoringInterval = aws.Int64(int64(d.Get("monitoring_interval").(int))) + requestUpdate = true + } + + if d.HasChange("vpc_security_group_ids") { + if attr := d.Get("vpc_security_group_ids").(*schema.Set); 
attr.Len() > 0 { + var s []*string + for _, v := range attr.List() { + s = append(s, aws.String(v.(string))) + } + req.VpcSecurityGroupIds = s + } + requestUpdate = true + } + + if d.HasChange("security_group_names") { + if attr := d.Get("security_group_names").(*schema.Set); attr.Len() > 0 { + var s []*string + for _, v := range attr.List() { + s = append(s, aws.String(v.(string))) + } + req.DBSecurityGroups = s + } + requestUpdate = true + } + + if d.HasChange("option_group_name") { + d.SetPartial("option_group_name") + req.OptionGroupName = aws.String(d.Get("option_group_name").(string)) + requestUpdate = true + } + + if d.HasChange("port") { + d.SetPartial("port") + req.DBPortNumber = aws.Int64(int64(d.Get("port").(int))) + requestUpdate = true + } + if d.HasChange("db_subnet_group_name") && !d.IsNewResource() { + d.SetPartial("db_subnet_group_name") + req.DBSubnetGroupName = aws.String(d.Get("db_subnet_group_name").(string)) + requestUpdate = true + } + + if d.HasChange("iam_database_authentication_enabled") { + req.EnableIAMDatabaseAuthentication = aws.Bool(d.Get("iam_database_authentication_enabled").(bool)) + requestUpdate = true + } + + log.Printf("[DEBUG] Send DB Instance Modification request: %t", requestUpdate) + if requestUpdate { + log.Printf("[DEBUG] DB Instance Modification request: %s", req) + _, err := conn.ModifyDBInstance(req) + if err != nil { + return fmt.Errorf("Error modifying DB Instance %s: %s", d.Id(), err) + } + + log.Println("[INFO] Waiting for DB Instance to be available") + + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating", "backing-up", "modifying", "resetting-master-credentials", + "maintenance", "renaming", "rebooting", "upgrading", "configuring-enhanced-monitoring", "moving-to-vpc"}, + Target: []string{"available"}, + Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta), + Timeout: d.Timeout(schema.TimeoutUpdate), + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, // Wait 30 secs before starting + } + + // Wait, catching any errors + _, dbStateErr := stateConf.WaitForState() + if dbStateErr != nil { + return dbStateErr + } + } + + // separate request to promote a database + if d.HasChange("replicate_source_db") { + if d.Get("replicate_source_db").(string) == "" { + // promote + opts := rds.PromoteReadReplicaInput{ + DBInstanceIdentifier: aws.String(d.Id()), + } + attr := d.Get("backup_retention_period") + opts.BackupRetentionPeriod = aws.Int64(int64(attr.(int))) + if attr, ok := d.GetOk("backup_window"); ok { + opts.PreferredBackupWindow = aws.String(attr.(string)) + } + _, err := conn.PromoteReadReplica(&opts) + if err != nil { + return fmt.Errorf("Error promoting database: %#v", err) + } + d.Set("replicate_source_db", "") + } else { + return fmt.Errorf("cannot elect new source database for replication") + } + } + + if arn, err := buildRDSARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil { + if err := setTagsRDS(conn, d, arn); err != nil { + return err + } else { + d.SetPartial("tags") + } + } + d.Partial(false) + + return resourceAwsDbInstanceRead(d, meta) +} + +// resourceAwsDbInstanceRetrieve fetches DBInstance information from the AWS +// API. It returns an error if there is a communication problem or unexpected +// error with AWS. When the DBInstance is not found, it returns no error and a +// nil pointer. 
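+// Callers rely on that contract: the Read function clears the resource ID when a +// nil instance comes back, and the state refresh function below reports it as an +// empty state instead of an error.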
+func resourceAwsDbInstanceRetrieve( + d *schema.ResourceData, meta interface{}) (*rds.DBInstance, error) { + conn := meta.(*AWSClient).rdsconn + + opts := rds.DescribeDBInstancesInput{ + DBInstanceIdentifier: aws.String(d.Id()), + } + + log.Printf("[DEBUG] DB Instance describe configuration: %#v", opts) + + resp, err := conn.DescribeDBInstances(&opts) + if err != nil { + dbinstanceerr, ok := err.(awserr.Error) + if ok && dbinstanceerr.Code() == "DBInstanceNotFound" { + return nil, nil + } + return nil, fmt.Errorf("Error retrieving DB Instances: %s", err) + } + + // Treat an empty or mismatched result as "not found" instead of indexing + // into a possibly empty slice. + if len(resp.DBInstances) != 1 || + *resp.DBInstances[0].DBInstanceIdentifier != d.Id() { + return nil, nil + } + + return resp.DBInstances[0], nil +} + +func resourceAwsDbInstanceImport( + d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + // Neither skip_final_snapshot nor final_snapshot_identifier can be fetched + // from any API call, so we need to default skip_final_snapshot to true so + // that final_snapshot_identifier is not required + d.Set("skip_final_snapshot", true) + return []*schema.ResourceData{d}, nil +} + +func resourceAwsDbInstanceStateRefreshFunc( + d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + v, err := resourceAwsDbInstanceRetrieve(d, meta) + + if err != nil { + log.Printf("[WARN] Error on retrieving DB Instance when waiting: %s", err) + return nil, "", err + } + + if v == nil { + return nil, "", nil + } + + if v.DBInstanceStatus != nil { + log.Printf("[DEBUG] DB Instance status for instance %s: %s", d.Id(), *v.DBInstanceStatus) + } + + return v, aws.StringValue(v.DBInstanceStatus), nil + } +} + +func buildRDSARN(identifier, partition, accountid, region string) (string, error) { + if partition == "" { + return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS partition") + } + if accountid == "" { + return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS Account ID") + } + arn := fmt.Sprintf("arn:%s:rds:%s:%s:db:%s", partition, region, accountid, identifier) + return arn, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_option_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_option_group.go new file mode 100644 index 000000000..258572bb2 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_option_group.go @@ -0,0 +1,368 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/rds" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsDbOptionGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsDbOptionGroupCreate, + Read: resourceAwsDbOptionGroupRead, + Update: resourceAwsDbOptionGroupUpdate, + Delete: resourceAwsDbOptionGroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Delete: schema.DefaultTimeout(15 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc:
validateDbOptionGroupName, + }, + "name_prefix": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateDbOptionGroupNamePrefix, + }, + "engine_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "major_engine_version": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "option_group_description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "Managed by Terraform", + }, + + "option": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "option_name": { + Type: schema.TypeString, + Required: true, + }, + "option_settings": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "port": { + Type: schema.TypeInt, + Optional: true, + }, + "db_security_group_memberships": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "vpc_security_group_memberships": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + }, + Set: resourceAwsDbOptionHash, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsDbOptionGroupCreate(d *schema.ResourceData, meta interface{}) error { + rdsconn := meta.(*AWSClient).rdsconn + tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) + + var groupName string + if v, ok := d.GetOk("name"); ok { + groupName = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + groupName = resource.PrefixedUniqueId(v.(string)) + } else { + groupName = resource.UniqueId() + } + + createOpts := &rds.CreateOptionGroupInput{ + EngineName: aws.String(d.Get("engine_name").(string)), + MajorEngineVersion: aws.String(d.Get("major_engine_version").(string)), + OptionGroupDescription: aws.String(d.Get("option_group_description").(string)), + OptionGroupName: aws.String(groupName), + Tags: tags, + } + + log.Printf("[DEBUG] Create DB Option Group: %#v", createOpts) + _, err := rdsconn.CreateOptionGroup(createOpts) + if err != nil { + return fmt.Errorf("Error creating DB Option Group: %s", err) + } + + d.SetId(strings.ToLower(groupName)) + log.Printf("[INFO] DB Option Group ID: %s", d.Id()) + + return resourceAwsDbOptionGroupUpdate(d, meta) +} + +func resourceAwsDbOptionGroupRead(d *schema.ResourceData, meta interface{}) error { + rdsconn := meta.(*AWSClient).rdsconn + params := &rds.DescribeOptionGroupsInput{ + OptionGroupName: aws.String(d.Id()), + } + + log.Printf("[DEBUG] Describe DB Option Group: %#v", params) + options, err := rdsconn.DescribeOptionGroups(params) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if "OptionGroupNotFoundFault" == awsErr.Code() { + d.SetId("") + log.Printf("[DEBUG] DB Option Group (%s) not found", d.Get("name").(string)) + return nil + } + } + return fmt.Errorf("Error Describing DB Option Group: %s", err) + } + + var option *rds.OptionGroup + for _, ogl := range options.OptionGroupsList { + if *ogl.OptionGroupName == d.Id() { + option = ogl + break + } + } + + if option == nil { + return fmt.Errorf("Unable to find Option Group: %#v", options.OptionGroupsList) + } + + d.Set("name", option.OptionGroupName) + d.Set("major_engine_version", option.MajorEngineVersion) + d.Set("engine_name", option.EngineName) 
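+ // Persist the matched group's attributes; configured options are flattened into + // the "option" set, and tags are read separately via ListTagsForResource using + // the option group ARN.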
+ d.Set("option_group_description", option.OptionGroupDescription) + if len(option.Options) != 0 { + d.Set("option", flattenOptions(option.Options)) + } + + optionGroup := options.OptionGroupsList[0] + arn, err := buildRDSOptionGroupARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region) + if err != nil { + name := "" + if optionGroup.OptionGroupName != nil && *optionGroup.OptionGroupName != "" { + name = *optionGroup.OptionGroupName + } + log.Printf("[DEBUG] Error building ARN for DB Option Group, not setting Tags for Option Group %s", name) + } else { + d.Set("arn", arn) + resp, err := rdsconn.ListTagsForResource(&rds.ListTagsForResourceInput{ + ResourceName: aws.String(arn), + }) + + if err != nil { + log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn) + } + + var dt []*rds.Tag + if len(resp.TagList) > 0 { + dt = resp.TagList + } + d.Set("tags", tagsToMapRDS(dt)) + } + + return nil +} + +func optionInList(optionName string, list []*string) bool { + for _, opt := range list { + if *opt == optionName { + return true + } + } + return false +} + +func resourceAwsDbOptionGroupUpdate(d *schema.ResourceData, meta interface{}) error { + rdsconn := meta.(*AWSClient).rdsconn + if d.HasChange("option") { + o, n := d.GetChange("option") + if o == nil { + o = new(schema.Set) + } + if n == nil { + n = new(schema.Set) + } + + os := o.(*schema.Set) + ns := n.(*schema.Set) + addOptions, addErr := expandOptionConfiguration(ns.Difference(os).List()) + if addErr != nil { + return addErr + } + + addingOptionNames, err := flattenOptionNames(ns.Difference(os).List()) + if err != nil { + return err + } + + removeOptions := []*string{} + opts, err := flattenOptionNames(os.Difference(ns).List()) + if err != nil { + return err + } + + for _, optionName := range opts { + if optionInList(*optionName, addingOptionNames) { + continue + } + removeOptions = append(removeOptions, optionName) + } + + modifyOpts := &rds.ModifyOptionGroupInput{ + OptionGroupName: aws.String(d.Id()), + ApplyImmediately: aws.Bool(true), + } + + if len(addOptions) > 0 { + modifyOpts.OptionsToInclude = addOptions + } + + if len(removeOptions) > 0 { + modifyOpts.OptionsToRemove = removeOptions + } + + log.Printf("[DEBUG] Modify DB Option Group: %s", modifyOpts) + _, err = rdsconn.ModifyOptionGroup(modifyOpts) + if err != nil { + return fmt.Errorf("Error modifying DB Option Group: %s", err) + } + d.SetPartial("option") + + } + + if arn, err := buildRDSOptionGroupARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil { + if err := setTagsRDS(rdsconn, d, arn); err != nil { + return err + } else { + d.SetPartial("tags") + } + } + + return resourceAwsDbOptionGroupRead(d, meta) +} + +func resourceAwsDbOptionGroupDelete(d *schema.ResourceData, meta interface{}) error { + rdsconn := meta.(*AWSClient).rdsconn + + deleteOpts := &rds.DeleteOptionGroupInput{ + OptionGroupName: aws.String(d.Id()), + } + + log.Printf("[DEBUG] Delete DB Option Group: %#v", deleteOpts) + ret := resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError { + _, err := rdsconn.DeleteOptionGroup(deleteOpts) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "InvalidOptionGroupStateFault" { + log.Printf("[DEBUG] AWS believes the RDS Option Group is still in use, retrying") + return resource.RetryableError(awsErr) + } + } + return resource.NonRetryableError(err) + } + return nil + }) + if ret != nil { + return fmt.Errorf("Error 
Deleting DB Option Group: %s", ret) + } + return nil +} + +func flattenOptionNames(configured []interface{}) ([]*string, error) { + var optionNames []*string + for _, pRaw := range configured { + data := pRaw.(map[string]interface{}) + optionNames = append(optionNames, aws.String(data["option_name"].(string))) + } + + return optionNames, nil +} + +func resourceAwsDbOptionHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["option_name"].(string))) + if _, ok := m["port"]; ok { + buf.WriteString(fmt.Sprintf("%d-", m["port"].(int))) + } + + for _, oRaw := range m["option_settings"].(*schema.Set).List() { + o := oRaw.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", o["name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", o["value"].(string))) + } + + for _, vpcRaw := range m["vpc_security_group_memberships"].(*schema.Set).List() { + buf.WriteString(fmt.Sprintf("%s-", vpcRaw.(string))) + } + + for _, sgRaw := range m["db_security_group_memberships"].(*schema.Set).List() { + buf.WriteString(fmt.Sprintf("%s-", sgRaw.(string))) + } + return hashcode.String(buf.String()) +} + +func buildRDSOptionGroupARN(identifier, partition, accountid, region string) (string, error) { + if partition == "" { + return "", fmt.Errorf("Unable to construct RDS Option Group ARN because of missing AWS partition") + } + if accountid == "" { + return "", fmt.Errorf("Unable to construct RDS Option Group ARN because of missing AWS Account ID") + } + arn := fmt.Sprintf("arn:%s:rds:%s:%s:og:%s", partition, region, accountid, identifier) + return arn, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_parameter_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_parameter_group.go new file mode 100644 index 000000000..fe935b636 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_parameter_group.go @@ -0,0 +1,293 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/rds" +) + +func resourceAwsDbParameterGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsDbParameterGroupCreate, + Read: resourceAwsDbParameterGroupRead, + Update: resourceAwsDbParameterGroupUpdate, + Delete: resourceAwsDbParameterGroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: validateDbParamGroupName, + }, + "name_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateDbParamGroupNamePrefix, + }, + "family": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "Managed by Terraform", + }, + "parameter": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: false, + Elem: &schema.Resource{ + 
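+ // Each parameter block is a name/value pair plus an apply_method, which + // defaults to "immediate".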
Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "apply_method": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "immediate", + }, + }, + }, + Set: resourceAwsDbParameterHash, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsDbParameterGroupCreate(d *schema.ResourceData, meta interface{}) error { + rdsconn := meta.(*AWSClient).rdsconn + tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) + + var groupName string + if v, ok := d.GetOk("name"); ok { + groupName = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + groupName = resource.PrefixedUniqueId(v.(string)) + } else { + groupName = resource.UniqueId() + } + d.Set("name", groupName) + + createOpts := rds.CreateDBParameterGroupInput{ + DBParameterGroupName: aws.String(groupName), + DBParameterGroupFamily: aws.String(d.Get("family").(string)), + Description: aws.String(d.Get("description").(string)), + Tags: tags, + } + + log.Printf("[DEBUG] Create DB Parameter Group: %#v", createOpts) + _, err := rdsconn.CreateDBParameterGroup(&createOpts) + if err != nil { + return fmt.Errorf("Error creating DB Parameter Group: %s", err) + } + + d.Partial(true) + d.SetPartial("name") + d.SetPartial("family") + d.SetPartial("description") + d.Partial(false) + + d.SetId(*createOpts.DBParameterGroupName) + log.Printf("[INFO] DB Parameter Group ID: %s", d.Id()) + + return resourceAwsDbParameterGroupUpdate(d, meta) +} + +func resourceAwsDbParameterGroupRead(d *schema.ResourceData, meta interface{}) error { + rdsconn := meta.(*AWSClient).rdsconn + + describeOpts := rds.DescribeDBParameterGroupsInput{ + DBParameterGroupName: aws.String(d.Id()), + } + + describeResp, err := rdsconn.DescribeDBParameterGroups(&describeOpts) + if err != nil { + return err + } + + if len(describeResp.DBParameterGroups) != 1 || + *describeResp.DBParameterGroups[0].DBParameterGroupName != d.Id() { + return fmt.Errorf("Unable to find Parameter Group: %#v", describeResp.DBParameterGroups) + } + + d.Set("name", describeResp.DBParameterGroups[0].DBParameterGroupName) + d.Set("family", describeResp.DBParameterGroups[0].DBParameterGroupFamily) + d.Set("description", describeResp.DBParameterGroups[0].Description) + + // Only include user customized parameters as there's hundreds of system/default ones + describeParametersOpts := rds.DescribeDBParametersInput{ + DBParameterGroupName: aws.String(d.Id()), + Source: aws.String("user"), + } + + describeParametersResp, err := rdsconn.DescribeDBParameters(&describeParametersOpts) + if err != nil { + return err + } + + d.Set("parameter", flattenParameters(describeParametersResp.Parameters)) + + paramGroup := describeResp.DBParameterGroups[0] + arn, err := buildRDSPGARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region) + if err != nil { + name := "" + if paramGroup.DBParameterGroupName != nil && *paramGroup.DBParameterGroupName != "" { + name = *paramGroup.DBParameterGroupName + } + log.Printf("[DEBUG] Error building ARN for DB Parameter Group, not setting Tags for Param Group %s", name) + } else { + d.Set("arn", arn) + resp, err := rdsconn.ListTagsForResource(&rds.ListTagsForResourceInput{ + ResourceName: aws.String(arn), + }) + + if err != nil { + log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn) + } + + var dt []*rds.Tag + if len(resp.TagList) > 0 { + dt = resp.TagList + } + d.Set("tags", 
tagsToMapRDS(dt)) + } + + return nil +} + +func resourceAwsDbParameterGroupUpdate(d *schema.ResourceData, meta interface{}) error { + rdsconn := meta.(*AWSClient).rdsconn + + d.Partial(true) + + if d.HasChange("parameter") { + o, n := d.GetChange("parameter") + if o == nil { + o = new(schema.Set) + } + if n == nil { + n = new(schema.Set) + } + + os := o.(*schema.Set) + ns := n.(*schema.Set) + + // Expand the "parameter" set to aws-sdk-go compat []rds.Parameter + parameters, err := expandParameters(ns.Difference(os).List()) + if err != nil { + return err + } + + if len(parameters) > 0 { + // We can only modify 20 parameters at a time, so walk them until + // we've got them all. + maxParams := 20 + for parameters != nil { + paramsToModify := make([]*rds.Parameter, 0) + if len(parameters) <= maxParams { + paramsToModify, parameters = parameters[:], nil + } else { + paramsToModify, parameters = parameters[:maxParams], parameters[maxParams:] + } + modifyOpts := rds.ModifyDBParameterGroupInput{ + DBParameterGroupName: aws.String(d.Get("name").(string)), + Parameters: paramsToModify, + } + + log.Printf("[DEBUG] Modify DB Parameter Group: %s", modifyOpts) + _, err = rdsconn.ModifyDBParameterGroup(&modifyOpts) + if err != nil { + return fmt.Errorf("Error modifying DB Parameter Group: %s", err) + } + } + d.SetPartial("parameter") + } + } + + if arn, err := buildRDSPGARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil { + if err := setTagsRDS(rdsconn, d, arn); err != nil { + return err + } else { + d.SetPartial("tags") + } + } + + d.Partial(false) + + return resourceAwsDbParameterGroupRead(d, meta) +} + +func resourceAwsDbParameterGroupDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + return resource.Retry(3*time.Minute, func() *resource.RetryError { + deleteOpts := rds.DeleteDBParameterGroupInput{ + DBParameterGroupName: aws.String(d.Id()), + } + + _, err := conn.DeleteDBParameterGroup(&deleteOpts) + if err != nil { + awsErr, ok := err.(awserr.Error) + if ok && awsErr.Code() == "DBParameterGroupNotFoundFault" { + return resource.RetryableError(err) + } + if ok && awsErr.Code() == "InvalidDBParameterGroupState" { + return resource.RetryableError(err) + } + } + return resource.NonRetryableError(err) + }) +} + +func resourceAwsDbParameterHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) + // Store the value as a lower case string, to match how we store them in flattenParameters + buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["value"].(string)))) + + return hashcode.String(buf.String()) +} + +func buildRDSPGARN(identifier, partition, accountid, region string) (string, error) { + if partition == "" { + return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS partition") + } + if accountid == "" { + return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS Account ID") + } + arn := fmt.Sprintf("arn:%s:rds:%s:%s:pg:%s", partition, region, accountid, identifier) + return arn, nil + +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_security_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_security_group.go new file mode 100644 index 000000000..b9e73f2fb --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_security_group.go @@ -0,0 +1,434 @@ 
+package aws + +import ( + "bytes" + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/rds" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsDbSecurityGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsDbSecurityGroupCreate, + Read: resourceAwsDbSecurityGroupRead, + Update: resourceAwsDbSecurityGroupUpdate, + Delete: resourceAwsDbSecurityGroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "Managed by Terraform", + }, + + "ingress": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cidr": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "security_group_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "security_group_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "security_group_owner_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + }, + Set: resourceAwsDbSecurityGroupIngressHash, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsDbSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) + + var err error + var errs []error + + opts := rds.CreateDBSecurityGroupInput{ + DBSecurityGroupName: aws.String(d.Get("name").(string)), + DBSecurityGroupDescription: aws.String(d.Get("description").(string)), + Tags: tags, + } + + log.Printf("[DEBUG] DB Security Group create configuration: %#v", opts) + _, err = conn.CreateDBSecurityGroup(&opts) + if err != nil { + return fmt.Errorf("Error creating DB Security Group: %s", err) + } + + d.SetId(d.Get("name").(string)) + + log.Printf("[INFO] DB Security Group ID: %s", d.Id()) + + sg, err := resourceAwsDbSecurityGroupRetrieve(d, meta) + if err != nil { + return err + } + + ingresses := d.Get("ingress").(*schema.Set) + for _, ing := range ingresses.List() { + err := resourceAwsDbSecurityGroupAuthorizeRule(ing, *sg.DBSecurityGroupName, conn) + if err != nil { + errs = append(errs, err) + } + } + + if len(errs) > 0 { + return &multierror.Error{Errors: errs} + } + + log.Println( + "[INFO] Waiting for Ingress Authorizations to be authorized") + + stateConf := &resource.StateChangeConf{ + Pending: []string{"authorizing"}, + Target: []string{"authorized"}, + Refresh: resourceAwsDbSecurityGroupStateRefreshFunc(d, meta), + Timeout: 10 * time.Minute, + } + + // Wait, catching any errors + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + return resourceAwsDbSecurityGroupRead(d, meta) +} + +func resourceAwsDbSecurityGroupRead(d *schema.ResourceData, meta interface{}) error { + sg, err := resourceAwsDbSecurityGroupRetrieve(d, meta) + if err != nil { + return err + } + + d.Set("name", *sg.DBSecurityGroupName) + d.Set("description", *sg.DBSecurityGroupDescription) + + // 
Create an empty schema.Set to hold all ingress rules + rules := &schema.Set{ + F: resourceAwsDbSecurityGroupIngressHash, + } + + for _, v := range sg.IPRanges { + rule := map[string]interface{}{"cidr": *v.CIDRIP} + rules.Add(rule) + } + + for _, g := range sg.EC2SecurityGroups { + rule := map[string]interface{}{} + if g.EC2SecurityGroupId != nil { + rule["security_group_id"] = *g.EC2SecurityGroupId + } + if g.EC2SecurityGroupName != nil { + rule["security_group_name"] = *g.EC2SecurityGroupName + } + if g.EC2SecurityGroupOwnerId != nil { + rule["security_group_owner_id"] = *g.EC2SecurityGroupOwnerId + } + rules.Add(rule) + } + + d.Set("ingress", rules) + + conn := meta.(*AWSClient).rdsconn + arn, err := buildRDSSecurityGroupARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region) + if err != nil { + name := "" + if sg.DBSecurityGroupName != nil && *sg.DBSecurityGroupName != "" { + name = *sg.DBSecurityGroupName + } + log.Printf("[DEBUG] Error building ARN for DB Security Group, not setting Tags for DB Security Group %s", name) + } else { + d.Set("arn", arn) + resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{ + ResourceName: aws.String(arn), + }) + + if err != nil { + log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn) + } + + var dt []*rds.Tag + if len(resp.TagList) > 0 { + dt = resp.TagList + } + d.Set("tags", tagsToMapRDS(dt)) + } + + return nil +} + +func resourceAwsDbSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + + d.Partial(true) + if arn, err := buildRDSSecurityGroupARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil { + if err := setTagsRDS(conn, d, arn); err != nil { + return err + } else { + d.SetPartial("tags") + } + } + + if d.HasChange("ingress") { + sg, err := resourceAwsDbSecurityGroupRetrieve(d, meta) + if err != nil { + return err + } + + oi, ni := d.GetChange("ingress") + if oi == nil { + oi = new(schema.Set) + } + if ni == nil { + ni = new(schema.Set) + } + + ois := oi.(*schema.Set) + nis := ni.(*schema.Set) + removeIngress := ois.Difference(nis).List() + newIngress := nis.Difference(ois).List() + + // DELETE old Ingress rules + for _, ing := range removeIngress { + err := resourceAwsDbSecurityGroupRevokeRule(ing, *sg.DBSecurityGroupName, conn) + if err != nil { + return err + } + } + + // ADD new/updated Ingress rules + for _, ing := range newIngress { + err := resourceAwsDbSecurityGroupAuthorizeRule(ing, *sg.DBSecurityGroupName, conn) + if err != nil { + return err + } + } + } + d.Partial(false) + + return resourceAwsDbSecurityGroupRead(d, meta) +} + +func resourceAwsDbSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + + log.Printf("[DEBUG] DB Security Group destroy: %v", d.Id()) + + opts := rds.DeleteDBSecurityGroupInput{DBSecurityGroupName: aws.String(d.Id())} + + log.Printf("[DEBUG] DB Security Group destroy configuration: %v", opts) + _, err := conn.DeleteDBSecurityGroup(&opts) + + if err != nil { + newerr, ok := err.(awserr.Error) + if ok && newerr.Code() == "InvalidDBSecurityGroup.NotFound" { + return nil + } + return err + } + + return nil +} + +func resourceAwsDbSecurityGroupRetrieve(d *schema.ResourceData, meta interface{}) (*rds.DBSecurityGroup, error) { + conn := meta.(*AWSClient).rdsconn + + opts := rds.DescribeDBSecurityGroupsInput{ + DBSecurityGroupName: aws.String(d.Id()), + } + + log.Printf("[DEBUG] DB Security Group 
describe configuration: %#v", opts) + + resp, err := conn.DescribeDBSecurityGroups(&opts) + + if err != nil { + return nil, fmt.Errorf("Error retrieving DB Security Groups: %s", err) + } + + if len(resp.DBSecurityGroups) != 1 || + *resp.DBSecurityGroups[0].DBSecurityGroupName != d.Id() { + return nil, fmt.Errorf("Unable to find DB Security Group: %#v", resp.DBSecurityGroups) + } + + return resp.DBSecurityGroups[0], nil +} + +// Authorizes the ingress rule on the db security group +func resourceAwsDbSecurityGroupAuthorizeRule(ingress interface{}, dbSecurityGroupName string, conn *rds.RDS) error { + ing := ingress.(map[string]interface{}) + + opts := rds.AuthorizeDBSecurityGroupIngressInput{ + DBSecurityGroupName: aws.String(dbSecurityGroupName), + } + + if attr, ok := ing["cidr"]; ok && attr != "" { + opts.CIDRIP = aws.String(attr.(string)) + } + + if attr, ok := ing["security_group_name"]; ok && attr != "" { + opts.EC2SecurityGroupName = aws.String(attr.(string)) + } + + if attr, ok := ing["security_group_id"]; ok && attr != "" { + opts.EC2SecurityGroupId = aws.String(attr.(string)) + } + + if attr, ok := ing["security_group_owner_id"]; ok && attr != "" { + opts.EC2SecurityGroupOwnerId = aws.String(attr.(string)) + } + + log.Printf("[DEBUG] Authorize ingress rule configuration: %#v", opts) + + _, err := conn.AuthorizeDBSecurityGroupIngress(&opts) + + if err != nil { + return fmt.Errorf("Error authorizing security group ingress: %s", err) + } + + return nil +} + +// Revokes the ingress rule on the db security group +func resourceAwsDbSecurityGroupRevokeRule(ingress interface{}, dbSecurityGroupName string, conn *rds.RDS) error { + ing := ingress.(map[string]interface{}) + + opts := rds.RevokeDBSecurityGroupIngressInput{ + DBSecurityGroupName: aws.String(dbSecurityGroupName), + } + + if attr, ok := ing["cidr"]; ok && attr != "" { + opts.CIDRIP = aws.String(attr.(string)) + } + + if attr, ok := ing["security_group_name"]; ok && attr != "" { + opts.EC2SecurityGroupName = aws.String(attr.(string)) + } + + if attr, ok := ing["security_group_id"]; ok && attr != "" { + opts.EC2SecurityGroupId = aws.String(attr.(string)) + } + + if attr, ok := ing["security_group_owner_id"]; ok && attr != "" { + opts.EC2SecurityGroupOwnerId = aws.String(attr.(string)) + } + + log.Printf("[DEBUG] Revoking ingress rule configuration: %#v", opts) + + _, err := conn.RevokeDBSecurityGroupIngress(&opts) + + if err != nil { + return fmt.Errorf("Error revoking security group ingress: %s", err) + } + + return nil +} + +func resourceAwsDbSecurityGroupIngressHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + + if v, ok := m["cidr"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + if v, ok := m["security_group_name"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + if v, ok := m["security_group_id"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + if v, ok := m["security_group_owner_id"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + return hashcode.String(buf.String()) +} + +func resourceAwsDbSecurityGroupStateRefreshFunc( + d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + v, err := resourceAwsDbSecurityGroupRetrieve(d, meta) + + if err != nil { + log.Printf("Error on retrieving DB Security Group when waiting: %s", err) + return nil, "", err + } + + statuses := make([]string, 0, len(v.EC2SecurityGroups)+len(v.IPRanges)) + for _, ec2g := range v.EC2SecurityGroups { 
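+ // Collect the status of every EC2 security group and CIDR ingress rule; the + // group only counts as "authorized" once every rule has been authorized.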
+ statuses = append(statuses, *ec2g.Status) + } + for _, ips := range v.IPRanges { + statuses = append(statuses, *ips.Status) + } + + for _, stat := range statuses { + // Not done + if stat != "authorized" { + return nil, "authorizing", nil + } + } + + return v, "authorized", nil + } +} + +func buildRDSSecurityGroupARN(identifier, partition, accountid, region string) (string, error) { + if partition == "" { + return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS partition") + } + if accountid == "" { + return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS Account ID") + } + arn := fmt.Sprintf("arn:%s:rds:%s:%s:secgrp:%s", partition, region, accountid, identifier) + return arn, nil + +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_snapshot.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_snapshot.go new file mode 100644 index 000000000..f2ab24c4a --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_snapshot.go @@ -0,0 +1,216 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/rds" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsDbSnapshot() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsDbSnapshotCreate, + Read: resourceAwsDbSnapshotRead, + Delete: resourceAwsDbSnapshotDelete, + + Timeouts: &schema.ResourceTimeout{ + Read: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "db_snapshot_identifier": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "db_instance_identifier": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "allocated_storage": { + Type: schema.TypeInt, + Computed: true, + }, + "availability_zone": { + Type: schema.TypeString, + Computed: true, + }, + "db_snapshot_arn": { + Type: schema.TypeString, + Computed: true, + }, + "encrypted": { + Type: schema.TypeBool, + Computed: true, + }, + "engine": { + Type: schema.TypeString, + Computed: true, + }, + "engine_version": { + Type: schema.TypeString, + Computed: true, + }, + "iops": { + Type: schema.TypeInt, + Computed: true, + }, + "kms_key_id": { + Type: schema.TypeString, + Computed: true, + }, + "license_model": { + Type: schema.TypeString, + Computed: true, + }, + "option_group_name": { + Type: schema.TypeString, + Computed: true, + }, + "port": { + Type: schema.TypeInt, + Computed: true, + }, + "source_db_snapshot_identifier": { + Type: schema.TypeString, + Computed: true, + }, + "source_region": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_type": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "storage_type": { + Type: schema.TypeString, + Computed: true, + }, + "vpc_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsDbSnapshotCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + + params := &rds.CreateDBSnapshotInput{ + DBInstanceIdentifier: aws.String(d.Get("db_instance_identifier").(string)), + DBSnapshotIdentifier: aws.String(d.Get("db_snapshot_identifier").(string)), + } + + _, err := conn.CreateDBSnapshot(params) + if err != nil { + return err + } + d.SetId(d.Get("db_snapshot_identifier").(string)) 
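+ + // CreateDBSnapshot is asynchronous, so poll until the snapshot leaves the + // "creating" state and becomes "available" before reading it back.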
+ + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating"}, + Target: []string{"available"}, + Refresh: resourceAwsDbSnapshotStateRefreshFunc(d, meta), + Timeout: d.Timeout(schema.TimeoutRead), + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, // Wait 30 secs before starting + } + + // Wait, catching any errors + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + return resourceAwsDbSnapshotRead(d, meta) +} + +func resourceAwsDbSnapshotRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + + params := &rds.DescribeDBSnapshotsInput{ + DBSnapshotIdentifier: aws.String(d.Id()), + } + resp, err := conn.DescribeDBSnapshots(params) + if err != nil { + return err + } + + if len(resp.DBSnapshots) != 1 { + log.Printf("[WARN] DB Snapshot (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + snapshot := resp.DBSnapshots[0] + + d.Set("allocated_storage", snapshot.AllocatedStorage) + d.Set("availability_zone", snapshot.AvailabilityZone) + d.Set("db_snapshot_arn", snapshot.DBSnapshotArn) + d.Set("encrypted", snapshot.Encrypted) + d.Set("engine", snapshot.Engine) + d.Set("engine_version", snapshot.EngineVersion) + d.Set("iops", snapshot.Iops) + d.Set("kms_key_id", snapshot.KmsKeyId) + d.Set("license_model", snapshot.LicenseModel) + d.Set("option_group_name", snapshot.OptionGroupName) + d.Set("port", snapshot.Port) + d.Set("source_db_snapshot_identifier", snapshot.SourceDBSnapshotIdentifier) + d.Set("source_region", snapshot.SourceRegion) + d.Set("snapshot_type", snapshot.SnapshotType) + d.Set("status", snapshot.Status) + d.Set("vpc_id", snapshot.VpcId) + + return nil +} + +func resourceAwsDbSnapshotDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + + params := &rds.DeleteDBSnapshotInput{ + DBSnapshotIdentifier: aws.String(d.Id()), + } + _, err := conn.DeleteDBSnapshot(params) + if err != nil { + return err + } + + return nil +} + +func resourceAwsDbSnapshotStateRefreshFunc( + d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + conn := meta.(*AWSClient).rdsconn + + opts := &rds.DescribeDBSnapshotsInput{ + DBSnapshotIdentifier: aws.String(d.Id()), + } + + log.Printf("[DEBUG] DB Snapshot describe configuration: %#v", opts) + + resp, err := conn.DescribeDBSnapshots(opts) + if err != nil { + snapshoterr, ok := err.(awserr.Error) + if ok && snapshoterr.Code() == "DBSnapshotNotFound" { + return nil, "", nil + } + return nil, "", fmt.Errorf("Error retrieving DB Snapshots: %s", err) + } + + if len(resp.DBSnapshots) != 1 { + return nil, "", fmt.Errorf("No snapshots returned for %s", d.Id()) + } + + snapshot := resp.DBSnapshots[0] + + return resp, *snapshot.Status, nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_subnet_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_subnet_group.go new file mode 100644 index 000000000..c4e437bee --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_db_subnet_group.go @@ -0,0 +1,257 @@ +package aws + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/rds" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsDbSubnetGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsDbSubnetGroupCreate, + Read: resourceAwsDbSubnetGroupRead, + Update: resourceAwsDbSubnetGroupUpdate,
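+ // The subnet group name doubles as the resource ID, so imports can pass the + // ID straight through.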
+ Delete: resourceAwsDbSubnetGroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: validateDbSubnetGroupName, + }, + "name_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateDbSubnetGroupNamePrefix, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "Managed by Terraform", + }, + + "subnet_ids": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsDbSubnetGroupCreate(d *schema.ResourceData, meta interface{}) error { + rdsconn := meta.(*AWSClient).rdsconn + tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) + + subnetIdsSet := d.Get("subnet_ids").(*schema.Set) + subnetIds := make([]*string, subnetIdsSet.Len()) + for i, subnetId := range subnetIdsSet.List() { + subnetIds[i] = aws.String(subnetId.(string)) + } + + var groupName string + if v, ok := d.GetOk("name"); ok { + groupName = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + groupName = resource.PrefixedUniqueId(v.(string)) + } else { + groupName = resource.UniqueId() + } + + createOpts := rds.CreateDBSubnetGroupInput{ + DBSubnetGroupName: aws.String(groupName), + DBSubnetGroupDescription: aws.String(d.Get("description").(string)), + SubnetIds: subnetIds, + Tags: tags, + } + + log.Printf("[DEBUG] Create DB Subnet Group: %#v", createOpts) + _, err := rdsconn.CreateDBSubnetGroup(&createOpts) + if err != nil { + return fmt.Errorf("Error creating DB Subnet Group: %s", err) + } + + d.SetId(*createOpts.DBSubnetGroupName) + log.Printf("[INFO] DB Subnet Group ID: %s", d.Id()) + return resourceAwsDbSubnetGroupRead(d, meta) +} + +func resourceAwsDbSubnetGroupRead(d *schema.ResourceData, meta interface{}) error { + rdsconn := meta.(*AWSClient).rdsconn + + describeOpts := rds.DescribeDBSubnetGroupsInput{ + DBSubnetGroupName: aws.String(d.Id()), + } + + describeResp, err := rdsconn.DescribeDBSubnetGroups(&describeOpts) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "DBSubnetGroupNotFoundFault" { + // Update state to indicate the db subnet no longer exists. + d.SetId("") + return nil + } + return err + } + + if len(describeResp.DBSubnetGroups) == 0 { + return fmt.Errorf("Unable to find DB Subnet Group: %#v", describeResp.DBSubnetGroups) + } + + var subnetGroup *rds.DBSubnetGroup + for _, s := range describeResp.DBSubnetGroups { + // AWS is downcasing the name provided, so we compare lowercase versions + // of the names. We lowercase both our name and their name in the check, + // in case they change that someday.
+ if strings.ToLower(d.Id()) == strings.ToLower(*s.DBSubnetGroupName) { + subnetGroup = s + break + } + } + + if subnetGroup == nil { + return fmt.Errorf("Unable to find DB Subnet Group: %#v", describeResp.DBSubnetGroups) + } + + d.Set("name", subnetGroup.DBSubnetGroupName) + d.Set("description", subnetGroup.DBSubnetGroupDescription) + + subnets := make([]string, 0, len(subnetGroup.Subnets)) + for _, s := range subnetGroup.Subnets { + subnets = append(subnets, *s.SubnetIdentifier) + } + d.Set("subnet_ids", subnets) + + // list tags for resource + // set tags + conn := meta.(*AWSClient).rdsconn + arn, err := buildRDSsubgrpARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region) + if err != nil { + log.Printf("[DEBUG] Error building ARN for DB Subnet Group, not setting Tags for group %s", *subnetGroup.DBSubnetGroupName) + } else { + d.Set("arn", arn) + resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{ + ResourceName: aws.String(arn), + }) + + if err != nil { + log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn) + } + + var dt []*rds.Tag + if resp != nil && len(resp.TagList) > 0 { + dt = resp.TagList + } + d.Set("tags", tagsToMapRDS(dt)) + } + + return nil +} + +func resourceAwsDbSubnetGroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + if d.HasChange("subnet_ids") || d.HasChange("description") { + _, n := d.GetChange("subnet_ids") + if n == nil { + n = new(schema.Set) + } + ns := n.(*schema.Set) + + var sIds []*string + for _, s := range ns.List() { + sIds = append(sIds, aws.String(s.(string))) + } + + _, err := conn.ModifyDBSubnetGroup(&rds.ModifyDBSubnetGroupInput{ + DBSubnetGroupName: aws.String(d.Id()), + DBSubnetGroupDescription: aws.String(d.Get("description").(string)), + SubnetIds: sIds, + }) + + if err != nil { + return err + } + } + + if arn, err := buildRDSsubgrpARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil { + if err := setTagsRDS(conn, d, arn); err != nil { + return err + } else { + d.SetPartial("tags") + } + } + + return resourceAwsDbSubnetGroupRead(d, meta) +} + +func resourceAwsDbSubnetGroupDelete(d *schema.ResourceData, meta interface{}) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"destroyed"}, + Refresh: resourceAwsDbSubnetGroupDeleteRefreshFunc(d, meta), + Timeout: 3 * time.Minute, + MinTimeout: 1 * time.Second, + } + _, err := stateConf.WaitForState() + return err +} + +func resourceAwsDbSubnetGroupDeleteRefreshFunc( + d *schema.ResourceData, + meta interface{}) resource.StateRefreshFunc { + rdsconn := meta.(*AWSClient).rdsconn + + return func() (interface{}, string, error) { + + deleteOpts := rds.DeleteDBSubnetGroupInput{ + DBSubnetGroupName: aws.String(d.Id()), + } + + if _, err := rdsconn.DeleteDBSubnetGroup(&deleteOpts); err != nil { + rdserr, ok := err.(awserr.Error) + if !ok { + return d, "error", err + } + + if rdserr.Code() != "DBSubnetGroupNotFoundFault" { + return d, "error", err + } + } + + return d, "destroyed", nil + } +} + +func buildRDSsubgrpARN(identifier, partition, accountid, region string) (string, error) { + if partition == "" { + return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS partition") + } + if accountid == "" { + return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS Account ID") + } + arn := fmt.Sprintf("arn:%s:rds:%s:%s:subgrp:%s", partition,
region, accountid, identifier) + return arn, nil + +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_network_acl.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_network_acl.go new file mode 100644 index 000000000..419972b18 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_network_acl.go @@ -0,0 +1,287 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +// ACL Network ACLs all contain explicit deny-all rules that cannot be +// destroyed or changed by users. This rules are numbered very high to be a +// catch-all. +// See http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html#default-network-acl +const ( + awsDefaultAclRuleNumberIpv4 = 32767 + awsDefaultAclRuleNumberIpv6 = 32768 +) + +func resourceAwsDefaultNetworkAcl() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsDefaultNetworkAclCreate, + // We reuse aws_network_acl's read method, the operations are the same + Read: resourceAwsNetworkAclRead, + Delete: resourceAwsDefaultNetworkAclDelete, + Update: resourceAwsDefaultNetworkAclUpdate, + + Schema: map[string]*schema.Schema{ + "vpc_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "default_network_acl_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + Computed: false, + }, + // We want explicit management of Subnets here, so we do not allow them to be + // computed. Instead, an empty config will enforce just that; removal of the + // any Subnets that have been assigned to the Default Network ACL. Because we + // can't actually remove them, this will be a continual plan until the + // Subnets are themselves destroyed or reassigned to a different Network + // ACL + "subnet_ids": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + // We want explicit management of Rules here, so we do not allow them to be + // computed. 
+      "ingress": &schema.Schema{
+        Type:     schema.TypeSet,
+        Required: false,
+        Optional: true,
+        Elem: &schema.Resource{
+          Schema: map[string]*schema.Schema{
+            "from_port": &schema.Schema{
+              Type:     schema.TypeInt,
+              Required: true,
+            },
+            "to_port": &schema.Schema{
+              Type:     schema.TypeInt,
+              Required: true,
+            },
+            "rule_no": &schema.Schema{
+              Type:     schema.TypeInt,
+              Required: true,
+            },
+            "action": &schema.Schema{
+              Type:     schema.TypeString,
+              Required: true,
+            },
+            "protocol": &schema.Schema{
+              Type:     schema.TypeString,
+              Required: true,
+            },
+            "cidr_block": &schema.Schema{
+              Type:     schema.TypeString,
+              Optional: true,
+            },
+            "icmp_type": &schema.Schema{
+              Type:     schema.TypeInt,
+              Optional: true,
+            },
+            "icmp_code": &schema.Schema{
+              Type:     schema.TypeInt,
+              Optional: true,
+            },
+          },
+        },
+        Set: resourceAwsNetworkAclEntryHash,
+      },
+      "egress": &schema.Schema{
+        Type:     schema.TypeSet,
+        Required: false,
+        Optional: true,
+        Elem: &schema.Resource{
+          Schema: map[string]*schema.Schema{
+            "from_port": &schema.Schema{
+              Type:     schema.TypeInt,
+              Required: true,
+            },
+            "to_port": &schema.Schema{
+              Type:     schema.TypeInt,
+              Required: true,
+            },
+            "rule_no": &schema.Schema{
+              Type:     schema.TypeInt,
+              Required: true,
+            },
+            "action": &schema.Schema{
+              Type:     schema.TypeString,
+              Required: true,
+            },
+            "protocol": &schema.Schema{
+              Type:     schema.TypeString,
+              Required: true,
+            },
+            "cidr_block": &schema.Schema{
+              Type:     schema.TypeString,
+              Optional: true,
+            },
+            "icmp_type": &schema.Schema{
+              Type:     schema.TypeInt,
+              Optional: true,
+            },
+            "icmp_code": &schema.Schema{
+              Type:     schema.TypeInt,
+              Optional: true,
+            },
+          },
+        },
+        Set: resourceAwsNetworkAclEntryHash,
+      },
+
+      "tags": tagsSchema(),
+    },
+  }
+}
+
+func resourceAwsDefaultNetworkAclCreate(d *schema.ResourceData, meta interface{}) error {
+  d.SetId(d.Get("default_network_acl_id").(string))
+
+  // revoke all default and pre-existing rules on the default network acl.
+  // In the UPDATE method, we'll apply only the rules in the configuration.
+  log.Printf("[DEBUG] Revoking default ingress and egress rules for Default Network ACL for %s", d.Id())
+  err := revokeAllNetworkACLEntries(d.Id(), meta)
+  if err != nil {
+    return err
+  }
+
+  return resourceAwsDefaultNetworkAclUpdate(d, meta)
+}
+
+func resourceAwsDefaultNetworkAclUpdate(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).ec2conn
+  d.Partial(true)
+
+  if d.HasChange("ingress") {
+    err := updateNetworkAclEntries(d, "ingress", conn)
+    if err != nil {
+      return err
+    }
+  }
+
+  if d.HasChange("egress") {
+    err := updateNetworkAclEntries(d, "egress", conn)
+    if err != nil {
+      return err
+    }
+  }
+
+  if d.HasChange("subnet_ids") {
+    o, n := d.GetChange("subnet_ids")
+    if o == nil {
+      o = new(schema.Set)
+    }
+    if n == nil {
+      n = new(schema.Set)
+    }
+
+    os := o.(*schema.Set)
+    ns := n.(*schema.Set)
+
+    remove := os.Difference(ns).List()
+    add := ns.Difference(os).List()
+
+    if len(remove) > 0 {
+      //
+      // NO-OP
+      //
+      // Subnets *must* belong to a Network ACL. Subnets are not "removed" from
+      // Network ACLs, instead their association is replaced. In a normal
+      // Network ACL, any removal of a Subnet is done by replacing the
+      // Subnet/ACL association with an association between the Subnet and the
+      // Default Network ACL. Because we're managing the default here, we cannot
+      // do that, so we simply log a NO-OP. In order to remove the Subnet here,
+      // it must be destroyed, or assigned to a different Network ACL. Those
+      // operations are not handled here
+      log.Printf("[WARN] Cannot remove subnets from the Default Network ACL. They must be re-assigned or destroyed")
+    }
+
+    if len(add) > 0 {
+      for _, a := range add {
+        association, err := findNetworkAclAssociation(a.(string), conn)
+        if err != nil {
+          return fmt.Errorf("Failed to find acl association: acl %s with subnet %s: %s", d.Id(), a, err)
+        }
+        log.Printf("[DEBUG] Updating Network Association for Default Network ACL (%s) and Subnet (%s)", d.Id(), a.(string))
+        _, err = conn.ReplaceNetworkAclAssociation(&ec2.ReplaceNetworkAclAssociationInput{
+          AssociationId: association.NetworkAclAssociationId,
+          NetworkAclId:  aws.String(d.Id()),
+        })
+        if err != nil {
+          return err
+        }
+      }
+    }
+  }
+
+  if err := setTags(conn, d); err != nil {
+    return err
+  } else {
+    d.SetPartial("tags")
+  }
+
+  d.Partial(false)
+  // Re-use the existing Network ACL Resource's READ method
+  return resourceAwsNetworkAclRead(d, meta)
+}
+
+func resourceAwsDefaultNetworkAclDelete(d *schema.ResourceData, meta interface{}) error {
+  log.Printf("[WARN] Cannot destroy Default Network ACL. Terraform will remove this resource from the state file, however resources may remain.")
+  d.SetId("")
+  return nil
+}
+
+// revokeAllNetworkACLEntries revokes all ingress and egress rules that the
+// Default Network ACL currently has
+func revokeAllNetworkACLEntries(netaclId string, meta interface{}) error {
+  conn := meta.(*AWSClient).ec2conn
+
+  resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{
+    NetworkAclIds: []*string{aws.String(netaclId)},
+  })
+
+  if err != nil {
+    log.Printf("[DEBUG] Error looking up Network ACL: %s", err)
+    return err
+  }
+
+  if resp == nil {
+    return fmt.Errorf("[ERR] Error looking up Default Network ACL Entries: No results")
+  }
+
+  networkAcl := resp.NetworkAcls[0]
+  for _, e := range networkAcl.Entries {
+    // Skip the default rules added by AWS. They can be neither
+    // configured nor deleted by users.
+    // See http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html#default-network-acl
+    if *e.RuleNumber == awsDefaultAclRuleNumberIpv4 ||
+      *e.RuleNumber == awsDefaultAclRuleNumberIpv6 {
+      continue
+    }
+
+    // track if this is an egress or ingress rule, for logging purposes
+    rt := "ingress"
+    if *e.Egress {
+      rt = "egress"
+    }
+
+    log.Printf("[DEBUG] Destroying Network ACL (%s) Entry number (%d)", rt, int(*e.RuleNumber))
+    _, err := conn.DeleteNetworkAclEntry(&ec2.DeleteNetworkAclEntryInput{
+      NetworkAclId: aws.String(netaclId),
+      RuleNumber:   e.RuleNumber,
+      Egress:       e.Egress,
+    })
+    if err != nil {
+      return fmt.Errorf("Error deleting entry (%s): %s", e, err)
+    }
+  }
+
+  return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_route_table.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_route_table.go
new file mode 100644
index 000000000..987dd4a7d
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_route_table.go
@@ -0,0 +1,236 @@
+package aws
+
+import (
+  "fmt"
+  "log"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/service/ec2"
+  "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsDefaultRouteTable() *schema.Resource {
+  return &schema.Resource{
+    Create: resourceAwsDefaultRouteTableCreate,
+    Read:   resourceAwsDefaultRouteTableRead,
+    Update: resourceAwsRouteTableUpdate,
+    Delete: resourceAwsDefaultRouteTableDelete,
+
+    Schema: map[string]*schema.Schema{
+      "default_route_table_id": {
+        Type:     schema.TypeString,
+        Required: true,
+        ForceNew: true,
+      },
+
+      "vpc_id": {
+        Type:     schema.TypeString,
+        Computed: true,
+      },
+
+      "propagating_vgws": {
+        Type:     schema.TypeSet,
+        Optional: true,
+        Elem:     &schema.Schema{Type: schema.TypeString},
+        Set:      schema.HashString,
+      },
+
+      "route": {
+        Type:     schema.TypeSet,
+        Computed: true,
+        Optional: true,
+        Elem: &schema.Resource{
+          Schema: map[string]*schema.Schema{
+            "cidr_block": {
+              Type:     schema.TypeString,
+              Optional: true,
+            },
+
+            "ipv6_cidr_block": {
+              Type:     schema.TypeString,
+              Optional: true,
+            },
+
+            "egress_only_gateway_id": {
+              Type:     schema.TypeString,
+              Optional: true,
+            },
+
+            "gateway_id": {
+              Type:     schema.TypeString,
+              Optional: true,
+            },
+
+            "instance_id": {
+              Type:     schema.TypeString,
+              Optional: true,
+            },
+
+            "nat_gateway_id": {
+              Type:     schema.TypeString,
+              Optional: true,
+            },
+
+            "vpc_peering_connection_id": {
+              Type:     schema.TypeString,
+              Optional: true,
+            },
+
+            "network_interface_id": {
+              Type:     schema.TypeString,
+              Optional: true,
+            },
+          },
+        },
+        Set: resourceAwsRouteTableHash,
+      },
+
+      "tags": tagsSchema(),
+    },
+  }
+}
+
+func resourceAwsDefaultRouteTableCreate(d *schema.ResourceData, meta interface{}) error {
+  d.SetId(d.Get("default_route_table_id").(string))
+
+  conn := meta.(*AWSClient).ec2conn
+  rtRaw, _, err := resourceAwsRouteTableStateRefreshFunc(conn, d.Id())()
+  if err != nil {
+    return err
+  }
+  if rtRaw == nil {
+    log.Printf("[WARN] Default Route Table not found")
+    d.SetId("")
+    return nil
+  }
+
+  rt := rtRaw.(*ec2.RouteTable)
+
+  d.Set("vpc_id", rt.VpcId)
+
+  // revoke all default and pre-existing routes on the default route table.
+  // In the UPDATE method, we'll apply only the routes in the configuration.
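+  // The Default Route Table is adopted, not created: we take ownership of
+  // the existing table, strip the routes and VGW propagations it already
+  // carries, and then hand off to the shared aws_route_table Update to
+  // apply exactly the routes declared in configuration.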
+ log.Printf("[DEBUG] Revoking default routes for Default Route Table for %s", d.Id()) + if err := revokeAllRouteTableRules(d.Id(), meta); err != nil { + return err + } + + return resourceAwsRouteTableUpdate(d, meta) +} + +func resourceAwsDefaultRouteTableRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + // look up default route table for VPC + filter1 := &ec2.Filter{ + Name: aws.String("association.main"), + Values: []*string{aws.String("true")}, + } + filter2 := &ec2.Filter{ + Name: aws.String("vpc-id"), + Values: []*string{aws.String(d.Get("vpc_id").(string))}, + } + + findOpts := &ec2.DescribeRouteTablesInput{ + Filters: []*ec2.Filter{filter1, filter2}, + } + + resp, err := conn.DescribeRouteTables(findOpts) + if err != nil { + return err + } + + if len(resp.RouteTables) < 1 || resp.RouteTables[0] == nil { + return fmt.Errorf("Default Route table not found") + } + + rt := resp.RouteTables[0] + + d.Set("default_route_table_id", rt.RouteTableId) + d.SetId(*rt.RouteTableId) + + // re-use regular AWS Route Table READ. This is an extra API call but saves us + // from trying to manually keep parity + return resourceAwsRouteTableRead(d, meta) +} + +func resourceAwsDefaultRouteTableDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[WARN] Cannot destroy Default Route Table. Terraform will remove this resource from the state file, however resources may remain.") + d.SetId("") + return nil +} + +// revokeAllRouteTableRules revoke all routes on the Default Route Table +// This should only be ran once at creation time of this resource +func revokeAllRouteTableRules(defaultRouteTableId string, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + log.Printf("\n***\nrevokeAllRouteTableRules\n***\n") + + resp, err := conn.DescribeRouteTables(&ec2.DescribeRouteTablesInput{ + RouteTableIds: []*string{aws.String(defaultRouteTableId)}, + }) + if err != nil { + return err + } + + if len(resp.RouteTables) < 1 || resp.RouteTables[0] == nil { + return fmt.Errorf("Default Route table not found") + } + + rt := resp.RouteTables[0] + + // Remove all Gateway association + for _, r := range rt.PropagatingVgws { + log.Printf( + "[INFO] Deleting VGW propagation from %s: %s", + defaultRouteTableId, *r.GatewayId) + _, err := conn.DisableVgwRoutePropagation(&ec2.DisableVgwRoutePropagationInput{ + RouteTableId: aws.String(defaultRouteTableId), + GatewayId: r.GatewayId, + }) + if err != nil { + return err + } + } + + // Delete all routes + for _, r := range rt.Routes { + // you cannot delete the local route + if r.GatewayId != nil && *r.GatewayId == "local" { + continue + } + if r.DestinationPrefixListId != nil { + // Skipping because VPC endpoint routes are handled separately + // See aws_vpc_endpoint + continue + } + + if r.DestinationCidrBlock != nil { + log.Printf( + "[INFO] Deleting route from %s: %s", + defaultRouteTableId, *r.DestinationCidrBlock) + _, err := conn.DeleteRoute(&ec2.DeleteRouteInput{ + RouteTableId: aws.String(defaultRouteTableId), + DestinationCidrBlock: r.DestinationCidrBlock, + }) + if err != nil { + return err + } + } + + if r.DestinationIpv6CidrBlock != nil { + log.Printf( + "[INFO] Deleting route from %s: %s", + defaultRouteTableId, *r.DestinationIpv6CidrBlock) + _, err := conn.DeleteRoute(&ec2.DeleteRouteInput{ + RouteTableId: aws.String(defaultRouteTableId), + DestinationIpv6CidrBlock: r.DestinationIpv6CidrBlock, + }) + if err != nil { + return err + } + } + + } + + return nil +} diff --git 
a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_security_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_security_group.go new file mode 100644 index 000000000..f4fb748bb --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_security_group.go @@ -0,0 +1,149 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsDefaultSecurityGroup() *schema.Resource { + // reuse aws_security_group_rule schema, and methods for READ, UPDATE + dsg := resourceAwsSecurityGroup() + dsg.Create = resourceAwsDefaultSecurityGroupCreate + dsg.Delete = resourceAwsDefaultSecurityGroupDelete + + // Descriptions cannot be updated + delete(dsg.Schema, "description") + + // name is a computed value for Default Security Groups and cannot be changed + delete(dsg.Schema, "name_prefix") + dsg.Schema["name"] = &schema.Schema{ + Type: schema.TypeString, + Computed: true, + } + + // We want explicit management of Rules here, so we do not allow them to be + // computed. Instead, an empty config will enforce just that; removal of the + // rules + dsg.Schema["ingress"].Computed = false + dsg.Schema["egress"].Computed = false + return dsg +} + +func resourceAwsDefaultSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + securityGroupOpts := &ec2.DescribeSecurityGroupsInput{ + Filters: []*ec2.Filter{ + &ec2.Filter{ + Name: aws.String("group-name"), + Values: []*string{aws.String("default")}, + }, + }, + } + + var vpcId string + if v, ok := d.GetOk("vpc_id"); ok { + vpcId = v.(string) + securityGroupOpts.Filters = append(securityGroupOpts.Filters, &ec2.Filter{ + Name: aws.String("vpc-id"), + Values: []*string{aws.String(vpcId)}, + }) + } + + var err error + log.Printf("[DEBUG] Commandeer Default Security Group: %s", securityGroupOpts) + resp, err := conn.DescribeSecurityGroups(securityGroupOpts) + if err != nil { + return fmt.Errorf("Error creating Default Security Group: %s", err) + } + + var g *ec2.SecurityGroup + if vpcId != "" { + // if vpcId contains a value, then we expect just a single Security Group + // returned, as default is a protected name for each VPC, and for each + // Region on EC2 Classic + if len(resp.SecurityGroups) != 1 { + return fmt.Errorf("[ERR] Error finding default security group; found (%d) groups: %s", len(resp.SecurityGroups), resp) + } + g = resp.SecurityGroups[0] + } else { + // we need to filter through any returned security groups for the group + // named "default", and does not belong to a VPC + for _, sg := range resp.SecurityGroups { + if sg.VpcId == nil && *sg.GroupName == "default" { + g = sg + } + } + } + + if g == nil { + return fmt.Errorf("[ERR] Error finding default security group: no matching group found") + } + + d.SetId(*g.GroupId) + + log.Printf("[INFO] Default Security Group ID: %s", d.Id()) + + if err := setTags(conn, d); err != nil { + return err + } + + if err := revokeDefaultSecurityGroupRules(meta, g); err != nil { + return errwrap.Wrapf("{{err}}", err) + } + + return resourceAwsSecurityGroupUpdate(d, meta) +} + +func resourceAwsDefaultSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[WARN] Cannot destroy Default Security Group. 
Terraform will remove this resource from the state file, however resources may remain.") + d.SetId("") + return nil +} + +func revokeDefaultSecurityGroupRules(meta interface{}, g *ec2.SecurityGroup) error { + conn := meta.(*AWSClient).ec2conn + + log.Printf("[WARN] Removing all ingress and egress rules found on Default Security Group (%s)", *g.GroupId) + if len(g.IpPermissionsEgress) > 0 { + req := &ec2.RevokeSecurityGroupEgressInput{ + GroupId: g.GroupId, + IpPermissions: g.IpPermissionsEgress, + } + + log.Printf("[DEBUG] Revoking default egress rules for Default Security Group for %s", *g.GroupId) + if _, err := conn.RevokeSecurityGroupEgress(req); err != nil { + return fmt.Errorf( + "Error revoking default egress rules for Default Security Group (%s): %s", + *g.GroupId, err) + } + } + if len(g.IpPermissions) > 0 { + // a limitation in EC2 Classic is that a call to RevokeSecurityGroupIngress + // cannot contain both the GroupName and the GroupId + for _, p := range g.IpPermissions { + for _, uigp := range p.UserIdGroupPairs { + if uigp.GroupId != nil && uigp.GroupName != nil { + uigp.GroupName = nil + } + } + } + req := &ec2.RevokeSecurityGroupIngressInput{ + GroupId: g.GroupId, + IpPermissions: g.IpPermissions, + } + + log.Printf("[DEBUG] Revoking default ingress rules for Default Security Group for (%s): %s", *g.GroupId, req) + if _, err := conn.RevokeSecurityGroupIngress(req); err != nil { + return fmt.Errorf( + "Error revoking default ingress rules for Default Security Group (%s): %s", + *g.GroupId, err) + } + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_subnet.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_subnet.go new file mode 100644 index 000000000..fc10723db --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_subnet.go @@ -0,0 +1,85 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsDefaultSubnet() *schema.Resource { + // reuse aws_subnet schema, and methods for READ, UPDATE + dsubnet := resourceAwsSubnet() + dsubnet.Create = resourceAwsDefaultSubnetCreate + dsubnet.Delete = resourceAwsDefaultSubnetDelete + + // vpc_id is a required value for Default Subnets + dsubnet.Schema["availability_zone"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + } + // vpc_id is a computed value for Default Subnets + dsubnet.Schema["vpc_id"] = &schema.Schema{ + Type: schema.TypeString, + Computed: true, + } + // cidr_block is a computed value for Default Subnets + dsubnet.Schema["cidr_block"] = &schema.Schema{ + Type: schema.TypeString, + Computed: true, + } + // ipv6_cidr_block is a computed value for Default Subnets + dsubnet.Schema["ipv6_cidr_block"] = &schema.Schema{ + Type: schema.TypeString, + Computed: true, + } + // map_public_ip_on_launch is a computed value for Default Subnets + dsubnet.Schema["map_public_ip_on_launch"] = &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + } + // assign_ipv6_address_on_creation is a computed value for Default Subnets + dsubnet.Schema["assign_ipv6_address_on_creation"] = &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + } + + return dsubnet +} + +func resourceAwsDefaultSubnetCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + req := &ec2.DescribeSubnetsInput{ + 
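+    // Each availability zone in the default VPC has at most one default
+    // subnet, so filtering on the AZ plus defaultForAz=true should match
+    // exactly one subnet; anything else is treated as an error below.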
+    Filters: []*ec2.Filter{
+      &ec2.Filter{
+        Name:   aws.String("availabilityZone"),
+        Values: aws.StringSlice([]string{d.Get("availability_zone").(string)}),
+      },
+      &ec2.Filter{
+        Name:   aws.String("defaultForAz"),
+        Values: aws.StringSlice([]string{"true"}),
+      },
+    },
+  }
+
+  resp, err := conn.DescribeSubnets(req)
+  if err != nil {
+    return err
+  }
+
+  if len(resp.Subnets) != 1 || resp.Subnets[0] == nil {
+    return fmt.Errorf("Default subnet not found")
+  }
+
+  d.SetId(aws.StringValue(resp.Subnets[0].SubnetId))
+
+  return resourceAwsSubnetUpdate(d, meta)
+}
+
+func resourceAwsDefaultSubnetDelete(d *schema.ResourceData, meta interface{}) error {
+  log.Printf("[WARN] Cannot destroy Default Subnet. Terraform will remove this resource from the state file, however resources may remain.")
+  d.SetId("")
+  return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_vpc.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_vpc.go
new file mode 100644
index 000000000..8953534a0
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_vpc.go
@@ -0,0 +1,66 @@
+package aws
+
+import (
+  "fmt"
+  "log"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/service/ec2"
+  "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsDefaultVpc() *schema.Resource {
+  // reuse aws_vpc schema, and methods for READ, UPDATE
+  dvpc := resourceAwsVpc()
+  dvpc.Create = resourceAwsDefaultVpcCreate
+  dvpc.Delete = resourceAwsDefaultVpcDelete
+
+  // cidr_block is a computed value for Default VPCs
+  dvpc.Schema["cidr_block"] = &schema.Schema{
+    Type:     schema.TypeString,
+    Computed: true,
+  }
+  // instance_tenancy is a computed value for Default VPCs
+  dvpc.Schema["instance_tenancy"] = &schema.Schema{
+    Type:     schema.TypeString,
+    Computed: true,
+  }
+  // assign_generated_ipv6_cidr_block is a computed value for Default VPCs
+  dvpc.Schema["assign_generated_ipv6_cidr_block"] = &schema.Schema{
+    Type:     schema.TypeBool,
+    Computed: true,
+  }
+
+  return dvpc
+}
+
+func resourceAwsDefaultVpcCreate(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).ec2conn
+  req := &ec2.DescribeVpcsInput{
+    Filters: []*ec2.Filter{
+      {
+        Name:   aws.String("isDefault"),
+        Values: aws.StringSlice([]string{"true"}),
+      },
+    },
+  }
+
+  resp, err := conn.DescribeVpcs(req)
+  if err != nil {
+    return err
+  }
+
+  if len(resp.Vpcs) == 0 {
+    return fmt.Errorf("No default VPC found in this region.")
+  }
+
+  d.SetId(aws.StringValue(resp.Vpcs[0].VpcId))
+
+  return resourceAwsVpcUpdate(d, meta)
+}
+
+func resourceAwsDefaultVpcDelete(d *schema.ResourceData, meta interface{}) error {
+  log.Printf("[WARN] Cannot destroy Default VPC. Terraform will remove this resource from the state file, however resources may remain.")
+  d.SetId("")
+  return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_vpc_dhcp_options.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_vpc_dhcp_options.go
new file mode 100644
index 000000000..cb433ff4b
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_default_vpc_dhcp_options.go
@@ -0,0 +1,90 @@
+package aws
+
+import (
+  "fmt"
+  "log"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/service/ec2"
+  "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsDefaultVpcDhcpOptions() *schema.Resource {
+  // reuse aws_vpc_dhcp_options schema, and methods for READ, UPDATE
+  dvpc := resourceAwsVpcDhcpOptions()
+  dvpc.Create = resourceAwsDefaultVpcDhcpOptionsCreate
+  dvpc.Delete = resourceAwsDefaultVpcDhcpOptionsDelete
+
+  // domain_name is a computed value for the Default DHCP Options Set
+  dvpc.Schema["domain_name"] = &schema.Schema{
+    Type:     schema.TypeString,
+    Computed: true,
+  }
+  // domain_name_servers is a computed value for the Default DHCP Options Set
+  dvpc.Schema["domain_name_servers"] = &schema.Schema{
+    Type:     schema.TypeString,
+    Computed: true,
+  }
+  // ntp_servers is a computed value for the Default DHCP Options Set
+  dvpc.Schema["ntp_servers"] = &schema.Schema{
+    Type:     schema.TypeString,
+    Computed: true,
+  }
+
+  return dvpc
+}
+
+func resourceAwsDefaultVpcDhcpOptionsCreate(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).ec2conn
+
+  var domainName string
+  awsRegion := meta.(*AWSClient).region
+  if awsRegion == "us-east-1" {
+    domainName = "ec2.internal"
+  } else {
+    domainName = awsRegion + ".compute.internal"
+  }
+  req := &ec2.DescribeDhcpOptionsInput{
+    Filters: []*ec2.Filter{
+      &ec2.Filter{
+        Name:   aws.String("key"),
+        Values: aws.StringSlice([]string{"domain-name"}),
+      },
+      &ec2.Filter{
+        Name:   aws.String("value"),
+        Values: aws.StringSlice([]string{domainName}),
+      },
+      &ec2.Filter{
+        Name:   aws.String("key"),
+        Values: aws.StringSlice([]string{"domain-name-servers"}),
+      },
+      &ec2.Filter{
+        Name:   aws.String("value"),
+        Values: aws.StringSlice([]string{"AmazonProvidedDNS"}),
+      },
+    },
+  }
+
+  resp, err := conn.DescribeDhcpOptions(req)
+  if err != nil {
+    return err
+  }
+
+  if len(resp.DhcpOptions) != 1 || resp.DhcpOptions[0] == nil {
+    return fmt.Errorf("Default DHCP Options Set not found")
+  }
+
+  d.SetId(aws.StringValue(resp.DhcpOptions[0].DhcpOptionsId))
+
+  if err := resourceAwsVpcDhcpOptionsUpdate(d, meta); err != nil {
+    return err
+  }
+
+  return resourceAwsVpcDhcpOptionsRead(d, meta)
+}
+
+func resourceAwsDefaultVpcDhcpOptionsDelete(d *schema.ResourceData, meta interface{}) error {
+  log.Printf("[WARN] Cannot destroy Default DHCP Options Set. Terraform will remove this resource from the state file, however resources may remain.")
+  d.SetId("")
+  return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_devicefarm_project.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_devicefarm_project.go
new file mode 100644
index 000000000..e7e377eaf
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_devicefarm_project.go
@@ -0,0 +1,112 @@
+package aws
+
+import (
+  "fmt"
+  "log"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/service/devicefarm"
+  "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsDevicefarmProject() *schema.Resource {
+  return &schema.Resource{
+    Create: resourceAwsDevicefarmProjectCreate,
+    Read:   resourceAwsDevicefarmProjectRead,
+    Update: resourceAwsDevicefarmProjectUpdate,
+    Delete: resourceAwsDevicefarmProjectDelete,
+
+    Schema: map[string]*schema.Schema{
+      "arn": &schema.Schema{
+        Type:     schema.TypeString,
+        Computed: true,
+      },
+
+      "name": &schema.Schema{
+        Type:     schema.TypeString,
+        Required: true,
+      },
+    },
+  }
+}
+
+func resourceAwsDevicefarmProjectCreate(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).devicefarmconn
+  region := meta.(*AWSClient).region
+
+  // We need to ensure that DeviceFarm is only being run against us-west-2,
+  // as this is the only region where AWS currently supports it
+  if region != "us-west-2" {
+    return fmt.Errorf("DeviceFarm can only be used with us-west-2. You are trying to use it on %s", region)
+  }
+
+  input := &devicefarm.CreateProjectInput{
+    Name: aws.String(d.Get("name").(string)),
+  }
+
+  log.Printf("[DEBUG] Creating DeviceFarm Project: %s", d.Get("name").(string))
+  out, err := conn.CreateProject(input)
+  if err != nil {
+    return fmt.Errorf("Error creating DeviceFarm Project: %s", err)
+  }
+
+  log.Printf("[DEBUG] Successfully Created DeviceFarm Project: %s", *out.Project.Arn)
+  d.SetId(*out.Project.Arn)
+
+  return resourceAwsDevicefarmProjectRead(d, meta)
+}
+
+func resourceAwsDevicefarmProjectRead(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).devicefarmconn
+
+  input := &devicefarm.GetProjectInput{
+    Arn: aws.String(d.Id()),
+  }
+
+  log.Printf("[DEBUG] Reading DeviceFarm Project: %s", d.Id())
+  out, err := conn.GetProject(input)
+  if err != nil {
+    return fmt.Errorf("Error reading DeviceFarm Project: %s", err)
+  }
+
+  d.Set("name", out.Project.Name)
+  d.Set("arn", out.Project.Arn)
+
+  return nil
+}
+
+func resourceAwsDevicefarmProjectUpdate(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).devicefarmconn
+
+  if d.HasChange("name") {
+    input := &devicefarm.UpdateProjectInput{
+      Arn:  aws.String(d.Id()),
+      Name: aws.String(d.Get("name").(string)),
+    }
+
+    log.Printf("[DEBUG] Updating DeviceFarm Project: %s", d.Id())
+    _, err := conn.UpdateProject(input)
+    if err != nil {
+      return fmt.Errorf("Error Updating DeviceFarm Project: %s", err)
+    }
+
+  }
+
+  return resourceAwsDevicefarmProjectRead(d, meta)
+}
+
+func resourceAwsDevicefarmProjectDelete(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).devicefarmconn
+
+  input := &devicefarm.DeleteProjectInput{
+    Arn: aws.String(d.Id()),
+  }
+
+  log.Printf("[DEBUG] Deleting DeviceFarm Project: %s", d.Id())
+  _, err := conn.DeleteProject(input)
+  if err != nil {
+    return fmt.Errorf("Error deleting DeviceFarm Project: %s", err)
+  }
+
+  return nil
+}
diff --git
a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_directory_service_directory.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_directory_service_directory.go new file mode 100644 index 000000000..a9bd952dd --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_directory_service_directory.go @@ -0,0 +1,490 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/directoryservice" + "github.com/hashicorp/terraform/helper/resource" +) + +var directoryCreationFuncs = map[string]func(*directoryservice.DirectoryService, *schema.ResourceData) (string, error){ + "SimpleAD": createSimpleDirectoryService, + "MicrosoftAD": createActiveDirectoryService, + "ADConnector": createDirectoryConnector, +} + +func resourceAwsDirectoryServiceDirectory() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsDirectoryServiceDirectoryCreate, + Read: resourceAwsDirectoryServiceDirectoryRead, + Update: resourceAwsDirectoryServiceDirectoryUpdate, + Delete: resourceAwsDirectoryServiceDirectoryDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "password": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + Sensitive: true, + }, + "size": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "alias": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "short_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "vpc_settings": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "subnet_ids": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "vpc_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + }, + }, + "connect_settings": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "customer_username": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "customer_dns_ips": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "subnet_ids": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "vpc_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + }, + }, + "enable_sso": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "access_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "dns_ip_addresses": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Computed: true, + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "SimpleAD", + 
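+        // The directory type cannot be changed on an existing directory:
+        // switching between SimpleAD, MicrosoftAD and ADConnector forces
+        // a new resource (hence ForceNew below).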
+        ForceNew: true,
+        ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
+          value := v.(string)
+          validTypes := make([]string, 0, len(directoryCreationFuncs))
+          for validType := range directoryCreationFuncs {
+            if validType == value {
+              return
+            }
+            validTypes = append(validTypes, validType)
+          }
+          es = append(es, fmt.Errorf("%q must be one of %q", k, validTypes))
+          return
+        },
+      },
+    },
+  }
+}
+
+func buildVpcSettings(d *schema.ResourceData) (vpcSettings *directoryservice.DirectoryVpcSettings, err error) {
+  if v, ok := d.GetOk("vpc_settings"); !ok {
+    return nil, fmt.Errorf("vpc_settings is required for type = SimpleAD or MicrosoftAD")
+  } else {
+    settings := v.([]interface{})
+
+    if len(settings) > 1 {
+      return nil, fmt.Errorf("Only a single vpc_settings block is expected")
+    } else if len(settings) == 1 {
+      s := settings[0].(map[string]interface{})
+      var subnetIds []*string
+      for _, id := range s["subnet_ids"].(*schema.Set).List() {
+        subnetIds = append(subnetIds, aws.String(id.(string)))
+      }
+
+      vpcSettings = &directoryservice.DirectoryVpcSettings{
+        SubnetIds: subnetIds,
+        VpcId:     aws.String(s["vpc_id"].(string)),
+      }
+    }
+  }
+
+  return vpcSettings, nil
+}
+
+func buildConnectSettings(d *schema.ResourceData) (connectSettings *directoryservice.DirectoryConnectSettings, err error) {
+  if v, ok := d.GetOk("connect_settings"); !ok {
+    return nil, fmt.Errorf("connect_settings is required for type = ADConnector")
+  } else {
+    settings := v.([]interface{})
+
+    if len(settings) > 1 {
+      return nil, fmt.Errorf("Only a single connect_settings block is expected")
+    } else if len(settings) == 1 {
+      s := settings[0].(map[string]interface{})
+
+      var subnetIds []*string
+      for _, id := range s["subnet_ids"].(*schema.Set).List() {
+        subnetIds = append(subnetIds, aws.String(id.(string)))
+      }
+
+      var customerDnsIps []*string
+      for _, id := range s["customer_dns_ips"].(*schema.Set).List() {
+        customerDnsIps = append(customerDnsIps, aws.String(id.(string)))
+      }
+
+      connectSettings = &directoryservice.DirectoryConnectSettings{
+        CustomerDnsIps:   customerDnsIps,
+        CustomerUserName: aws.String(s["customer_username"].(string)),
+        SubnetIds:        subnetIds,
+        VpcId:            aws.String(s["vpc_id"].(string)),
+      }
+    }
+  }
+
+  return connectSettings, nil
+}
+
+func createDirectoryConnector(dsconn *directoryservice.DirectoryService, d *schema.ResourceData) (directoryId string, err error) {
+  if _, ok := d.GetOk("size"); !ok {
+    return "", fmt.Errorf("size is required for type = ADConnector")
+  }
+
+  input := directoryservice.ConnectDirectoryInput{
+    Name:     aws.String(d.Get("name").(string)),
+    Password: aws.String(d.Get("password").(string)),
+    Size:     aws.String(d.Get("size").(string)),
+  }
+
+  if v, ok := d.GetOk("description"); ok {
+    input.Description = aws.String(v.(string))
+  }
+  if v, ok := d.GetOk("short_name"); ok {
+    input.ShortName = aws.String(v.(string))
+  }
+
+  input.ConnectSettings, err = buildConnectSettings(d)
+  if err != nil {
+    return "", err
+  }
+
+  log.Printf("[DEBUG] Creating Directory Connector: %s", input)
+  out, err := dsconn.ConnectDirectory(&input)
+  if err != nil {
+    return "", err
+  }
+  log.Printf("[DEBUG] Directory Connector created: %s", out)
+
+  return *out.DirectoryId, nil
+}
+
+func createSimpleDirectoryService(dsconn *directoryservice.DirectoryService, d *schema.ResourceData) (directoryId string, err error) {
+  if _, ok := d.GetOk("size"); !ok {
+    return "", fmt.Errorf("size is required for type = SimpleAD")
+  }
+
+  input := directoryservice.CreateDirectoryInput{
+    Name:     aws.String(d.Get("name").(string)),
+    Password:
aws.String(d.Get("password").(string)), + Size: aws.String(d.Get("size").(string)), + } + + if v, ok := d.GetOk("description"); ok { + input.Description = aws.String(v.(string)) + } + if v, ok := d.GetOk("short_name"); ok { + input.ShortName = aws.String(v.(string)) + } + + input.VpcSettings, err = buildVpcSettings(d) + if err != nil { + return "", err + } + + log.Printf("[DEBUG] Creating Simple Directory Service: %s", input) + out, err := dsconn.CreateDirectory(&input) + if err != nil { + return "", err + } + log.Printf("[DEBUG] Simple Directory Service created: %s", out) + + return *out.DirectoryId, nil +} + +func createActiveDirectoryService(dsconn *directoryservice.DirectoryService, d *schema.ResourceData) (directoryId string, err error) { + input := directoryservice.CreateMicrosoftADInput{ + Name: aws.String(d.Get("name").(string)), + Password: aws.String(d.Get("password").(string)), + } + + if v, ok := d.GetOk("description"); ok { + input.Description = aws.String(v.(string)) + } + if v, ok := d.GetOk("short_name"); ok { + input.ShortName = aws.String(v.(string)) + } + + input.VpcSettings, err = buildVpcSettings(d) + if err != nil { + return "", err + } + + log.Printf("[DEBUG] Creating Microsoft AD Directory Service: %s", input) + out, err := dsconn.CreateMicrosoftAD(&input) + if err != nil { + return "", err + } + log.Printf("[DEBUG] Microsoft AD Directory Service created: %s", out) + + return *out.DirectoryId, nil +} + +func resourceAwsDirectoryServiceDirectoryCreate(d *schema.ResourceData, meta interface{}) error { + dsconn := meta.(*AWSClient).dsconn + + creationFunc, ok := directoryCreationFuncs[d.Get("type").(string)] + if !ok { + // Shouldn't happen as this is validated above + return fmt.Errorf("Unsupported directory type: %s", d.Get("type")) + } + + directoryId, err := creationFunc(dsconn, d) + if err != nil { + return err + } + + d.SetId(directoryId) + + // Wait for creation + log.Printf("[DEBUG] Waiting for DS (%q) to become available", d.Id()) + stateConf := &resource.StateChangeConf{ + Pending: []string{"Requested", "Creating", "Created"}, + Target: []string{"Active"}, + Refresh: func() (interface{}, string, error) { + resp, err := dsconn.DescribeDirectories(&directoryservice.DescribeDirectoriesInput{ + DirectoryIds: []*string{aws.String(d.Id())}, + }) + if err != nil { + log.Printf("Error during creation of DS: %q", err.Error()) + return nil, "", err + } + + ds := resp.DirectoryDescriptions[0] + log.Printf("[DEBUG] Creation of DS %q is in following stage: %q.", + d.Id(), *ds.Stage) + return ds, *ds.Stage, nil + }, + Timeout: 60 * time.Minute, + } + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf( + "Error waiting for Directory Service (%s) to become available: %s", + d.Id(), err) + } + + if v, ok := d.GetOk("alias"); ok { + d.SetPartial("alias") + + input := directoryservice.CreateAliasInput{ + DirectoryId: aws.String(d.Id()), + Alias: aws.String(v.(string)), + } + + log.Printf("[DEBUG] Assigning alias %q to DS directory %q", + v.(string), d.Id()) + out, err := dsconn.CreateAlias(&input) + if err != nil { + return err + } + log.Printf("[DEBUG] Alias %q assigned to DS directory %q", + *out.Alias, *out.DirectoryId) + } + + return resourceAwsDirectoryServiceDirectoryUpdate(d, meta) +} + +func resourceAwsDirectoryServiceDirectoryUpdate(d *schema.ResourceData, meta interface{}) error { + dsconn := meta.(*AWSClient).dsconn + + if d.HasChange("enable_sso") { + d.SetPartial("enable_sso") + var err error + + if v, ok := d.GetOk("enable_sso"); ok && 
v.(bool) {
+      log.Printf("[DEBUG] Enabling SSO for DS directory %q", d.Id())
+      _, err = dsconn.EnableSso(&directoryservice.EnableSsoInput{
+        DirectoryId: aws.String(d.Id()),
+      })
+    } else {
+      log.Printf("[DEBUG] Disabling SSO for DS directory %q", d.Id())
+      _, err = dsconn.DisableSso(&directoryservice.DisableSsoInput{
+        DirectoryId: aws.String(d.Id()),
+      })
+    }
+
+    if err != nil {
+      return err
+    }
+  }
+
+  return resourceAwsDirectoryServiceDirectoryRead(d, meta)
+}
+
+func resourceAwsDirectoryServiceDirectoryRead(d *schema.ResourceData, meta interface{}) error {
+  dsconn := meta.(*AWSClient).dsconn
+
+  input := directoryservice.DescribeDirectoriesInput{
+    DirectoryIds: []*string{aws.String(d.Id())},
+  }
+  out, err := dsconn.DescribeDirectories(&input)
+  if err != nil {
+    return err
+  }
+
+  if len(out.DirectoryDescriptions) == 0 {
+    log.Printf("[WARN] Directory %s not found", d.Id())
+    d.SetId("")
+    return nil
+  }
+
+  dir := out.DirectoryDescriptions[0]
+  log.Printf("[DEBUG] Received DS directory: %s", dir)
+
+  d.Set("access_url", dir.AccessUrl)
+  d.Set("alias", dir.Alias)
+  if dir.Description != nil {
+    d.Set("description", *dir.Description)
+  }
+
+  if *dir.Type == "ADConnector" {
+    d.Set("dns_ip_addresses", schema.NewSet(schema.HashString, flattenStringList(dir.ConnectSettings.ConnectIps)))
+  } else {
+    d.Set("dns_ip_addresses", schema.NewSet(schema.HashString, flattenStringList(dir.DnsIpAddrs)))
+  }
+  d.Set("name", dir.Name)
+  if dir.ShortName != nil {
+    d.Set("short_name", *dir.ShortName)
+  }
+  if dir.Size != nil {
+    d.Set("size", *dir.Size)
+  }
+  d.Set("type", dir.Type)
+  d.Set("vpc_settings", flattenDSVpcSettings(dir.VpcSettings))
+  d.Set("connect_settings", flattenDSConnectSettings(dir.DnsIpAddrs, dir.ConnectSettings))
+  d.Set("enable_sso", dir.SsoEnabled)
+
+  return nil
+}
+
+func resourceAwsDirectoryServiceDirectoryDelete(d *schema.ResourceData, meta interface{}) error {
+  dsconn := meta.(*AWSClient).dsconn
+
+  input := directoryservice.DeleteDirectoryInput{
+    DirectoryId: aws.String(d.Id()),
+  }
+
+  log.Printf("[DEBUG] Delete Directory input: %s", input)
+  _, err := dsconn.DeleteDirectory(&input)
+  if err != nil {
+    return err
+  }
+
+  // Wait for deletion
+  log.Printf("[DEBUG] Waiting for DS (%q) to be deleted", d.Id())
+  stateConf := &resource.StateChangeConf{
+    Pending: []string{"Deleting"},
+    Target:  []string{"Deleted"},
+    Refresh: func() (interface{}, string, error) {
+      resp, err := dsconn.DescribeDirectories(&directoryservice.DescribeDirectoriesInput{
+        DirectoryIds: []*string{aws.String(d.Id())},
+      })
+      if err != nil {
+        if dserr, ok := err.(awserr.Error); ok && dserr.Code() == "EntityDoesNotExistException" {
+          return 42, "Deleted", nil
+        }
+        return nil, "error", err
+      }
+
+      if len(resp.DirectoryDescriptions) == 0 {
+        return 42, "Deleted", nil
+      }
+
+      ds := resp.DirectoryDescriptions[0]
+      log.Printf("[DEBUG] Deletion of DS %q is in following stage: %q.",
+        d.Id(), *ds.Stage)
+      return ds, *ds.Stage, nil
+    },
+    Timeout: 60 * time.Minute,
+  }
+  if _, err := stateConf.WaitForState(); err != nil {
+    return fmt.Errorf(
+      "Error waiting for Directory Service (%s) to be deleted: %q",
+      d.Id(), err.Error())
+  }
+
+  return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_certificate.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_certificate.go
new file mode 100644
index 000000000..8fd3f9f88
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_certificate.go
@@ -0,0 +1,138 @@
+package aws
+
+import (
+  "fmt"
+  "log"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/aws/awserr"
+  dms "github.com/aws/aws-sdk-go/service/databasemigrationservice"
+  "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsDmsCertificate() *schema.Resource {
+  return &schema.Resource{
+    Create: resourceAwsDmsCertificateCreate,
+    Read:   resourceAwsDmsCertificateRead,
+    Delete: resourceAwsDmsCertificateDelete,
+
+    Importer: &schema.ResourceImporter{
+      State: schema.ImportStatePassthrough,
+    },
+
+    Schema: map[string]*schema.Schema{
+      "certificate_arn": {
+        Type:     schema.TypeString,
+        Computed: true,
+      },
+      "certificate_id": {
+        Type:         schema.TypeString,
+        Required:     true,
+        ForceNew:     true,
+        ValidateFunc: validateDmsCertificateId,
+      },
+      "certificate_pem": {
+        Type:      schema.TypeString,
+        Optional:  true,
+        ForceNew:  true,
+        Sensitive: true,
+      },
+      "certificate_wallet": {
+        Type:      schema.TypeString,
+        Optional:  true,
+        ForceNew:  true,
+        Sensitive: true,
+      },
+    },
+  }
+}
+
+func resourceAwsDmsCertificateCreate(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).dmsconn
+
+  request := &dms.ImportCertificateInput{
+    CertificateIdentifier: aws.String(d.Get("certificate_id").(string)),
+  }
+
+  pem, pemSet := d.GetOk("certificate_pem")
+  wallet, walletSet := d.GetOk("certificate_wallet")
+
+  if !pemSet && !walletSet {
+    return fmt.Errorf("Must set either certificate_pem or certificate_wallet.")
+  }
+  if pemSet && walletSet {
+    return fmt.Errorf("Cannot set both certificate_pem and certificate_wallet.")
+  }
+
+  if pemSet {
+    request.CertificatePem = aws.String(pem.(string))
+  }
+  if walletSet {
+    request.CertificateWallet = []byte(wallet.(string))
+  }
+
+  log.Println("[DEBUG] DMS import certificate:", request)
+
+  _, err := conn.ImportCertificate(request)
+  if err != nil {
+    return err
+  }
+
+  d.SetId(d.Get("certificate_id").(string))
+  return resourceAwsDmsCertificateRead(d, meta)
+}
+
+func resourceAwsDmsCertificateRead(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).dmsconn
+
+  response, err := conn.DescribeCertificates(&dms.DescribeCertificatesInput{
+    Filters: []*dms.Filter{
+      {
+        Name:   aws.String("certificate-id"),
+        Values: []*string{aws.String(d.Id())}, // Must use d.Id() to work with import.
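+        // The certificate ARN is only known once the certificate exists,
+        // so we filter on the identifier, which doubles as the Terraform
+        // resource ID and is therefore available during import.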
+      },
+    },
+  })
+  if err != nil {
+    if dmserr, ok := err.(awserr.Error); ok && dmserr.Code() == "ResourceNotFoundFault" {
+      d.SetId("")
+      return nil
+    }
+    return err
+  }
+
+  if len(response.Certificates) == 0 {
+    // The filter matched nothing without raising an error; treat the
+    // certificate as gone.
+    d.SetId("")
+    return nil
+  }
+
+  return resourceAwsDmsCertificateSetState(d, response.Certificates[0])
+}
+
+func resourceAwsDmsCertificateDelete(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).dmsconn
+
+  request := &dms.DeleteCertificateInput{
+    CertificateArn: aws.String(d.Get("certificate_arn").(string)),
+  }
+
+  log.Printf("[DEBUG] DMS delete certificate: %#v", request)
+
+  _, err := conn.DeleteCertificate(request)
+  if err != nil {
+    return err
+  }
+
+  return nil
+}
+
+func resourceAwsDmsCertificateSetState(d *schema.ResourceData, cert *dms.Certificate) error {
+  d.SetId(*cert.CertificateIdentifier)
+
+  d.Set("certificate_id", cert.CertificateIdentifier)
+  d.Set("certificate_arn", cert.CertificateArn)
+
+  if cert.CertificatePem != nil && *cert.CertificatePem != "" {
+    d.Set("certificate_pem", cert.CertificatePem)
+  }
+  if len(cert.CertificateWallet) > 0 {
+    d.Set("certificate_wallet", cert.CertificateWallet)
+  }
+
+  return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_endpoint.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_endpoint.go
new file mode 100644
index 000000000..586ed9f7c
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_endpoint.go
@@ -0,0 +1,307 @@
+package aws
+
+import (
+  "log"
+  "strings"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/aws/awserr"
+  dms "github.com/aws/aws-sdk-go/service/databasemigrationservice"
+  "github.com/hashicorp/terraform/helper/schema"
+  "github.com/hashicorp/terraform/helper/validation"
+)
+
+func resourceAwsDmsEndpoint() *schema.Resource {
+  return &schema.Resource{
+    Create: resourceAwsDmsEndpointCreate,
+    Read:   resourceAwsDmsEndpointRead,
+    Update: resourceAwsDmsEndpointUpdate,
+    Delete: resourceAwsDmsEndpointDelete,
+
+    Importer: &schema.ResourceImporter{
+      State: schema.ImportStatePassthrough,
+    },
+
+    Schema: map[string]*schema.Schema{
+      "certificate_arn": {
+        Type:         schema.TypeString,
+        Computed:     true,
+        Optional:     true,
+        ValidateFunc: validateArn,
+      },
+      "database_name": {
+        Type:     schema.TypeString,
+        Optional: true,
+      },
+      "endpoint_arn": {
+        Type:     schema.TypeString,
+        Computed: true,
+      },
+      "endpoint_id": {
+        Type:         schema.TypeString,
+        Required:     true,
+        ForceNew:     true,
+        ValidateFunc: validateDmsEndpointId,
+      },
+      "endpoint_type": {
+        Type:     schema.TypeString,
+        Required: true,
+        ValidateFunc: validation.StringInSlice([]string{
+          "source",
+          "target",
+        }, false),
+      },
+      "engine_name": {
+        Type:     schema.TypeString,
+        Required: true,
+        ValidateFunc: validation.StringInSlice([]string{
+          "mysql",
+          "oracle",
+          "postgres",
+          "mariadb",
+          "aurora",
+          "redshift",
+          "sybase",
+          "sqlserver",
+        }, false),
+      },
+      "extra_connection_attributes": {
+        Type:     schema.TypeString,
+        Computed: true,
+        Optional: true,
+      },
+      "kms_key_arn": {
+        Type:         schema.TypeString,
+        Computed:     true,
+        Optional:     true,
+        ForceNew:     true,
+        ValidateFunc: validateArn,
+      },
+      "password": {
+        Type:      schema.TypeString,
+        Required:  true,
+        Sensitive: true,
+      },
+      "port": {
+        Type:     schema.TypeInt,
+        Required: true,
+      },
+      "server_name": {
+        Type:     schema.TypeString,
+        Required: true,
+      },
+      "ssl_mode": {
+        Type:     schema.TypeString,
+        Computed: true,
+        Optional: true,
+        ValidateFunc: validation.StringInSlice([]string{
"none", + "require", + "verify-ca", + "verify-full", + }, false), + }, + "tags": { + Type: schema.TypeMap, + Optional: true, + }, + "username": { + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceAwsDmsEndpointCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).dmsconn + + request := &dms.CreateEndpointInput{ + EndpointIdentifier: aws.String(d.Get("endpoint_id").(string)), + EndpointType: aws.String(d.Get("endpoint_type").(string)), + EngineName: aws.String(d.Get("engine_name").(string)), + Password: aws.String(d.Get("password").(string)), + Port: aws.Int64(int64(d.Get("port").(int))), + ServerName: aws.String(d.Get("server_name").(string)), + Tags: dmsTagsFromMap(d.Get("tags").(map[string]interface{})), + Username: aws.String(d.Get("username").(string)), + } + + if v, ok := d.GetOk("database_name"); ok { + request.DatabaseName = aws.String(v.(string)) + } + if v, ok := d.GetOk("certificate_arn"); ok { + request.CertificateArn = aws.String(v.(string)) + } + if v, ok := d.GetOk("extra_connection_attributes"); ok { + request.ExtraConnectionAttributes = aws.String(v.(string)) + } + if v, ok := d.GetOk("kms_key_arn"); ok { + request.KmsKeyId = aws.String(v.(string)) + } + if v, ok := d.GetOk("ssl_mode"); ok { + request.SslMode = aws.String(v.(string)) + } + + log.Println("[DEBUG] DMS create endpoint:", request) + + _, err := conn.CreateEndpoint(request) + if err != nil { + return err + } + + d.SetId(d.Get("endpoint_id").(string)) + return resourceAwsDmsEndpointRead(d, meta) +} + +func resourceAwsDmsEndpointRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).dmsconn + + response, err := conn.DescribeEndpoints(&dms.DescribeEndpointsInput{ + Filters: []*dms.Filter{ + { + Name: aws.String("endpoint-id"), + Values: []*string{aws.String(d.Id())}, // Must use d.Id() to work with import. 
+ }, + }, + }) + if err != nil { + if dmserr, ok := err.(awserr.Error); ok && dmserr.Code() == "ResourceNotFoundFault" { + log.Printf("[DEBUG] DMS Replication Endpoint %q Not Found", d.Id()) + d.SetId("") + return nil + } + return err + } + + err = resourceAwsDmsEndpointSetState(d, response.Endpoints[0]) + if err != nil { + return err + } + + tagsResp, err := conn.ListTagsForResource(&dms.ListTagsForResourceInput{ + ResourceArn: aws.String(d.Get("endpoint_arn").(string)), + }) + if err != nil { + return err + } + d.Set("tags", dmsTagsToMap(tagsResp.TagList)) + + return nil +} + +func resourceAwsDmsEndpointUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).dmsconn + + request := &dms.ModifyEndpointInput{ + EndpointArn: aws.String(d.Get("endpoint_arn").(string)), + } + hasChanges := false + + if d.HasChange("certificate_arn") { + request.CertificateArn = aws.String(d.Get("certificate_arn").(string)) + hasChanges = true + } + + if d.HasChange("database_name") { + request.DatabaseName = aws.String(d.Get("database_name").(string)) + hasChanges = true + } + + if d.HasChange("endpoint_type") { + request.EndpointType = aws.String(d.Get("endpoint_type").(string)) + hasChanges = true + } + + if d.HasChange("engine_name") { + request.EngineName = aws.String(d.Get("engine_name").(string)) + hasChanges = true + } + + if d.HasChange("extra_connection_attributes") { + request.ExtraConnectionAttributes = aws.String(d.Get("extra_connection_attributes").(string)) + hasChanges = true + } + + if d.HasChange("password") { + request.Password = aws.String(d.Get("password").(string)) + hasChanges = true + } + + if d.HasChange("port") { + request.Port = aws.Int64(int64(d.Get("port").(int))) + hasChanges = true + } + + if d.HasChange("server_name") { + request.ServerName = aws.String(d.Get("server_name").(string)) + hasChanges = true + } + + if d.HasChange("ssl_mode") { + request.SslMode = aws.String(d.Get("ssl_mode").(string)) + hasChanges = true + } + + if d.HasChange("username") { + request.Username = aws.String(d.Get("username").(string)) + hasChanges = true + } + + if d.HasChange("tags") { + err := dmsSetTags(d.Get("endpoint_arn").(string), d, meta) + if err != nil { + return err + } + } + + if hasChanges { + log.Println("[DEBUG] DMS update endpoint:", request) + + _, err := conn.ModifyEndpoint(request) + if err != nil { + return err + } + + return resourceAwsDmsEndpointRead(d, meta) + } + + return nil +} + +func resourceAwsDmsEndpointDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).dmsconn + + request := &dms.DeleteEndpointInput{ + EndpointArn: aws.String(d.Get("endpoint_arn").(string)), + } + + log.Printf("[DEBUG] DMS delete endpoint: %#v", request) + + _, err := conn.DeleteEndpoint(request) + if err != nil { + return err + } + + return nil +} + +func resourceAwsDmsEndpointSetState(d *schema.ResourceData, endpoint *dms.Endpoint) error { + d.SetId(*endpoint.EndpointIdentifier) + + d.Set("certificate_arn", endpoint.CertificateArn) + d.Set("database_name", endpoint.DatabaseName) + d.Set("endpoint_arn", endpoint.EndpointArn) + d.Set("endpoint_id", endpoint.EndpointIdentifier) + // For some reason the AWS API only accepts lowercase type but returns it as uppercase + d.Set("endpoint_type", strings.ToLower(*endpoint.EndpointType)) + d.Set("engine_name", endpoint.EngineName) + d.Set("extra_connection_attributes", endpoint.ExtraConnectionAttributes) + d.Set("kms_key_arn", endpoint.KmsKeyId) + d.Set("port", endpoint.Port) + d.Set("server_name", 
endpoint.ServerName) + d.Set("ssl_mode", endpoint.SslMode) + d.Set("username", endpoint.Username) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_replication_instance.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_replication_instance.go new file mode 100644 index 000000000..f0b0a3aed --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_replication_instance.go @@ -0,0 +1,433 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsDmsReplicationInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsDmsReplicationInstanceCreate, + Read: resourceAwsDmsReplicationInstanceRead, + Update: resourceAwsDmsReplicationInstanceUpdate, + Delete: resourceAwsDmsReplicationInstanceDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "allocated_storage": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ValidateFunc: validateIntegerInRange(5, 6144), + }, + "apply_immediately": { + Type: schema.TypeBool, + Optional: true, + }, + "auto_minor_version_upgrade": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + }, + "availability_zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "engine_version": { + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + "kms_key_arn": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + "multi_az": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + }, + "preferred_maintenance_window": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: validateOnceAWeekWindowFormat, + }, + "publicly_accessible": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + ForceNew: true, + }, + "replication_instance_arn": { + Type: schema.TypeString, + Computed: true, + }, + "replication_instance_class": { + Type: schema.TypeString, + Required: true, + // Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | + // dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge + }, + "replication_instance_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateDmsReplicationInstanceId, + }, + "replication_instance_private_ips": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + }, + "replication_instance_public_ips": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + }, + "replication_subnet_group_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "tags": { + Type: schema.TypeMap, + Optional: true, + }, + "vpc_security_group_ids": { + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Computed: true, + Optional: true, + }, + }, + } +} + +func 
resourceAwsDmsReplicationInstanceCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).dmsconn + + request := &dms.CreateReplicationInstanceInput{ + AutoMinorVersionUpgrade: aws.Bool(d.Get("auto_minor_version_upgrade").(bool)), + PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)), + ReplicationInstanceClass: aws.String(d.Get("replication_instance_class").(string)), + ReplicationInstanceIdentifier: aws.String(d.Get("replication_instance_id").(string)), + Tags: dmsTagsFromMap(d.Get("tags").(map[string]interface{})), + } + + // WARNING: GetOk returns the zero value for the type if the key is omitted in config. This means for optional + // keys that the zero value is valid we cannot know if the zero value was in the config and cannot allow the API + // to set the default value. See GitHub Issue #5694 https://github.com/hashicorp/terraform/issues/5694 + + if v, ok := d.GetOk("allocated_storage"); ok { + request.AllocatedStorage = aws.Int64(int64(v.(int))) + } + if v, ok := d.GetOk("engine_version"); ok { + request.EngineVersion = aws.String(v.(string)) + } + if v, ok := d.GetOk("kms_key_arn"); ok { + request.KmsKeyId = aws.String(v.(string)) + } + if v, ok := d.GetOk("preferred_maintenance_window"); ok { + request.PreferredMaintenanceWindow = aws.String(v.(string)) + } + if v, ok := d.GetOk("replication_subnet_group_id"); ok { + request.ReplicationSubnetGroupIdentifier = aws.String(v.(string)) + } + if v, ok := d.GetOk("vpc_security_group_ids"); ok { + request.VpcSecurityGroupIds = expandStringList(v.(*schema.Set).List()) + } + + az, azSet := d.GetOk("availability_zone") + if azSet { + request.AvailabilityZone = aws.String(az.(string)) + } + + if multiAz, ok := d.GetOk("multi_az"); ok { + request.MultiAZ = aws.Bool(multiAz.(bool)) + + if multiAz.(bool) && azSet { + return fmt.Errorf("Cannot set availability_zone if multi_az is set to true") + } + } + + log.Println("[DEBUG] DMS create replication instance:", request) + + _, err := conn.CreateReplicationInstance(request) + if err != nil { + return err + } + + d.SetId(d.Get("replication_instance_id").(string)) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating"}, + Target: []string{"available"}, + Refresh: resourceAwsDmsReplicationInstanceStateRefreshFunc(d, meta), + Timeout: d.Timeout(schema.TimeoutCreate), + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, // Wait 30 secs before starting + } + + // Wait, catching any errors + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + return resourceAwsDmsReplicationInstanceRead(d, meta) +} + +func resourceAwsDmsReplicationInstanceRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).dmsconn + + response, err := conn.DescribeReplicationInstances(&dms.DescribeReplicationInstancesInput{ + Filters: []*dms.Filter{ + { + Name: aws.String("replication-instance-id"), + Values: []*string{aws.String(d.Id())}, // Must use d.Id() to work with import. 
+ }, + }, + }) + if err != nil { + if dmserr, ok := err.(awserr.Error); ok && dmserr.Code() == "ResourceNotFoundFault" { + log.Printf("[DEBUG] DMS Replication Instance %q Not Found", d.Id()) + d.SetId("") + return nil + } + return err + } + + err = resourceAwsDmsReplicationInstanceSetState(d, response.ReplicationInstances[0]) + if err != nil { + return err + } + + tagsResp, err := conn.ListTagsForResource(&dms.ListTagsForResourceInput{ + ResourceArn: aws.String(d.Get("replication_instance_arn").(string)), + }) + if err != nil { + return err + } + d.Set("tags", dmsTagsToMap(tagsResp.TagList)) + + return nil +} + +func resourceAwsDmsReplicationInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + request := &dms.ModifyReplicationInstanceInput{ + ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)), + ReplicationInstanceArn: aws.String(d.Get("replication_instance_arn").(string)), + } + hasChanges := false + + if d.HasChange("auto_minor_version_upgrade") { + request.AutoMinorVersionUpgrade = aws.Bool(d.Get("auto_minor_version_upgrade").(bool)) + hasChanges = true + } + + if d.HasChange("allocated_storage") { + if v, ok := d.GetOk("allocated_storage"); ok { + request.AllocatedStorage = aws.Int64(int64(v.(int))) + hasChanges = true + } + } + + if d.HasChange("engine_version") { + if v, ok := d.GetOk("engine_version"); ok { + request.EngineVersion = aws.String(v.(string)) + hasChanges = true + } + } + + if d.HasChange("multi_az") { + if v, ok := d.GetOk("multi_az"); ok { + request.MultiAZ = aws.Bool(v.(bool)) + hasChanges = true + } + } + + if d.HasChange("preferred_maintenance_window") { + if v, ok := d.GetOk("preferred_maintenance_window"); ok { + request.PreferredMaintenanceWindow = aws.String(v.(string)) + hasChanges = true + } + } + + if d.HasChange("replication_instance_class") { + if v, ok := d.GetOk("replication_instance_class"); ok { + request.ReplicationInstanceClass = aws.String(v.(string)) + hasChanges = true + } + } + + if d.HasChange("vpc_security_group_ids") { + if v, ok := d.GetOk("vpc_security_group_ids"); ok { + request.VpcSecurityGroupIds = expandStringList(v.(*schema.Set).List()) + hasChanges = true + } + } + + if d.HasChange("tags") { + err := dmsSetTags(d.Get("replication_instance_arn").(string), d, meta) + if err != nil { + return err + } + } + + if hasChanges { + conn := meta.(*AWSClient).dmsconn + + _, err := conn.ModifyReplicationInstance(request) + if err != nil { + return err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"modifying"}, + Target: []string{"available"}, + Refresh: resourceAwsDmsReplicationInstanceStateRefreshFunc(d, meta), + Timeout: d.Timeout(schema.TimeoutUpdate), + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, // Wait 30 secs before starting + } + + // Wait, catching any errors + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + return resourceAwsDmsReplicationInstanceRead(d, meta) + } + + return nil +} + +func resourceAwsDmsReplicationInstanceDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).dmsconn + + request := &dms.DeleteReplicationInstanceInput{ + ReplicationInstanceArn: aws.String(d.Get("replication_instance_arn").(string)), + } + + log.Printf("[DEBUG] DMS delete replication instance: %#v", request) + + _, err := conn.DeleteReplicationInstance(request) + if err != nil { + return err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"deleting"}, + Target: []string{}, + Refresh: 
resourceAwsDmsReplicationInstanceStateRefreshFunc(d, meta), + Timeout: d.Timeout(schema.TimeoutDelete), + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, // Wait 30 secs before starting + } + + // Wait, catching any errors + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + return nil +} + +func resourceAwsDmsReplicationInstanceSetState(d *schema.ResourceData, instance *dms.ReplicationInstance) error { + d.SetId(*instance.ReplicationInstanceIdentifier) + + d.Set("replication_instance_id", instance.ReplicationInstanceIdentifier) + d.Set("allocated_storage", instance.AllocatedStorage) + d.Set("auto_minor_version_upgrade", instance.AutoMinorVersionUpgrade) + d.Set("availability_zone", instance.AvailabilityZone) + d.Set("engine_version", instance.EngineVersion) + d.Set("kms_key_arn", instance.KmsKeyId) + d.Set("multi_az", instance.MultiAZ) + d.Set("preferred_maintenance_window", instance.PreferredMaintenanceWindow) + d.Set("publicly_accessible", instance.PubliclyAccessible) + d.Set("replication_instance_arn", instance.ReplicationInstanceArn) + d.Set("replication_instance_class", instance.ReplicationInstanceClass) + d.Set("replication_subnet_group_id", instance.ReplicationSubnetGroup.ReplicationSubnetGroupIdentifier) + + vpc_security_group_ids := []string{} + for _, sg := range instance.VpcSecurityGroups { + vpc_security_group_ids = append(vpc_security_group_ids, aws.StringValue(sg.VpcSecurityGroupId)) + } + + d.Set("vpc_security_group_ids", vpc_security_group_ids) + + private_ip_addresses := []string{} + for _, ip := range instance.ReplicationInstancePrivateIpAddresses { + private_ip_addresses = append(private_ip_addresses, aws.StringValue(ip)) + } + + d.Set("replication_instance_private_ips", private_ip_addresses) + + public_ip_addresses := []string{} + for _, ip := range instance.ReplicationInstancePublicIpAddresses { + public_ip_addresses = append(public_ip_addresses, aws.StringValue(ip)) + } + + d.Set("replication_instance_public_ips", public_ip_addresses) + + return nil +} + +func resourceAwsDmsReplicationInstanceStateRefreshFunc( + d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + conn := meta.(*AWSClient).dmsconn + + v, err := conn.DescribeReplicationInstances(&dms.DescribeReplicationInstancesInput{ + Filters: []*dms.Filter{ + { + Name: aws.String("replication-instance-id"), + Values: []*string{aws.String(d.Id())}, // Must use d.Id() to work with import. 
+ }, + }, + }) + if err != nil { + if dmserr, ok := err.(awserr.Error); ok && dmserr.Code() == "ResourceNotFoundFault" { + return nil, "", nil + } + log.Printf("Error on retrieving DMS Replication Instance when waiting: %s", err) + return nil, "", err + } + + if v == nil { + return nil, "", nil + } + + if len(v.ReplicationInstances) == 0 { + return nil, "", fmt.Errorf("Error on retrieving DMS Replication Instance when waiting for State") + } + + return v, *v.ReplicationInstances[0].ReplicationInstanceStatus, nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_replication_subnet_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_replication_subnet_group.go new file mode 100644 index 000000000..b28165308 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_replication_subnet_group.go @@ -0,0 +1,179 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsDmsReplicationSubnetGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsDmsReplicationSubnetGroupCreate, + Read: resourceAwsDmsReplicationSubnetGroupRead, + Update: resourceAwsDmsReplicationSubnetGroupUpdate, + Delete: resourceAwsDmsReplicationSubnetGroupDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "replication_subnet_group_arn": { + Type: schema.TypeString, + Computed: true, + }, + "replication_subnet_group_description": { + Type: schema.TypeString, + Required: true, + }, + "replication_subnet_group_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateDmsReplicationSubnetGroupId, + }, + "subnet_ids": { + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Required: true, + }, + "tags": { + Type: schema.TypeMap, + Optional: true, + }, + "vpc_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsDmsReplicationSubnetGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).dmsconn + + request := &dms.CreateReplicationSubnetGroupInput{ + ReplicationSubnetGroupIdentifier: aws.String(d.Get("replication_subnet_group_id").(string)), + ReplicationSubnetGroupDescription: aws.String(d.Get("replication_subnet_group_description").(string)), + SubnetIds: expandStringList(d.Get("subnet_ids").(*schema.Set).List()), + Tags: dmsTagsFromMap(d.Get("tags").(map[string]interface{})), + } + + log.Println("[DEBUG] DMS create replication subnet group:", request) + + _, err := conn.CreateReplicationSubnetGroup(request) + if err != nil { + return err + } + + d.SetId(d.Get("replication_subnet_group_id").(string)) + return resourceAwsDmsReplicationSubnetGroupRead(d, meta) +} + +func resourceAwsDmsReplicationSubnetGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).dmsconn + + response, err := conn.DescribeReplicationSubnetGroups(&dms.DescribeReplicationSubnetGroupsInput{ + Filters: []*dms.Filter{ + { + Name: aws.String("replication-subnet-group-id"), + Values: []*string{aws.String(d.Id())}, // Must use d.Id() to work with import. 
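+ // Note: a missing subnet group comes back from this call as an empty result list rather than a ResourceNotFoundFault, so the read below clears the resource ID when nothing matches.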
+ }, + }, + }) + if err != nil { + return err + } + if len(response.ReplicationSubnetGroups) == 0 { + d.SetId("") + return nil + } + + // The AWS API for DMS subnet groups does not return the ARN which is required to + // retrieve tags. This ARN can be built. + d.Set("replication_subnet_group_arn", fmt.Sprintf("arn:aws:dms:%s:%s:subgrp:%s", + meta.(*AWSClient).region, meta.(*AWSClient).accountid, d.Id())) + + err = resourceAwsDmsReplicationSubnetGroupSetState(d, response.ReplicationSubnetGroups[0]) + if err != nil { + return err + } + + tagsResp, err := conn.ListTagsForResource(&dms.ListTagsForResourceInput{ + ResourceArn: aws.String(d.Get("replication_subnet_group_arn").(string)), + }) + if err != nil { + return err + } + d.Set("tags", dmsTagsToMap(tagsResp.TagList)) + + return nil +} + +func resourceAwsDmsReplicationSubnetGroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).dmsconn + + // Updates to subnet groups are only valid when sending SubnetIds even if there are no + // changes to SubnetIds. + request := &dms.ModifyReplicationSubnetGroupInput{ + ReplicationSubnetGroupIdentifier: aws.String(d.Get("replication_subnet_group_id").(string)), + SubnetIds: expandStringList(d.Get("subnet_ids").(*schema.Set).List()), + } + + if d.HasChange("replication_subnet_group_description") { + request.ReplicationSubnetGroupDescription = aws.String(d.Get("replication_subnet_group_description").(string)) + } + + if d.HasChange("tags") { + err := dmsSetTags(d.Get("replication_subnet_group_arn").(string), d, meta) + if err != nil { + return err + } + } + + log.Println("[DEBUG] DMS update replication subnet group:", request) + + _, err := conn.ModifyReplicationSubnetGroup(request) + if err != nil { + return err + } + + return resourceAwsDmsReplicationSubnetGroupRead(d, meta) +} + +func resourceAwsDmsReplicationSubnetGroupDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).dmsconn + + request := &dms.DeleteReplicationSubnetGroupInput{ + ReplicationSubnetGroupIdentifier: aws.String(d.Get("replication_subnet_group_id").(string)), + } + + log.Printf("[DEBUG] DMS delete replication subnet group: %#v", request) + + _, err := conn.DeleteReplicationSubnetGroup(request) + if err != nil { + return err + } + + return nil +} + +func resourceAwsDmsReplicationSubnetGroupSetState(d *schema.ResourceData, group *dms.ReplicationSubnetGroup) error { + d.SetId(*group.ReplicationSubnetGroupIdentifier) + + subnet_ids := []string{} + for _, subnet := range group.Subnets { + subnet_ids = append(subnet_ids, aws.StringValue(subnet.SubnetIdentifier)) + } + + d.Set("replication_subnet_group_description", group.ReplicationSubnetGroupDescription) + d.Set("replication_subnet_group_id", group.ReplicationSubnetGroupIdentifier) + d.Set("subnet_ids", subnet_ids) + d.Set("vpc_id", group.VpcId) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_replication_task.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_replication_task.go new file mode 100644 index 000000000..ab10eedbc --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dms_replication_task.go @@ -0,0 +1,331 @@ +package aws + +import ( + "fmt" + "log" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" + "github.com/hashicorp/terraform/helper/resource" + 
"github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" +) + +func resourceAwsDmsReplicationTask() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsDmsReplicationTaskCreate, + Read: resourceAwsDmsReplicationTaskRead, + Update: resourceAwsDmsReplicationTaskUpdate, + Delete: resourceAwsDmsReplicationTaskDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "cdc_start_time": { + Type: schema.TypeString, + Optional: true, + // Requires a Unix timestamp in seconds. Example 1484346880 + }, + "migration_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + "full-load", + "cdc", + "full-load-and-cdc", + }, false), + }, + "replication_instance_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + "replication_task_arn": { + Type: schema.TypeString, + Computed: true, + }, + "replication_task_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateDmsReplicationTaskId, + }, + "replication_task_settings": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateJsonString, + DiffSuppressFunc: suppressEquivalentJsonDiffs, + }, + "source_endpoint_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + "table_mappings": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateJsonString, + DiffSuppressFunc: suppressEquivalentJsonDiffs, + }, + "tags": { + Type: schema.TypeMap, + Optional: true, + }, + "target_endpoint_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + }, + } +} + +func resourceAwsDmsReplicationTaskCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).dmsconn + + request := &dms.CreateReplicationTaskInput{ + MigrationType: aws.String(d.Get("migration_type").(string)), + ReplicationInstanceArn: aws.String(d.Get("replication_instance_arn").(string)), + ReplicationTaskIdentifier: aws.String(d.Get("replication_task_id").(string)), + SourceEndpointArn: aws.String(d.Get("source_endpoint_arn").(string)), + TableMappings: aws.String(d.Get("table_mappings").(string)), + Tags: dmsTagsFromMap(d.Get("tags").(map[string]interface{})), + TargetEndpointArn: aws.String(d.Get("target_endpoint_arn").(string)), + } + + if v, ok := d.GetOk("cdc_start_time"); ok { + seconds, err := strconv.ParseInt(v.(string), 10, 64) + if err != nil { + return fmt.Errorf("[ERROR] DMS create replication task. 
Invalid CDC Unix timestamp: %s", err) + } + request.CdcStartTime = aws.Time(time.Unix(seconds, 0)) + } + + if v, ok := d.GetOk("replication_task_settings"); ok { + request.ReplicationTaskSettings = aws.String(v.(string)) + } + + log.Println("[DEBUG] DMS create replication task:", request) + + _, err := conn.CreateReplicationTask(request) + if err != nil { + return err + } + + taskId := d.Get("replication_task_id").(string) + d.SetId(taskId) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating"}, + Target: []string{"ready"}, + Refresh: resourceAwsDmsReplicationTaskStateRefreshFunc(d, meta), + Timeout: d.Timeout(schema.TimeoutCreate), + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, // Wait 30 secs before starting + } + + // Wait, catching any errors + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + return resourceAwsDmsReplicationTaskRead(d, meta) +} + +func resourceAwsDmsReplicationTaskRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).dmsconn + + response, err := conn.DescribeReplicationTasks(&dms.DescribeReplicationTasksInput{ + Filters: []*dms.Filter{ + { + Name: aws.String("replication-task-id"), + Values: []*string{aws.String(d.Id())}, // Must use d.Id() to work with import. + }, + }, + }) + if err != nil { + if dmserr, ok := err.(awserr.Error); ok && dmserr.Code() == "ResourceNotFoundFault" { + log.Printf("[DEBUG] DMS Replication Task %q Not Found", d.Id()) + d.SetId("") + return nil + } + return err + } + + err = resourceAwsDmsReplicationTaskSetState(d, response.ReplicationTasks[0]) + if err != nil { + return err + } + + tagsResp, err := conn.ListTagsForResource(&dms.ListTagsForResourceInput{ + ResourceArn: aws.String(d.Get("replication_task_arn").(string)), + }) + if err != nil { + return err + } + d.Set("tags", dmsTagsToMap(tagsResp.TagList)) + + return nil +} + +func resourceAwsDmsReplicationTaskUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).dmsconn + + request := &dms.ModifyReplicationTaskInput{ + ReplicationTaskArn: aws.String(d.Get("replication_task_arn").(string)), + } + hasChanges := false + + if d.HasChange("cdc_start_time") { + seconds, err := strconv.ParseInt(d.Get("cdc_start_time").(string), 10, 64) + if err != nil { + return fmt.Errorf("[ERROR] DMS update replication task. 
Invalid CDC Unix timestamp: %s", err) + } + request.CdcStartTime = aws.Time(time.Unix(seconds, 0)) + hasChanges = true + } + + if d.HasChange("migration_type") { + request.MigrationType = aws.String(d.Get("migration_type").(string)) + hasChanges = true + } + + if d.HasChange("replication_task_settings") { + request.ReplicationTaskSettings = aws.String(d.Get("replication_task_settings").(string)) + hasChanges = true + } + + if d.HasChange("table_mappings") { + request.TableMappings = aws.String(d.Get("table_mappings").(string)) + hasChanges = true + } + + if d.HasChange("tags") { + err := dmsSetTags(d.Get("replication_task_arn").(string), d, meta) + if err != nil { + return err + } + } + + if hasChanges { + log.Println("[DEBUG] DMS update replication task:", request) + + _, err := conn.ModifyReplicationTask(request) + if err != nil { + return err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"modifying"}, + Target: []string{"ready", "stopped", "failed"}, + Refresh: resourceAwsDmsReplicationTaskStateRefreshFunc(d, meta), + Timeout: d.Timeout(schema.TimeoutUpdate), + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, // Wait 30 secs before starting + } + + // Wait, catching any errors + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + return resourceAwsDmsReplicationTaskRead(d, meta) + } + + return nil +} + +func resourceAwsDmsReplicationTaskDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).dmsconn + + request := &dms.DeleteReplicationTaskInput{ + ReplicationTaskArn: aws.String(d.Get("replication_task_arn").(string)), + } + + log.Printf("[DEBUG] DMS delete replication task: %#v", request) + + _, err := conn.DeleteReplicationTask(request) + if err != nil { + if dmserr, ok := err.(awserr.Error); ok && dmserr.Code() == "ResourceNotFoundFault" { + log.Printf("[DEBUG] DMS Replication Task %q Not Found", d.Id()) + d.SetId("") + return nil + } + return err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"deleting"}, + Target: []string{}, + Refresh: resourceAwsDmsReplicationTaskStateRefreshFunc(d, meta), + Timeout: d.Timeout(schema.TimeoutDelete), + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, // Wait 30 secs before starting + } + + // Wait, catching any errors + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + return nil +} + +func resourceAwsDmsReplicationTaskSetState(d *schema.ResourceData, task *dms.ReplicationTask) error { + d.SetId(*task.ReplicationTaskIdentifier) + + d.Set("migration_type", task.MigrationType) + d.Set("replication_instance_arn", task.ReplicationInstanceArn) + d.Set("replication_task_arn", task.ReplicationTaskArn) + d.Set("replication_task_id", task.ReplicationTaskIdentifier) + d.Set("replication_task_settings", task.ReplicationTaskSettings) + d.Set("source_endpoint_arn", task.SourceEndpointArn) + d.Set("table_mappings", task.TableMappings) + d.Set("target_endpoint_arn", task.TargetEndpointArn) + + return nil +} + +func resourceAwsDmsReplicationTaskStateRefreshFunc( + d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + conn := meta.(*AWSClient).dmsconn + + v, err := conn.DescribeReplicationTasks(&dms.DescribeReplicationTasksInput{ + Filters: []*dms.Filter{ + { + Name: aws.String("replication-task-id"), + Values: []*string{aws.String(d.Id())}, // Must use d.Id() to work with import. 
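+ // A ResourceNotFoundFault from this call is treated below as "task gone" (empty state) so that delete waiters can complete.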
+ }, + }, + }) + if err != nil { + if dmserr, ok := err.(awserr.Error); ok && dmserr.Code() == "ResourceNotFoundFault" { + return nil, "", nil + } + log.Printf("Error on retrieving DMS Replication Task when waiting: %s", err) + return nil, "", err + } + + if v == nil { + return nil, "", nil + } + + if len(v.ReplicationTasks) == 0 { + return nil, "", nil + } + + log.Printf("[DEBUG] DMS Replication Task status for task %s: %s", d.Id(), *v.ReplicationTasks[0].Status) + + return v, *v.ReplicationTasks[0].Status, nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dynamodb_table.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dynamodb_table.go new file mode 100644 index 000000000..2644f164d --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dynamodb_table.go @@ -0,0 +1,1087 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/hashicorp/terraform/helper/hashcode" +) + +// Number of times to retry if a throttling-related exception occurs +const DYNAMODB_MAX_THROTTLE_RETRIES = 5 + +// How long to sleep when a throttle-event happens +const DYNAMODB_THROTTLE_SLEEP = 5 * time.Second + +// How long to sleep if a limit-exceeded event happens +const DYNAMODB_LIMIT_EXCEEDED_SLEEP = 10 * time.Second + +// A number of these are marked as computed because if you don't +// provide a value, DynamoDB will provide you with defaults (which are the +// default values specified below) +func resourceAwsDynamoDbTable() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsDynamoDbTableCreate, + Read: resourceAwsDynamoDbTableRead, + Update: resourceAwsDynamoDbTableUpdate, + Delete: resourceAwsDynamoDbTableDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + SchemaVersion: 1, + MigrateState: resourceAwsDynamoDbTableMigrateState, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "hash_key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "range_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "write_capacity": { + Type: schema.TypeInt, + Required: true, + }, + "read_capacity": { + Type: schema.TypeInt, + Required: true, + }, + "attribute": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) + return hashcode.String(buf.String()) + }, + }, + "ttl": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "attribute_name": { + Type: schema.TypeString, + Required: true, + }, + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + "local_secondary_index": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "range_key": { + Type: schema.TypeString, + Required: true, + }, + "projection_type": { + Type: schema.TypeString, + Required: true, + }, + "non_key_attributes": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) + return hashcode.String(buf.String()) + }, + }, + "global_secondary_index": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "write_capacity": { + Type: schema.TypeInt, + Required: true, + }, + "read_capacity": { + Type: schema.TypeInt, + Required: true, + }, + "hash_key": { + Type: schema.TypeString, + Required: true, + }, + "range_key": { + Type: schema.TypeString, + Optional: true, + }, + "projection_type": { + Type: schema.TypeString, + Required: true, + }, + "non_key_attributes": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "stream_enabled": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "stream_view_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + StateFunc: func(v interface{}) string { + value := v.(string) + return strings.ToUpper(value) + }, + ValidateFunc: validateStreamViewType, + }, + "stream_arn": { + Type: schema.TypeString, + Computed: true, + }, + "tags": tagsSchema(), + }, + } +} + +func resourceAwsDynamoDbTableCreate(d *schema.ResourceData, meta interface{}) error { + dynamodbconn := meta.(*AWSClient).dynamodbconn + + name := d.Get("name").(string) + + log.Printf("[DEBUG] DynamoDB table create: %s", name) + + throughput := &dynamodb.ProvisionedThroughput{ + ReadCapacityUnits: aws.Int64(int64(d.Get("read_capacity").(int))), + WriteCapacityUnits: aws.Int64(int64(d.Get("write_capacity").(int))), + } + + hash_key_name := d.Get("hash_key").(string) + keyschema := []*dynamodb.KeySchemaElement{ + { + AttributeName: aws.String(hash_key_name), + KeyType: aws.String("HASH"), + }, + } + + if range_key, ok := d.GetOk("range_key"); ok { + range_schema_element := &dynamodb.KeySchemaElement{ + AttributeName: aws.String(range_key.(string)), + KeyType: aws.String("RANGE"), + } + keyschema = append(keyschema, range_schema_element) + } + + req := &dynamodb.CreateTableInput{ + TableName: aws.String(name), + ProvisionedThroughput: throughput, + KeySchema: keyschema, + } + + if attributedata, ok := d.GetOk("attribute"); ok { + attributes := []*dynamodb.AttributeDefinition{} + attributeSet := attributedata.(*schema.Set) + for _, attribute := range attributeSet.List() { + attr := attribute.(map[string]interface{}) + attributes = append(attributes, &dynamodb.AttributeDefinition{ + AttributeName: aws.String(attr["name"].(string)), + AttributeType: aws.String(attr["type"].(string)), + }) + } + + req.AttributeDefinitions = attributes + } + + if lsidata, ok := d.GetOk("local_secondary_index"); ok { + log.Printf("[DEBUG] Adding LSI data to the table") + + lsiSet := lsidata.(*schema.Set) + localSecondaryIndexes := []*dynamodb.LocalSecondaryIndex{} + for _, lsiObject := range lsiSet.List() { + lsi := lsiObject.(map[string]interface{}) + + projection := &dynamodb.Projection{ + ProjectionType: aws.String(lsi["projection_type"].(string)), + } + + if 
lsi["projection_type"] == "INCLUDE" { + non_key_attributes := []*string{} + for _, attr := range lsi["non_key_attributes"].([]interface{}) { + non_key_attributes = append(non_key_attributes, aws.String(attr.(string))) + } + projection.NonKeyAttributes = non_key_attributes + } + + localSecondaryIndexes = append(localSecondaryIndexes, &dynamodb.LocalSecondaryIndex{ + IndexName: aws.String(lsi["name"].(string)), + KeySchema: []*dynamodb.KeySchemaElement{ + { + AttributeName: aws.String(hash_key_name), + KeyType: aws.String("HASH"), + }, + { + AttributeName: aws.String(lsi["range_key"].(string)), + KeyType: aws.String("RANGE"), + }, + }, + Projection: projection, + }) + } + + req.LocalSecondaryIndexes = localSecondaryIndexes + + log.Printf("[DEBUG] Added %d LSI definitions", len(localSecondaryIndexes)) + } + + if gsidata, ok := d.GetOk("global_secondary_index"); ok { + globalSecondaryIndexes := []*dynamodb.GlobalSecondaryIndex{} + + gsiSet := gsidata.(*schema.Set) + for _, gsiObject := range gsiSet.List() { + gsi := gsiObject.(map[string]interface{}) + gsiObject := createGSIFromData(&gsi) + globalSecondaryIndexes = append(globalSecondaryIndexes, &gsiObject) + } + req.GlobalSecondaryIndexes = globalSecondaryIndexes + } + + if _, ok := d.GetOk("stream_enabled"); ok { + + req.StreamSpecification = &dynamodb.StreamSpecification{ + StreamEnabled: aws.Bool(d.Get("stream_enabled").(bool)), + StreamViewType: aws.String(d.Get("stream_view_type").(string)), + } + + log.Printf("[DEBUG] Adding StreamSpecifications to the table") + } + + _, timeToLiveOk := d.GetOk("ttl") + _, tagsOk := d.GetOk("tags") + + attemptCount := 1 + for attemptCount <= DYNAMODB_MAX_THROTTLE_RETRIES { + output, err := dynamodbconn.CreateTable(req) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + switch code := awsErr.Code(); code { + case "ThrottlingException": + log.Printf("[DEBUG] Attempt %d/%d: Sleeping for a bit to throttle back create request", attemptCount, DYNAMODB_MAX_THROTTLE_RETRIES) + time.Sleep(DYNAMODB_THROTTLE_SLEEP) + attemptCount += 1 + case "LimitExceededException": + // If we're at resource capacity, error out without retry + if strings.Contains(awsErr.Message(), "Subscriber limit exceeded:") { + return fmt.Errorf("AWS Error creating DynamoDB table: %s", err) + } + log.Printf("[DEBUG] Limit on concurrent table creations hit, sleeping for a bit") + time.Sleep(DYNAMODB_LIMIT_EXCEEDED_SLEEP) + attemptCount += 1 + default: + // Some other non-retryable exception occurred + return fmt.Errorf("AWS Error creating DynamoDB table: %s", err) + } + } else { + // Non-AWS exception occurred, give up + return fmt.Errorf("Error creating DynamoDB table: %s", err) + } + } else { + // No error, set ID and return + d.SetId(*output.TableDescription.TableName) + tableArn := *output.TableDescription.TableArn + if err := d.Set("arn", tableArn); err != nil { + return err + } + + // Wait, till table is active before imitating any TimeToLive changes + if err := waitForTableToBeActive(d.Id(), meta); err != nil { + log.Printf("[DEBUG] Error waiting for table to be active: %s", err) + return err + } + + log.Printf("[DEBUG] Setting DynamoDB TimeToLive on arn: %s", tableArn) + if timeToLiveOk { + if err := updateTimeToLive(d, meta); err != nil { + log.Printf("[DEBUG] Error updating table TimeToLive: %s", err) + return err + } + } + + if tagsOk { + log.Printf("[DEBUG] Setting DynamoDB Tags on arn: %s", tableArn) + if err := createTableTags(d, meta); err != nil { + return err + } + } + + return resourceAwsDynamoDbTableRead(d, meta) 
+ } + } + + // Too many throttling events occurred, give up + return fmt.Errorf("Unable to create DynamoDB table '%s' after %d attempts", name, attemptCount) +} + +func resourceAwsDynamoDbTableUpdate(d *schema.ResourceData, meta interface{}) error { + + log.Printf("[DEBUG] Updating DynamoDB table %s", d.Id()) + dynamodbconn := meta.(*AWSClient).dynamodbconn + + // Ensure table is active before trying to update + if err := waitForTableToBeActive(d.Id(), meta); err != nil { + return errwrap.Wrapf("Error waiting for Dynamo DB Table update: {{err}}", err) + } + + if d.HasChange("read_capacity") || d.HasChange("write_capacity") { + req := &dynamodb.UpdateTableInput{ + TableName: aws.String(d.Id()), + } + + throughput := &dynamodb.ProvisionedThroughput{ + ReadCapacityUnits: aws.Int64(int64(d.Get("read_capacity").(int))), + WriteCapacityUnits: aws.Int64(int64(d.Get("write_capacity").(int))), + } + req.ProvisionedThroughput = throughput + + _, err := dynamodbconn.UpdateTable(req) + + if err != nil { + return err + } + + if err := waitForTableToBeActive(d.Id(), meta); err != nil { + return errwrap.Wrapf("Error waiting for Dynamo DB Table update: {{err}}", err) + } + } + + if d.HasChange("stream_enabled") || d.HasChange("stream_view_type") { + req := &dynamodb.UpdateTableInput{ + TableName: aws.String(d.Id()), + } + + req.StreamSpecification = &dynamodb.StreamSpecification{ + StreamEnabled: aws.Bool(d.Get("stream_enabled").(bool)), + StreamViewType: aws.String(d.Get("stream_view_type").(string)), + } + + _, err := dynamodbconn.UpdateTable(req) + + if err != nil { + return err + } + + if err := waitForTableToBeActive(d.Id(), meta); err != nil { + return errwrap.Wrapf("Error waiting for Dynamo DB Table update: {{err}}", err) + } + } + + if d.HasChange("global_secondary_index") { + log.Printf("[DEBUG] Changed GSI data") + req := &dynamodb.UpdateTableInput{ + TableName: aws.String(d.Id()), + } + + o, n := d.GetChange("global_secondary_index") + + oldSet := o.(*schema.Set) + newSet := n.(*schema.Set) + + // Track old names so we can know which ones we need to just update based on + // capacity changes, terraform appears to only diff on the set hash, not the + // contents so we need to make sure we don't delete any indexes that we + // just want to update the capacity for + oldGsiNameSet := make(map[string]bool) + newGsiNameSet := make(map[string]bool) + + for _, gsidata := range oldSet.List() { + gsiName := gsidata.(map[string]interface{})["name"].(string) + oldGsiNameSet[gsiName] = true + } + + for _, gsidata := range newSet.List() { + gsiName := gsidata.(map[string]interface{})["name"].(string) + newGsiNameSet[gsiName] = true + } + + // First determine what's new + for _, newgsidata := range newSet.List() { + updates := []*dynamodb.GlobalSecondaryIndexUpdate{} + newGsiName := newgsidata.(map[string]interface{})["name"].(string) + if _, exists := oldGsiNameSet[newGsiName]; !exists { + attributes := []*dynamodb.AttributeDefinition{} + gsidata := newgsidata.(map[string]interface{}) + gsi := createGSIFromData(&gsidata) + log.Printf("[DEBUG] Adding GSI %s", *gsi.IndexName) + update := &dynamodb.GlobalSecondaryIndexUpdate{ + Create: &dynamodb.CreateGlobalSecondaryIndexAction{ + IndexName: gsi.IndexName, + KeySchema: gsi.KeySchema, + ProvisionedThroughput: gsi.ProvisionedThroughput, + Projection: gsi.Projection, + }, + } + updates = append(updates, update) + + // Hash key is required, range key isn't + hashkey_type, err := getAttributeType(d, *gsi.KeySchema[0].AttributeName) + if err != nil { + return err + } 
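+ // DynamoDB requires every key attribute of a newly created GSI to also appear in the table's AttributeDefinitions (each key declared with an "S"/"N"/"B" type), so the types are looked up from the configured "attribute" set before sending the update.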
+ + attributes = append(attributes, &dynamodb.AttributeDefinition{ + AttributeName: gsi.KeySchema[0].AttributeName, + AttributeType: aws.String(hashkey_type), + }) + + // If there's a range key, there will be 2 elements in KeySchema + if len(gsi.KeySchema) == 2 { + rangekey_type, err := getAttributeType(d, *gsi.KeySchema[1].AttributeName) + if err != nil { + return err + } + + attributes = append(attributes, &dynamodb.AttributeDefinition{ + AttributeName: gsi.KeySchema[1].AttributeName, + AttributeType: aws.String(rangekey_type), + }) + } + + req.AttributeDefinitions = attributes + req.GlobalSecondaryIndexUpdates = updates + _, err = dynamodbconn.UpdateTable(req) + + if err != nil { + return err + } + + if err := waitForTableToBeActive(d.Id(), meta); err != nil { + return errwrap.Wrapf("Error waiting for Dynamo DB Table update: {{err}}", err) + } + + if err := waitForGSIToBeActive(d.Id(), *gsi.IndexName, meta); err != nil { + return errwrap.Wrapf("Error waiting for Dynamo DB GSIT to be active: {{err}}", err) + } + + } + } + + for _, oldgsidata := range oldSet.List() { + updates := []*dynamodb.GlobalSecondaryIndexUpdate{} + oldGsiName := oldgsidata.(map[string]interface{})["name"].(string) + if _, exists := newGsiNameSet[oldGsiName]; !exists { + gsidata := oldgsidata.(map[string]interface{}) + log.Printf("[DEBUG] Deleting GSI %s", gsidata["name"].(string)) + update := &dynamodb.GlobalSecondaryIndexUpdate{ + Delete: &dynamodb.DeleteGlobalSecondaryIndexAction{ + IndexName: aws.String(gsidata["name"].(string)), + }, + } + updates = append(updates, update) + + req.GlobalSecondaryIndexUpdates = updates + _, err := dynamodbconn.UpdateTable(req) + + if err != nil { + return err + } + + if err := waitForTableToBeActive(d.Id(), meta); err != nil { + return errwrap.Wrapf("Error waiting for Dynamo DB Table update: {{err}}", err) + } + } + } + } + + // Update any out-of-date read / write capacity + if gsiObjects, ok := d.GetOk("global_secondary_index"); ok { + gsiSet := gsiObjects.(*schema.Set) + if len(gsiSet.List()) > 0 { + log.Printf("Updating capacity as needed!") + + // We can only change throughput, but we need to make sure it's actually changed + tableDescription, err := dynamodbconn.DescribeTable(&dynamodb.DescribeTableInput{ + TableName: aws.String(d.Id()), + }) + + if err != nil { + return err + } + + table := tableDescription.Table + + for _, updatedgsidata := range gsiSet.List() { + updates := []*dynamodb.GlobalSecondaryIndexUpdate{} + gsidata := updatedgsidata.(map[string]interface{}) + gsiName := gsidata["name"].(string) + gsiWriteCapacity := gsidata["write_capacity"].(int) + gsiReadCapacity := gsidata["read_capacity"].(int) + + log.Printf("[DEBUG] Updating GSI %s", gsiName) + gsi, err := getGlobalSecondaryIndex(gsiName, table.GlobalSecondaryIndexes) + + if err != nil { + return err + } + + capacityUpdated := false + + if int64(gsiReadCapacity) != *gsi.ProvisionedThroughput.ReadCapacityUnits || + int64(gsiWriteCapacity) != *gsi.ProvisionedThroughput.WriteCapacityUnits { + capacityUpdated = true + } + + if capacityUpdated { + update := &dynamodb.GlobalSecondaryIndexUpdate{ + Update: &dynamodb.UpdateGlobalSecondaryIndexAction{ + IndexName: aws.String(gsidata["name"].(string)), + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ + WriteCapacityUnits: aws.Int64(int64(gsiWriteCapacity)), + ReadCapacityUnits: aws.Int64(int64(gsiReadCapacity)), + }, + }, + } + updates = append(updates, update) + + } + + if len(updates) > 0 { + + req := &dynamodb.UpdateTableInput{ + TableName: 
aws.String(d.Id()), + } + + req.GlobalSecondaryIndexUpdates = updates + + log.Printf("[DEBUG] Updating GSI read / write capacity on %s", d.Id()) + _, err := dynamodbconn.UpdateTable(req) + + if err != nil { + log.Printf("[DEBUG] Error updating table: %s", err) + return err + } + + if err := waitForGSIToBeActive(d.Id(), gsiName, meta); err != nil { + return errwrap.Wrapf("Error waiting for Dynamo DB GSI to be active: {{err}}", err) + } + } + } + } + + } + + if d.HasChange("ttl") { + if err := updateTimeToLive(d, meta); err != nil { + log.Printf("[DEBUG] Error updating table TimeToLive: %s", err) + return err + } + } + + // Update tags + if err := setTagsDynamoDb(dynamodbconn, d); err != nil { + return err + } + + return resourceAwsDynamoDbTableRead(d, meta) +} + +func updateTimeToLive(d *schema.ResourceData, meta interface{}) error { + dynamodbconn := meta.(*AWSClient).dynamodbconn + + if ttl, ok := d.GetOk("ttl"); ok { + + timeToLiveSet := ttl.(*schema.Set) + + spec := &dynamodb.TimeToLiveSpecification{} + + timeToLive := timeToLiveSet.List()[0].(map[string]interface{}) + spec.AttributeName = aws.String(timeToLive["attribute_name"].(string)) + spec.Enabled = aws.Bool(timeToLive["enabled"].(bool)) + + req := &dynamodb.UpdateTimeToLiveInput{ + TableName: aws.String(d.Id()), + TimeToLiveSpecification: spec, + } + + _, err := dynamodbconn.UpdateTimeToLive(req) + + if err != nil { + // If ttl was not set within the .tf file before and has now been added we still run this command to update + // But there has been no change so lets continue + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ValidationException" && awsErr.Message() == "TimeToLive is already disabled" { + return nil + } + log.Printf("[DEBUG] Error updating TimeToLive on table: %s", err) + return err + } + + log.Printf("[DEBUG] Updated TimeToLive on table") + + if err := waitForTimeToLiveUpdateToBeCompleted(d.Id(), timeToLive["enabled"].(bool), meta); err != nil { + return errwrap.Wrapf("Error waiting for Dynamo DB TimeToLive to be updated: {{err}}", err) + } + } + + return nil +} + +func resourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) error { + dynamodbconn := meta.(*AWSClient).dynamodbconn + log.Printf("[DEBUG] Loading data for DynamoDB table '%s'", d.Id()) + req := &dynamodb.DescribeTableInput{ + TableName: aws.String(d.Id()), + } + + result, err := dynamodbconn.DescribeTable(req) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceNotFoundException" { + log.Printf("[WARN] Dynamodb Table (%s) not found, error code (404)", d.Id()) + d.SetId("") + return nil + } + return err + } + + table := result.Table + + d.Set("write_capacity", table.ProvisionedThroughput.WriteCapacityUnits) + d.Set("read_capacity", table.ProvisionedThroughput.ReadCapacityUnits) + + attributes := []interface{}{} + for _, attrdef := range table.AttributeDefinitions { + attribute := map[string]string{ + "name": *attrdef.AttributeName, + "type": *attrdef.AttributeType, + } + attributes = append(attributes, attribute) + log.Printf("[DEBUG] Added Attribute: %s", attribute["name"]) + } + + d.Set("attribute", attributes) + d.Set("name", table.TableName) + + for _, attribute := range table.KeySchema { + if *attribute.KeyType == "HASH" { + d.Set("hash_key", attribute.AttributeName) + } + + if *attribute.KeyType == "RANGE" { + d.Set("range_key", attribute.AttributeName) + } + } + + lsiList := make([]map[string]interface{}, 0, len(table.LocalSecondaryIndexes)) + for _, lsiObject := range 
table.LocalSecondaryIndexes { + lsi := map[string]interface{}{ + "name": *lsiObject.IndexName, + "projection_type": *lsiObject.Projection.ProjectionType, + } + + for _, attribute := range lsiObject.KeySchema { + + if *attribute.KeyType == "RANGE" { + lsi["range_key"] = *attribute.AttributeName + } + } + nkaList := make([]string, 0, len(lsiObject.Projection.NonKeyAttributes)) + for _, nka := range lsiObject.Projection.NonKeyAttributes { + nkaList = append(nkaList, *nka) + } + lsi["non_key_attributes"] = nkaList + + lsiList = append(lsiList, lsi) + } + + err = d.Set("local_secondary_index", lsiList) + if err != nil { + return err + } + + gsiList := make([]map[string]interface{}, 0, len(table.GlobalSecondaryIndexes)) + for _, gsiObject := range table.GlobalSecondaryIndexes { + gsi := map[string]interface{}{ + "write_capacity": *gsiObject.ProvisionedThroughput.WriteCapacityUnits, + "read_capacity": *gsiObject.ProvisionedThroughput.ReadCapacityUnits, + "name": *gsiObject.IndexName, + } + + for _, attribute := range gsiObject.KeySchema { + if *attribute.KeyType == "HASH" { + gsi["hash_key"] = *attribute.AttributeName + } + + if *attribute.KeyType == "RANGE" { + gsi["range_key"] = *attribute.AttributeName + } + } + + gsi["projection_type"] = *(gsiObject.Projection.ProjectionType) + + nonKeyAttrs := make([]string, 0, len(gsiObject.Projection.NonKeyAttributes)) + for _, nonKeyAttr := range gsiObject.Projection.NonKeyAttributes { + nonKeyAttrs = append(nonKeyAttrs, *nonKeyAttr) + } + gsi["non_key_attributes"] = nonKeyAttrs + + gsiList = append(gsiList, gsi) + log.Printf("[DEBUG] Added GSI: %s - Read: %d / Write: %d", gsi["name"], gsi["read_capacity"], gsi["write_capacity"]) + } + + if table.StreamSpecification != nil { + d.Set("stream_view_type", table.StreamSpecification.StreamViewType) + d.Set("stream_enabled", table.StreamSpecification.StreamEnabled) + d.Set("stream_arn", table.LatestStreamArn) + } + + err = d.Set("global_secondary_index", gsiList) + if err != nil { + return err + } + + d.Set("arn", table.TableArn) + + timeToLiveReq := &dynamodb.DescribeTimeToLiveInput{ + TableName: aws.String(d.Id()), + } + timeToLiveOutput, err := dynamodbconn.DescribeTimeToLive(timeToLiveReq) + if err != nil { + return err + } + // Populate the configured "ttl" block using the schema's attribute_name/enabled keys. + if desc := timeToLiveOutput.TimeToLiveDescription; desc != nil && desc.AttributeName != nil { + timeToLive := []interface{}{ + map[string]interface{}{ + "attribute_name": aws.StringValue(desc.AttributeName), + "enabled": aws.StringValue(desc.TimeToLiveStatus) == dynamodb.TimeToLiveStatusEnabled, + }, + } + d.Set("ttl", timeToLive) + } + + log.Printf("[DEBUG] Loaded TimeToLive data for DynamoDB table '%s'", d.Id()) + + tags, err := readTableTags(d, meta) + if err != nil { + return err + } + if len(tags) != 0 { + d.Set("tags", tags) + } + + return nil +} + +func resourceAwsDynamoDbTableDelete(d *schema.ResourceData, meta interface{}) error { + dynamodbconn := meta.(*AWSClient).dynamodbconn + + if err := waitForTableToBeActive(d.Id(), meta); err != nil { + return errwrap.Wrapf("Error waiting for Dynamo DB Table update: {{err}}", err) + } + + log.Printf("[DEBUG] DynamoDB delete table: %s", d.Id()) + + _, err := dynamodbconn.DeleteTable(&dynamodb.DeleteTableInput{ + TableName: aws.String(d.Id()), + }) + if err != nil { + return err + } + + params := &dynamodb.DescribeTableInput{ + TableName: aws.String(d.Id()), + } + + err = resource.Retry(10*time.Minute, func() *resource.RetryError { + t, err := dynamodbconn.DescribeTable(params) + if err != nil { + if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "ResourceNotFoundException" { + return 
nil + } + // Didn't recognize the error, so shouldn't retry. + return resource.NonRetryableError(err) + } + + if t != nil { + if t.Table.TableStatus != nil && strings.ToLower(*t.Table.TableStatus) == "deleting" { + log.Printf("[DEBUG] AWS Dynamo DB table (%s) is still deleting", d.Id()) + return resource.RetryableError(fmt.Errorf("still deleting")) + } + } + + // we should be not found or deleting, so error here + return resource.NonRetryableError(err) + }) + + // check error from retry + if err != nil { + return err + } + + return nil +} + +func createGSIFromData(data *map[string]interface{}) dynamodb.GlobalSecondaryIndex { + + projection := &dynamodb.Projection{ + ProjectionType: aws.String((*data)["projection_type"].(string)), + } + + if (*data)["projection_type"] == "INCLUDE" { + non_key_attributes := []*string{} + for _, attr := range (*data)["non_key_attributes"].([]interface{}) { + non_key_attributes = append(non_key_attributes, aws.String(attr.(string))) + } + projection.NonKeyAttributes = non_key_attributes + } + + writeCapacity := (*data)["write_capacity"].(int) + readCapacity := (*data)["read_capacity"].(int) + + key_schema := []*dynamodb.KeySchemaElement{ + { + AttributeName: aws.String((*data)["hash_key"].(string)), + KeyType: aws.String("HASH"), + }, + } + + range_key_name := (*data)["range_key"] + if range_key_name != "" { + range_key_element := &dynamodb.KeySchemaElement{ + AttributeName: aws.String(range_key_name.(string)), + KeyType: aws.String("RANGE"), + } + + key_schema = append(key_schema, range_key_element) + } + + return dynamodb.GlobalSecondaryIndex{ + IndexName: aws.String((*data)["name"].(string)), + KeySchema: key_schema, + Projection: projection, + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ + WriteCapacityUnits: aws.Int64(int64(writeCapacity)), + ReadCapacityUnits: aws.Int64(int64(readCapacity)), + }, + } +} + +func getGlobalSecondaryIndex(indexName string, indexList []*dynamodb.GlobalSecondaryIndexDescription) (*dynamodb.GlobalSecondaryIndexDescription, error) { + for _, gsi := range indexList { + if *gsi.IndexName == indexName { + return gsi, nil + } + } + + return &dynamodb.GlobalSecondaryIndexDescription{}, fmt.Errorf("Can't find a GSI by that name...") +} + +func getAttributeType(d *schema.ResourceData, attributeName string) (string, error) { + if attributedata, ok := d.GetOk("attribute"); ok { + attributeSet := attributedata.(*schema.Set) + for _, attribute := range attributeSet.List() { + attr := attribute.(map[string]interface{}) + if attr["name"] == attributeName { + return attr["type"].(string), nil + } + } + } + + return "", fmt.Errorf("Unable to find an attribute named %s", attributeName) +} + +func waitForGSIToBeActive(tableName string, gsiName string, meta interface{}) error { + dynamodbconn := meta.(*AWSClient).dynamodbconn + req := &dynamodb.DescribeTableInput{ + TableName: aws.String(tableName), + } + + activeIndex := false + + for activeIndex == false { + + result, err := dynamodbconn.DescribeTable(req) + + if err != nil { + return err + } + + table := result.Table + var targetGSI *dynamodb.GlobalSecondaryIndexDescription = nil + + for _, gsi := range table.GlobalSecondaryIndexes { + if *gsi.IndexName == gsiName { + targetGSI = gsi + } + } + + if targetGSI != nil { + activeIndex = *targetGSI.IndexStatus == "ACTIVE" + + if !activeIndex { + log.Printf("[DEBUG] Sleeping for 5 seconds for %s GSI to become active", gsiName) + time.Sleep(5 * time.Second) + } + } else { + log.Printf("[DEBUG] GSI %s did not exist, giving up", gsiName) + 
break + } + } + + return nil + +} + +func waitForTableToBeActive(tableName string, meta interface{}) error { + dynamodbconn := meta.(*AWSClient).dynamodbconn + req := &dynamodb.DescribeTableInput{ + TableName: aws.String(tableName), + } + + activeState := false + + for activeState == false { + result, err := dynamodbconn.DescribeTable(req) + + if err != nil { + return err + } + + activeState = *result.Table.TableStatus == "ACTIVE" + + // Wait for a few seconds + if !activeState { + log.Printf("[DEBUG] Sleeping for 5 seconds for table to become active") + time.Sleep(5 * time.Second) + } + } + + return nil + +} + +func waitForTimeToLiveUpdateToBeCompleted(tableName string, enabled bool, meta interface{}) error { + dynamodbconn := meta.(*AWSClient).dynamodbconn + req := &dynamodb.DescribeTimeToLiveInput{ + TableName: aws.String(tableName), + } + + stateMatched := false + for stateMatched == false { + result, err := dynamodbconn.DescribeTimeToLive(req) + + if err != nil { + return err + } + + if enabled { + stateMatched = *result.TimeToLiveDescription.TimeToLiveStatus == dynamodb.TimeToLiveStatusEnabled + } else { + stateMatched = *result.TimeToLiveDescription.TimeToLiveStatus == dynamodb.TimeToLiveStatusDisabled + } + + // Wait for a few seconds, this may take a long time... + if !stateMatched { + log.Printf("[DEBUG] Sleeping for 5 seconds before checking TimeToLive state again") + time.Sleep(5 * time.Second) + } + } + + log.Printf("[DEBUG] TimeToLive update complete") + + return nil + +} + +func createTableTags(d *schema.ResourceData, meta interface{}) error { + // DynamoDB Table has to be in the ACTIVE state in order to tag the resource + if err := waitForTableToBeActive(d.Id(), meta); err != nil { + return err + } + tags := d.Get("tags").(map[string]interface{}) + arn := d.Get("arn").(string) + dynamodbconn := meta.(*AWSClient).dynamodbconn + req := &dynamodb.TagResourceInput{ + ResourceArn: aws.String(arn), + Tags: tagsFromMapDynamoDb(tags), + } + _, err := dynamodbconn.TagResource(req) + if err != nil { + return fmt.Errorf("Error tagging dynamodb resource: %s", err) + } + return nil +} + +func readTableTags(d *schema.ResourceData, meta interface{}) (map[string]string, error) { + if err := waitForTableToBeActive(d.Id(), meta); err != nil { + return nil, err + } + arn := d.Get("arn").(string) + //result := make(map[string]string) + + dynamodbconn := meta.(*AWSClient).dynamodbconn + req := &dynamodb.ListTagsOfResourceInput{ + ResourceArn: aws.String(arn), + } + + output, err := dynamodbconn.ListTagsOfResource(req) + if err != nil { + return nil, fmt.Errorf("Error reading tags from dynamodb resource: %s", err) + } + result := tagsToMapDynamoDb(output.Tags) + // TODO Read NextToken if avail + return result, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dynamodb_table_migrate.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dynamodb_table_migrate.go new file mode 100644 index 000000000..59865effc --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_dynamodb_table_migrate.go @@ -0,0 +1,70 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" + "strings" +) + +func resourceAwsDynamoDbTableMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + switch v { + case 0: + log.Println("[INFO] Found AWS DynamoDB Table State v0; migrating 
to v1") + return migrateDynamoDBStateV0toV1(is) + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateDynamoDBStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + log.Printf("[DEBUG] DynamoDB Table Attributes before Migration: %#v", is.Attributes) + + prefix := "global_secondary_index" + entity := resourceAwsDynamoDbTable() + + // Read old keys + reader := &schema.MapFieldReader{ + Schema: entity.Schema, + Map: schema.BasicMapReader(is.Attributes), + } + result, err := reader.ReadField([]string{prefix}) + if err != nil { + return nil, err + } + + oldKeys, ok := result.Value.(*schema.Set) + if !ok { + return nil, fmt.Errorf("Got unexpected value from state: %#v", result.Value) + } + + // Delete old keys + for k := range is.Attributes { + if strings.HasPrefix(k, fmt.Sprintf("%s.", prefix)) { + delete(is.Attributes, k) + } + } + + // Write new keys + writer := schema.MapFieldWriter{ + Schema: entity.Schema, + } + if err := writer.WriteField([]string{prefix}, oldKeys); err != nil { + return is, err + } + for k, v := range writer.Map() { + is.Attributes[k] = v + } + + log.Printf("[DEBUG] DynamoDB Table Attributes after State Migration: %#v", is.Attributes) + + return is, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ebs_snapshot.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ebs_snapshot.go new file mode 100644 index 000000000..f444df4ef --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ebs_snapshot.go @@ -0,0 +1,159 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsEbsSnapshot() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsEbsSnapshotCreate, + Read: resourceAwsEbsSnapshotRead, + Delete: resourceAwsEbsSnapshotDelete, + + Schema: map[string]*schema.Schema{ + "volume_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "owner_alias": { + Type: schema.TypeString, + Computed: true, + }, + "encrypted": { + Type: schema.TypeBool, + Computed: true, + }, + "volume_size": { + Type: schema.TypeInt, + Computed: true, + }, + "kms_key_id": { + Type: schema.TypeString, + Computed: true, + }, + "data_encryption_key_id": { + Type: schema.TypeString, + Computed: true, + }, + + "tags": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsEbsSnapshotCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + request := &ec2.CreateSnapshotInput{ + VolumeId: aws.String(d.Get("volume_id").(string)), + } + if v, ok := d.GetOk("description"); ok { + request.Description = aws.String(v.(string)) + } + + res, err := conn.CreateSnapshot(request) + if err != nil { + return err + } + + d.SetId(*res.SnapshotId) + + err = resourceAwsEbsSnapshotWaitForAvailable(d.Id(), conn) + if err != nil { + return err + } + + if err := setTags(conn, d); err != nil { + log.Printf("[WARN] error setting tags: %s", err) + } + 
+	return resourceAwsEbsSnapshotRead(d, meta)
+}
+
+func resourceAwsEbsSnapshotRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	req := &ec2.DescribeSnapshotsInput{
+		SnapshotIds: []*string{aws.String(d.Id())},
+	}
+	res, err := conn.DescribeSnapshots(req)
+	if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidSnapshotID.NotFound" {
+		log.Printf("[WARN] Snapshot %q not found - removing from state", d.Id())
+		d.SetId("")
+		return nil
+	}
+	if err != nil {
+		return err
+	}
+
+	snapshot := res.Snapshots[0]
+
+	d.Set("description", snapshot.Description)
+	d.Set("owner_id", snapshot.OwnerId)
+	d.Set("encrypted", snapshot.Encrypted)
+	d.Set("owner_alias", snapshot.OwnerAlias)
+	d.Set("volume_id", snapshot.VolumeId)
+	d.Set("data_encryption_key_id", snapshot.DataEncryptionKeyId)
+	d.Set("kms_key_id", snapshot.KmsKeyId)
+	d.Set("volume_size", snapshot.VolumeSize)
+
+	if err := d.Set("tags", tagsToMap(snapshot.Tags)); err != nil {
+		log.Printf("[WARN] error saving tags to state: %s", err)
+	}
+
+	return nil
+}
+
+func resourceAwsEbsSnapshotDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	return resource.Retry(5*time.Minute, func() *resource.RetryError {
+		request := &ec2.DeleteSnapshotInput{
+			SnapshotId: aws.String(d.Id()),
+		}
+		_, err := conn.DeleteSnapshot(request)
+		if err == nil {
+			return nil
+		}
+
+		ebsErr, ok := err.(awserr.Error)
+		if !ok {
+			return resource.NonRetryableError(err)
+		}
+
+		if ebsErr.Code() == "SnapshotInUse" {
+			return resource.RetryableError(fmt.Errorf("EBS SnapshotInUse - trying again while it detaches"))
+		}
+
+		return resource.NonRetryableError(err)
+	})
+}
+
+func resourceAwsEbsSnapshotWaitForAvailable(id string, conn *ec2.EC2) error {
+	log.Printf("Waiting for Snapshot %s to become available...", id)
+
+	req := &ec2.DescribeSnapshotsInput{
+		SnapshotIds: []*string{aws.String(id)},
+	}
+	return conn.WaitUntilSnapshotCompleted(req)
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ebs_volume.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ebs_volume.go
new file mode 100644
index 000000000..1beda135e
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ebs_volume.go
@@ -0,0 +1,305 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/ec2"
+
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsEbsVolume() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsEbsVolumeCreate,
+		Read:   resourceAwsEbsVolumeRead,
+		Update: resourceAWSEbsVolumeUpdate,
+		Delete: resourceAwsEbsVolumeDelete,
+		Importer: &schema.ResourceImporter{
+			State: schema.ImportStatePassthrough,
+		},
+
+		Schema: map[string]*schema.Schema{
+			"availability_zone": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"encrypted": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				Computed: true,
+				ForceNew: true,
+			},
+			"iops": {
+				Type:     schema.TypeInt,
+				Optional: true,
+				Computed: true,
+			},
+			"kms_key_id": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				Computed:     true,
+				ForceNew:     true,
+				ValidateFunc: validateArn,
+			},
+			"size": {
+				Type:     schema.TypeInt,
+				Optional: true,
+				Computed: true,
+			},
+			"snapshot_id": {
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+				ForceNew: true,
+			},
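+			// Note: like "size" and "iops" above, "type" below is both Optional and
+			// Computed; when it is omitted from the configuration, Terraform adopts
+			// whatever value AWS reports back (e.g. the default volume type) instead
+			// of diffing against an empty value.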
+			"type": {
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+			},
+			"tags": tagsSchema(),
+		},
+	}
+}
+
+func resourceAwsEbsVolumeCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	request := &ec2.CreateVolumeInput{
+		AvailabilityZone: aws.String(d.Get("availability_zone").(string)),
+	}
+	if value, ok := d.GetOk("encrypted"); ok {
+		request.Encrypted = aws.Bool(value.(bool))
+	}
+	if value, ok := d.GetOk("kms_key_id"); ok {
+		request.KmsKeyId = aws.String(value.(string))
+	}
+	if value, ok := d.GetOk("size"); ok {
+		request.Size = aws.Int64(int64(value.(int)))
+	}
+	if value, ok := d.GetOk("snapshot_id"); ok {
+		request.SnapshotId = aws.String(value.(string))
+	}
+
+	// IOPS are only valid, and required for, storage type io1. The current
+	// minimum is 100. Instead of a hard validation we only apply the IOPS to
+	// the request if the type is io1, and log a warning otherwise. This allows
+	// users to "disable" iops. See https://github.com/hashicorp/terraform/pull/4146
+	var t string
+	if value, ok := d.GetOk("type"); ok {
+		t = value.(string)
+		request.VolumeType = aws.String(t)
+	}
+
+	iops := d.Get("iops").(int)
+	if t != "io1" && iops > 0 {
+		log.Printf("[WARN] IOPS are only valid for storage type io1 for EBS Volumes")
+	} else if t == "io1" {
+		// We add the iops value without validating its size, to allow AWS to
+		// enforce a size requirement (currently 100)
+		request.Iops = aws.Int64(int64(iops))
+	}
+
+	log.Printf(
+		"[DEBUG] EBS Volume create opts: %s", request)
+	result, err := conn.CreateVolume(request)
+	if err != nil {
+		return fmt.Errorf("Error creating EC2 volume: %s", err)
+	}
+
+	log.Println("[DEBUG] Waiting for Volume to become available")
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"creating"},
+		Target:     []string{"available"},
+		Refresh:    volumeStateRefreshFunc(conn, *result.VolumeId),
+		Timeout:    5 * time.Minute,
+		Delay:      10 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf(
+			"Error waiting for Volume (%s) to become available: %s",
+			*result.VolumeId, err)
+	}
+
+	d.SetId(*result.VolumeId)
+
+	if _, ok := d.GetOk("tags"); ok {
+		if err := setTags(conn, d); err != nil {
+			return errwrap.Wrapf("Error setting tags for EBS Volume: {{err}}", err)
+		}
+	}
+
+	return readVolume(d, result)
+}
+
+func resourceAWSEbsVolumeUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+	if _, ok := d.GetOk("tags"); ok {
+		if err := setTags(conn, d); err != nil {
+			return errwrap.Wrapf("Error updating tags for EBS Volume: {{err}}", err)
+		}
+	}
+
+	requestUpdate := false
+	params := &ec2.ModifyVolumeInput{
+		VolumeId: aws.String(d.Id()),
+	}
+
+	if d.HasChange("size") {
+		requestUpdate = true
+		params.Size = aws.Int64(int64(d.Get("size").(int)))
+	}
+
+	if d.HasChange("type") {
+		requestUpdate = true
+		params.VolumeType = aws.String(d.Get("type").(string))
+	}
+
+	if d.HasChange("iops") {
+		requestUpdate = true
+		params.Iops = aws.Int64(int64(d.Get("iops").(int)))
+	}
+
+	if requestUpdate {
+		result, err := conn.ModifyVolume(params)
+		if err != nil {
+			return err
+		}
+
+		stateConf := &resource.StateChangeConf{
+			Pending:    []string{"creating", "modifying"},
+			Target:     []string{"available", "in-use"},
+			Refresh:    volumeStateRefreshFunc(conn, *result.VolumeModification.VolumeId),
+			Timeout:    5 * time.Minute,
+			Delay:      10 * time.Second,
+			MinTimeout: 3 * time.Second,
+		}
+
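+		// WaitForState polls the Refresh function until it observes one of the
+		// Target states, an unexpected state, or the Timeout. Schematically
+		// (a simplified sketch, not the real helper implementation):
+		//
+		//	for !timedOut {
+		//		obj, state, err := conf.Refresh()
+		//		if err != nil { return nil, err }
+		//		if contains(conf.Target, state) { return obj, nil }
+		//		sleep(conf.MinTimeout)
+		//	}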
+		_, err = stateConf.WaitForState()
+		if err != nil {
+			return fmt.Errorf(
+				"Error waiting for Volume (%s) to become available: %s",
+				*result.VolumeModification.VolumeId, err)
+		}
+	}
+
+	return resourceAwsEbsVolumeRead(d, meta)
+}
+
+// volumeStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch
+// the state of a Volume. Returns successfully when volume is available
+func volumeStateRefreshFunc(conn *ec2.EC2, volumeID string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		resp, err := conn.DescribeVolumes(&ec2.DescribeVolumesInput{
+			VolumeIds: []*string{aws.String(volumeID)},
+		})
+
+		if err != nil {
+			if ec2err, ok := err.(awserr.Error); ok {
+				// Log the AWS error details before bubbling the error up.
+				log.Printf("Error on Volume State Refresh: message: \"%s\", code:\"%s\"", ec2err.Message(), ec2err.Code())
+				return nil, "", err
+			}
+			log.Printf("Error on Volume State Refresh: %s", err)
+			return nil, "", err
+		}
+
+		v := resp.Volumes[0]
+		return v, *v.State, nil
+	}
+}
+
+func resourceAwsEbsVolumeRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	request := &ec2.DescribeVolumesInput{
+		VolumeIds: []*string{aws.String(d.Id())},
+	}
+
+	response, err := conn.DescribeVolumes(request)
+	if err != nil {
+		if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVolume.NotFound" {
+			d.SetId("")
+			return nil
+		}
+		return fmt.Errorf("Error reading EC2 volume %s: %s", d.Id(), err)
+	}
+
+	return readVolume(d, response.Volumes[0])
+}
+
+func resourceAwsEbsVolumeDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	return resource.Retry(5*time.Minute, func() *resource.RetryError {
+		request := &ec2.DeleteVolumeInput{
+			VolumeId: aws.String(d.Id()),
+		}
+		_, err := conn.DeleteVolume(request)
+		if err == nil {
+			return nil
+		}
+
+		ebsErr, ok := err.(awserr.Error)
+		if !ok {
+			return resource.NonRetryableError(err)
+		}
+
+		if ebsErr.Code() == "VolumeInUse" {
+			return resource.RetryableError(fmt.Errorf("EBS VolumeInUse - trying again while it detaches"))
+		}
+
+		return resource.NonRetryableError(err)
+	})
+}
+
+func readVolume(d *schema.ResourceData, volume *ec2.Volume) error {
+	d.SetId(*volume.VolumeId)
+
+	d.Set("availability_zone", *volume.AvailabilityZone)
+	if volume.Encrypted != nil {
+		d.Set("encrypted", *volume.Encrypted)
+	}
+	if volume.KmsKeyId != nil {
+		d.Set("kms_key_id", *volume.KmsKeyId)
+	}
+	if volume.Size != nil {
+		d.Set("size", *volume.Size)
+	}
+	if volume.SnapshotId != nil {
+		d.Set("snapshot_id", *volume.SnapshotId)
+	}
+	if volume.VolumeType != nil {
+		d.Set("type", *volume.VolumeType)
+	}
+
+	if volume.VolumeType != nil && *volume.VolumeType == "io1" {
+		// Only set the iops attribute if the volume type is io1. Setting otherwise
+		// can trigger a refresh/plan loop based on the computed value that is given
+		// from AWS, and prevent us from specifying 0 as a valid iops.
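+		// (For example, a gp2 volume refreshed from AWS reports a baseline IOPS
+		// value; writing that into state would make a configuration that omits
+		// iops, or sets it to 0, diff forever.)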
+		// See https://github.com/hashicorp/terraform/pull/4146
+		if volume.Iops != nil {
+			d.Set("iops", *volume.Iops)
+		}
+	}
+
+	if volume.Tags != nil {
+		d.Set("tags", tagsToMap(volume.Tags))
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecr_repository.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecr_repository.go
new file mode 100644
index 000000000..3a2447435
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecr_repository.go
@@ -0,0 +1,152 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/ecr"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsEcrRepository() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsEcrRepositoryCreate,
+		Read:   resourceAwsEcrRepositoryRead,
+		Delete: resourceAwsEcrRepositoryDelete,
+		Importer: &schema.ResourceImporter{
+			State: schema.ImportStatePassthrough,
+		},
+
+		Schema: map[string]*schema.Schema{
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"arn": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"registry_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"repository_url": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceAwsEcrRepositoryCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ecrconn
+
+	input := ecr.CreateRepositoryInput{
+		RepositoryName: aws.String(d.Get("name").(string)),
+	}
+
+	log.Printf("[DEBUG] Creating ECR repository: %s", input)
+	out, err := conn.CreateRepository(&input)
+	if err != nil {
+		return err
+	}
+
+	repository := *out.Repository
+
+	log.Printf("[DEBUG] ECR repository created: %q", *repository.RepositoryArn)
+
+	d.SetId(*repository.RepositoryName)
+	d.Set("arn", repository.RepositoryArn)
+	d.Set("registry_id", repository.RegistryId)
+
+	return resourceAwsEcrRepositoryRead(d, meta)
+}
+
+func resourceAwsEcrRepositoryRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ecrconn
+
+	log.Printf("[DEBUG] Reading repository %s", d.Id())
+	out, err := conn.DescribeRepositories(&ecr.DescribeRepositoriesInput{
+		RepositoryNames: []*string{aws.String(d.Id())},
+	})
+	if err != nil {
+		if ecrerr, ok := err.(awserr.Error); ok && ecrerr.Code() == "RepositoryNotFoundException" {
+			d.SetId("")
+			return nil
+		}
+		return err
+	}
+
+	repository := out.Repositories[0]
+
+	log.Printf("[DEBUG] Received repository %s", out)
+
+	d.SetId(*repository.RepositoryName)
+	d.Set("arn", repository.RepositoryArn)
+	d.Set("registry_id", repository.RegistryId)
+	d.Set("name", repository.RepositoryName)
+
+	repositoryUrl := buildRepositoryUrl(repository, meta.(*AWSClient).region)
+	log.Printf("[INFO] Setting the repository url to be %s", repositoryUrl)
+	d.Set("repository_url", repositoryUrl)
+
+	return nil
+}
+
+func buildRepositoryUrl(repo *ecr.Repository, region string) string {
+	return fmt.Sprintf("%s.dkr.ecr.%s.amazonaws.com/%s", *repo.RegistryId, region, *repo.RepositoryName)
+}
+
+func resourceAwsEcrRepositoryDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ecrconn
+
+	_, err := conn.DeleteRepository(&ecr.DeleteRepositoryInput{
+		RepositoryName: aws.String(d.Id()),
+		RegistryId:     aws.String(d.Get("registry_id").(string)),
+		Force:          aws.Bool(true),
+	})
+	if err != nil {
+		if ecrerr, ok := err.(awserr.Error); ok && ecrerr.Code() == "RepositoryNotFoundException" {
+			d.SetId("")
+			return nil
+		}
+		return err
+	}
+
+	log.Printf("[DEBUG] Waiting for ECR Repository %q to be deleted", d.Id())
+	err = resource.Retry(20*time.Minute, func() *resource.RetryError {
+		_, err := conn.DescribeRepositories(&ecr.DescribeRepositoriesInput{
+			RepositoryNames: []*string{aws.String(d.Id())},
+		})
+
+		if err != nil {
+			awsErr, ok := err.(awserr.Error)
+			if !ok {
+				return resource.NonRetryableError(err)
+			}
+
+			if awsErr.Code() == "RepositoryNotFoundException" {
+				return nil
+			}
+
+			return resource.NonRetryableError(err)
+		}
+
+		return resource.RetryableError(
+			fmt.Errorf("%q: Timeout while waiting for the ECR Repository to be deleted", d.Id()))
+	})
+	if err != nil {
+		return err
+	}
+
+	d.SetId("")
+	log.Printf("[DEBUG] repository %q deleted.", d.Get("name").(string))
+
+	return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecr_repository_policy.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecr_repository_policy.go
new file mode 100644
index 000000000..77bfb78db
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecr_repository_policy.go
@@ -0,0 +1,141 @@
+package aws
+
+import (
+	"log"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/ecr"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsEcrRepositoryPolicy() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsEcrRepositoryPolicyCreate,
+		Read:   resourceAwsEcrRepositoryPolicyRead,
+		Update: resourceAwsEcrRepositoryPolicyUpdate,
+		Delete: resourceAwsEcrRepositoryPolicyDelete,
+
+		Schema: map[string]*schema.Schema{
+			"repository": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"policy": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+			},
+			"registry_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceAwsEcrRepositoryPolicyCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ecrconn
+
+	input := ecr.SetRepositoryPolicyInput{
+		RepositoryName: aws.String(d.Get("repository").(string)),
+		PolicyText:     aws.String(d.Get("policy").(string)),
+	}
+
+	log.Printf("[DEBUG] Creating ECR repository policy: %s", input)
+	out, err := conn.SetRepositoryPolicy(&input)
+	if err != nil {
+		return err
+	}
+
+	repositoryPolicy := *out
+
+	log.Printf("[DEBUG] ECR repository policy created: %s", *repositoryPolicy.RepositoryName)
+
+	d.SetId(*repositoryPolicy.RepositoryName)
+	d.Set("registry_id", repositoryPolicy.RegistryId)
+
+	return resourceAwsEcrRepositoryPolicyRead(d, meta)
+}
+
+func resourceAwsEcrRepositoryPolicyRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ecrconn
+
+	log.Printf("[DEBUG] Reading repository policy %s", d.Id())
+	out, err := conn.GetRepositoryPolicy(&ecr.GetRepositoryPolicyInput{
+		RegistryId:     aws.String(d.Get("registry_id").(string)),
+		RepositoryName: aws.String(d.Id()),
+	})
+	if err != nil {
+		if ecrerr, ok := err.(awserr.Error); ok {
+			switch ecrerr.Code() {
+			case "RepositoryNotFoundException", "RepositoryPolicyNotFoundException":
+				d.SetId("")
+				return nil
+			default:
+				return err
+			}
+		}
+		return err
+	}
+
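+	// The "policy" attribute carries a standard IAM-style JSON policy document
+	// scoped to this repository; an illustrative (hypothetical) example:
+	//
+	//	{
+	//	  "Version": "2012-10-17",
+	//	  "Statement": [{
+	//	    "Sid": "AllowPull",
+	//	    "Effect": "Allow",
+	//	    "Principal": {"AWS": "arn:aws:iam::123456789012:root"},
+	//	    "Action": ["ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage"]
+	//	  }]
+	//	}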
+	log.Printf("[DEBUG] Received repository policy %s", out)
+
+	repositoryPolicy := out
+
+	d.SetId(*repositoryPolicy.RepositoryName)
+	d.Set("registry_id", repositoryPolicy.RegistryId)
+
+	return nil
+}
+
+func resourceAwsEcrRepositoryPolicyUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ecrconn
+
+	if !d.HasChange("policy") {
+		return nil
+	}
+
+	input := ecr.SetRepositoryPolicyInput{
+		RepositoryName: aws.String(d.Get("repository").(string)),
+		RegistryId:     aws.String(d.Get("registry_id").(string)),
+		PolicyText:     aws.String(d.Get("policy").(string)),
+	}
+
+	out, err := conn.SetRepositoryPolicy(&input)
+	if err != nil {
+		return err
+	}
+
+	repositoryPolicy := *out
+
+	d.SetId(*repositoryPolicy.RepositoryName)
+	d.Set("registry_id", repositoryPolicy.RegistryId)
+
+	return nil
+}
+
+func resourceAwsEcrRepositoryPolicyDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ecrconn
+
+	_, err := conn.DeleteRepositoryPolicy(&ecr.DeleteRepositoryPolicyInput{
+		RepositoryName: aws.String(d.Id()),
+		RegistryId:     aws.String(d.Get("registry_id").(string)),
+	})
+	if err != nil {
+		if ecrerr, ok := err.(awserr.Error); ok {
+			switch ecrerr.Code() {
+			case "RepositoryNotFoundException", "RepositoryPolicyNotFoundException":
+				d.SetId("")
+				return nil
+			default:
+				return err
+			}
+		}
+		return err
+	}
+
+	log.Printf("[DEBUG] repository policy %s deleted.", d.Id())
+
+	return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_cluster.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_cluster.go
new file mode 100644
index 000000000..0867db1ae
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_cluster.go
@@ -0,0 +1,149 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/ecs"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsEcsCluster() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsEcsClusterCreate,
+		Read:   resourceAwsEcsClusterRead,
+		Delete: resourceAwsEcsClusterDelete,
+
+		Schema: map[string]*schema.Schema{
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+		},
+	}
+}
+
+func resourceAwsEcsClusterCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ecsconn
+
+	clusterName := d.Get("name").(string)
+	log.Printf("[DEBUG] Creating ECS cluster %s", clusterName)
+
+	out, err := conn.CreateCluster(&ecs.CreateClusterInput{
+		ClusterName: aws.String(clusterName),
+	})
+	if err != nil {
+		return err
+	}
+	log.Printf("[DEBUG] ECS cluster %s created", *out.Cluster.ClusterArn)
+
+	d.SetId(*out.Cluster.ClusterArn)
+	d.Set("name", out.Cluster.ClusterName)
+	return nil
+}
+
+func resourceAwsEcsClusterRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ecsconn
+
+	clusterName := d.Get("name").(string)
+	log.Printf("[DEBUG] Reading ECS cluster %s", clusterName)
+	out, err := conn.DescribeClusters(&ecs.DescribeClustersInput{
+		Clusters: []*string{aws.String(clusterName)},
+	})
+	if err != nil {
+		return err
+	}
+	log.Printf("[DEBUG] Received ECS clusters: %s", out.Clusters)
+
+	for _, c := range out.Clusters {
+		if *c.ClusterName == clusterName {
+			// Status==INACTIVE means deleted cluster
+			if *c.Status == "INACTIVE" {
+				log.Printf("[DEBUG] Removing ECS cluster %q because it's INACTIVE", *c.ClusterArn)
+				d.SetId("")
+				return nil
+			}
+
+			d.SetId(*c.ClusterArn)
+			d.Set("name", c.ClusterName)
+			return nil
+		}
+	}
+
+	log.Printf("[ERR] No matching ECS Cluster found for (%s)", d.Id())
+	d.SetId("")
+	return nil
+}
+
+func resourceAwsEcsClusterDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ecsconn
+
+	log.Printf("[DEBUG] Deleting ECS cluster %s", d.Id())
+
+	err := resource.Retry(10*time.Minute, func() *resource.RetryError {
+		out, err := conn.DeleteCluster(&ecs.DeleteClusterInput{
+			Cluster: aws.String(d.Id()),
+		})
+
+		if err == nil {
+			log.Printf("[DEBUG] ECS cluster %s deleted: %s", d.Id(), out)
+			return nil
+		}
+
+		awsErr, ok := err.(awserr.Error)
+		if !ok {
+			return resource.NonRetryableError(err)
+		}
+
+		if awsErr.Code() == "ClusterContainsContainerInstancesException" {
+			log.Printf("[TRACE] Retrying ECS cluster %q deletion after %q", d.Id(), awsErr.Code())
+			return resource.RetryableError(err)
+		}
+
+		if awsErr.Code() == "ClusterContainsServicesException" {
+			log.Printf("[TRACE] Retrying ECS cluster %q deletion after %q", d.Id(), awsErr.Code())
+			return resource.RetryableError(err)
+		}
+
+		return resource.NonRetryableError(err)
+	})
+	if err != nil {
+		return err
+	}
+
+	clusterName := d.Get("name").(string)
+	err = resource.Retry(5*time.Minute, func() *resource.RetryError {
+		log.Printf("[DEBUG] Checking if ECS Cluster %q is INACTIVE", d.Id())
+		out, err := conn.DescribeClusters(&ecs.DescribeClustersInput{
+			Clusters: []*string{aws.String(clusterName)},
+		})
+		if err != nil {
+			return resource.NonRetryableError(err)
+		}
+
+		for _, c := range out.Clusters {
+			if *c.ClusterName == clusterName {
+				if *c.Status == "INACTIVE" {
+					return nil
+				}
+
+				return resource.RetryableError(
+					fmt.Errorf("ECS Cluster %q is still %q", clusterName, *c.Status))
+			}
+		}
+
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	log.Printf("[DEBUG] ECS cluster %q deleted", d.Id())
+	return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_service.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_service.go
new file mode 100644
index 000000000..27542633f
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_service.go
@@ -0,0 +1,567 @@
+package aws
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/ecs"
+	"github.com/hashicorp/terraform/helper/hashcode"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+var taskDefinitionRE = regexp.MustCompile("^([a-zA-Z0-9_-]+):([0-9]+)$")
+
+func resourceAwsEcsService() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsEcsServiceCreate,
+		Read:   resourceAwsEcsServiceRead,
+		Update: resourceAwsEcsServiceUpdate,
+		Delete: resourceAwsEcsServiceDelete,
+
+		Schema: map[string]*schema.Schema{
+			"name": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"cluster": {
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+				ForceNew: true,
+			},
+
+			"task_definition": {
+				Type:     schema.TypeString,
+				Required: true,
+			},
+
+			"desired_count": {
+				Type:     schema.TypeInt,
+				Optional: true,
+			},
+
+			"iam_role": {
+				Type:     schema.TypeString,
+				ForceNew: true,
+				Optional: true,
+			},
+
"deployment_maximum_percent": { + Type: schema.TypeInt, + Optional: true, + Default: 200, + }, + + "deployment_minimum_healthy_percent": { + Type: schema.TypeInt, + Optional: true, + Default: 100, + }, + + "load_balancer": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "elb_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "target_group_arn": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "container_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "container_port": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + }, + }, + Set: resourceAwsEcsLoadBalancerHash, + }, + + "placement_strategy": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + MaxItems: 5, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "field": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if strings.ToLower(old) == strings.ToLower(new) { + return true + } + return false + }, + }, + }, + }, + }, + + "placement_constraints": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + MaxItems: 10, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "expression": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + }, + }, + }, + }, + } +} + +func resourceAwsEcsServiceCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ecsconn + + input := ecs.CreateServiceInput{ + ServiceName: aws.String(d.Get("name").(string)), + TaskDefinition: aws.String(d.Get("task_definition").(string)), + DesiredCount: aws.Int64(int64(d.Get("desired_count").(int))), + ClientToken: aws.String(resource.UniqueId()), + DeploymentConfiguration: &ecs.DeploymentConfiguration{ + MaximumPercent: aws.Int64(int64(d.Get("deployment_maximum_percent").(int))), + MinimumHealthyPercent: aws.Int64(int64(d.Get("deployment_minimum_healthy_percent").(int))), + }, + } + + if v, ok := d.GetOk("cluster"); ok { + input.Cluster = aws.String(v.(string)) + } + + loadBalancers := expandEcsLoadBalancers(d.Get("load_balancer").(*schema.Set).List()) + if len(loadBalancers) > 0 { + log.Printf("[DEBUG] Adding ECS load balancers: %s", loadBalancers) + input.LoadBalancers = loadBalancers + } + if v, ok := d.GetOk("iam_role"); ok { + input.Role = aws.String(v.(string)) + } + + strategies := d.Get("placement_strategy").(*schema.Set).List() + if len(strategies) > 0 { + var ps []*ecs.PlacementStrategy + for _, raw := range strategies { + p := raw.(map[string]interface{}) + t := p["type"].(string) + f := p["field"].(string) + if err := validateAwsEcsPlacementStrategy(t, f); err != nil { + return err + } + ps = append(ps, &ecs.PlacementStrategy{ + Type: aws.String(p["type"].(string)), + Field: aws.String(p["field"].(string)), + }) + } + input.PlacementStrategy = ps + } + + constraints := d.Get("placement_constraints").(*schema.Set).List() + if len(constraints) > 0 { + var pc []*ecs.PlacementConstraint + for _, raw := range constraints { + p := raw.(map[string]interface{}) + t := p["type"].(string) + e := p["expression"].(string) + if err := validateAwsEcsPlacementConstraint(t, e); err != nil { + return err + } + constraint := 
&ecs.PlacementConstraint{
+				Type: aws.String(t),
+			}
+			if e != "" {
+				constraint.Expression = aws.String(e)
+			}
+
+			pc = append(pc, constraint)
+		}
+		input.PlacementConstraints = pc
+	}
+
+	log.Printf("[DEBUG] Creating ECS service: %s", input)
+
+	// Retry due to AWS IAM & ECS eventual consistency
+	var out *ecs.CreateServiceOutput
+	var err error
+	err = resource.Retry(2*time.Minute, func() *resource.RetryError {
+		out, err = conn.CreateService(&input)
+
+		if err != nil {
+			awsErr, ok := err.(awserr.Error)
+			if !ok {
+				return resource.NonRetryableError(err)
+			}
+			if awsErr.Code() == "InvalidParameterException" {
+				log.Printf("[DEBUG] Trying to create ECS service again: %q",
+					awsErr.Message())
+				return resource.RetryableError(err)
+			}
+			if awsErr.Code() == "ClusterNotFoundException" {
+				log.Printf("[DEBUG] Trying to create ECS service again: %q",
+					awsErr.Message())
+				return resource.RetryableError(err)
+			}
+
+			return resource.NonRetryableError(err)
+		}
+
+		return nil
+	})
+	if err != nil {
+		return fmt.Errorf("Error creating ECS service %q: %s", d.Get("name").(string), err)
+	}
+
+	service := *out.Service
+
+	log.Printf("[DEBUG] ECS service created: %s", *service.ServiceArn)
+	d.SetId(*service.ServiceArn)
+
+	return resourceAwsEcsServiceUpdate(d, meta)
+}
+
+func resourceAwsEcsServiceRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ecsconn
+
+	log.Printf("[DEBUG] Reading ECS service %s", d.Id())
+	input := ecs.DescribeServicesInput{
+		Services: []*string{aws.String(d.Id())},
+		Cluster:  aws.String(d.Get("cluster").(string)),
+	}
+
+	out, err := conn.DescribeServices(&input)
+	if err != nil {
+		return err
+	}
+
+	if len(out.Services) < 1 {
+		log.Printf("[DEBUG] Removing ECS service %s (%s) because it's gone", d.Get("name").(string), d.Id())
+		d.SetId("")
+		return nil
+	}
+
+	service := out.Services[0]
+
+	// Status==INACTIVE means deleted service
+	if *service.Status == "INACTIVE" {
+		log.Printf("[DEBUG] Removing ECS service %q because it's INACTIVE", *service.ServiceArn)
+		d.SetId("")
+		return nil
+	}
+
+	log.Printf("[DEBUG] Received ECS service %s", service)
+
+	d.SetId(*service.ServiceArn)
+	d.Set("name", service.ServiceName)
+
+	// Save task definition in the same format
+	if strings.HasPrefix(d.Get("task_definition").(string), "arn:"+meta.(*AWSClient).partition+":ecs:") {
+		d.Set("task_definition", service.TaskDefinition)
+	} else {
+		taskDefinition := buildFamilyAndRevisionFromARN(*service.TaskDefinition)
+		d.Set("task_definition", taskDefinition)
+	}
+
+	d.Set("desired_count", service.DesiredCount)
+
+	// Save cluster in the same format
+	if strings.HasPrefix(d.Get("cluster").(string), "arn:"+meta.(*AWSClient).partition+":ecs:") {
+		d.Set("cluster", service.ClusterArn)
+	} else {
+		clusterARN := getNameFromARN(*service.ClusterArn)
+		d.Set("cluster", clusterARN)
+	}
+
+	// Save IAM role in the same format
+	if service.RoleArn != nil {
+		if strings.HasPrefix(d.Get("iam_role").(string), "arn:"+meta.(*AWSClient).partition+":iam:") {
+			d.Set("iam_role", service.RoleArn)
+		} else {
+			roleARN := getNameFromARN(*service.RoleArn)
+			d.Set("iam_role", roleARN)
+		}
+	}
+
+	if service.DeploymentConfiguration != nil {
+		d.Set("deployment_maximum_percent", service.DeploymentConfiguration.MaximumPercent)
+		d.Set("deployment_minimum_healthy_percent", service.DeploymentConfiguration.MinimumHealthyPercent)
+	}
+
+	if service.LoadBalancers != nil {
+		d.Set("load_balancer", flattenEcsLoadBalancers(service.LoadBalancers))
+	}
+
+	if err := d.Set("placement_strategy",
flattenPlacementStrategy(service.PlacementStrategy)); err != nil { + log.Printf("[ERR] Error setting placement_strategy for (%s): %s", d.Id(), err) + } + if err := d.Set("placement_constraints", flattenServicePlacementConstraints(service.PlacementConstraints)); err != nil { + log.Printf("[ERR] Error setting placement_constraints for (%s): %s", d.Id(), err) + } + + return nil +} + +func flattenServicePlacementConstraints(pcs []*ecs.PlacementConstraint) []map[string]interface{} { + if len(pcs) == 0 { + return nil + } + results := make([]map[string]interface{}, 0) + for _, pc := range pcs { + c := make(map[string]interface{}) + c["type"] = *pc.Type + if pc.Expression != nil { + c["expression"] = *pc.Expression + } + + results = append(results, c) + } + return results +} + +func flattenPlacementStrategy(pss []*ecs.PlacementStrategy) []map[string]interface{} { + if len(pss) == 0 { + return nil + } + results := make([]map[string]interface{}, 0) + for _, ps := range pss { + c := make(map[string]interface{}) + c["type"] = *ps.Type + c["field"] = *ps.Field + + // for some fields the API requires lowercase for creation but will return uppercase on query + if *ps.Field == "MEMORY" || *ps.Field == "CPU" { + c["field"] = strings.ToLower(*ps.Field) + } + + results = append(results, c) + } + return results +} + +func resourceAwsEcsServiceUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ecsconn + + log.Printf("[DEBUG] Updating ECS service %s", d.Id()) + input := ecs.UpdateServiceInput{ + Service: aws.String(d.Id()), + Cluster: aws.String(d.Get("cluster").(string)), + } + + if d.HasChange("desired_count") { + _, n := d.GetChange("desired_count") + input.DesiredCount = aws.Int64(int64(n.(int))) + } + if d.HasChange("task_definition") { + _, n := d.GetChange("task_definition") + input.TaskDefinition = aws.String(n.(string)) + } + + if d.HasChange("deployment_maximum_percent") || d.HasChange("deployment_minimum_healthy_percent") { + input.DeploymentConfiguration = &ecs.DeploymentConfiguration{ + MaximumPercent: aws.Int64(int64(d.Get("deployment_maximum_percent").(int))), + MinimumHealthyPercent: aws.Int64(int64(d.Get("deployment_minimum_healthy_percent").(int))), + } + } + + // Retry due to IAM & ECS eventual consistency + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + out, err := conn.UpdateService(&input) + if err != nil { + awsErr, ok := err.(awserr.Error) + if ok && awsErr.Code() == "InvalidParameterException" { + log.Printf("[DEBUG] Trying to update ECS service again: %#v", err) + return resource.RetryableError(err) + } + if ok && awsErr.Code() == "ServiceNotFoundException" { + log.Printf("[DEBUG] Trying to update ECS service again: %#v", err) + return resource.RetryableError(err) + } + + return resource.NonRetryableError(err) + } + + log.Printf("[DEBUG] Updated ECS service %s", out.Service) + return nil + }) + if err != nil { + return err + } + + return resourceAwsEcsServiceRead(d, meta) +} + +func resourceAwsEcsServiceDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ecsconn + + // Check if it's not already gone + resp, err := conn.DescribeServices(&ecs.DescribeServicesInput{ + Services: []*string{aws.String(d.Id())}, + Cluster: aws.String(d.Get("cluster").(string)), + }) + if err != nil { + return err + } + + if len(resp.Services) == 0 { + log.Printf("[DEBUG] ECS Service %q is already gone", d.Id()) + return nil + } + + log.Printf("[DEBUG] ECS service %s is currently %s", d.Id(), *resp.Services[0].Status) + + 
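+	// Deletion is a three-step process: scale the service down to zero tasks
+	// (DRAINING), call DeleteService once no deployments are active, then poll
+	// DescribeServices until the status reaches INACTIVE.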
if *resp.Services[0].Status == "INACTIVE" { + return nil + } + + // Drain the ECS service + if *resp.Services[0].Status != "DRAINING" { + log.Printf("[DEBUG] Draining ECS service %s", d.Id()) + _, err = conn.UpdateService(&ecs.UpdateServiceInput{ + Service: aws.String(d.Id()), + Cluster: aws.String(d.Get("cluster").(string)), + DesiredCount: aws.Int64(int64(0)), + }) + if err != nil { + return err + } + } + + // Wait until the ECS service is drained + err = resource.Retry(5*time.Minute, func() *resource.RetryError { + input := ecs.DeleteServiceInput{ + Service: aws.String(d.Id()), + Cluster: aws.String(d.Get("cluster").(string)), + } + + log.Printf("[DEBUG] Trying to delete ECS service %s", input) + _, err := conn.DeleteService(&input) + if err == nil { + return nil + } + + ec2err, ok := err.(awserr.Error) + if !ok { + return resource.NonRetryableError(err) + } + if ec2err.Code() == "InvalidParameterException" { + // Prevent "The service cannot be stopped while deployments are active." + log.Printf("[DEBUG] Trying to delete ECS service again: %q", + ec2err.Message()) + return resource.RetryableError(err) + } + + return resource.NonRetryableError(err) + + }) + if err != nil { + return err + } + + // Wait until it's deleted + wait := resource.StateChangeConf{ + Pending: []string{"ACTIVE", "DRAINING"}, + Target: []string{"INACTIVE"}, + Timeout: 10 * time.Minute, + MinTimeout: 1 * time.Second, + Refresh: func() (interface{}, string, error) { + log.Printf("[DEBUG] Checking if ECS service %s is INACTIVE", d.Id()) + resp, err := conn.DescribeServices(&ecs.DescribeServicesInput{ + Services: []*string{aws.String(d.Id())}, + Cluster: aws.String(d.Get("cluster").(string)), + }) + if err != nil { + return resp, "FAILED", err + } + + log.Printf("[DEBUG] ECS service (%s) is currently %q", d.Id(), *resp.Services[0].Status) + return resp, *resp.Services[0].Status, nil + }, + } + + _, err = wait.WaitForState() + if err != nil { + return err + } + + log.Printf("[DEBUG] ECS service %s deleted.", d.Id()) + return nil +} + +func resourceAwsEcsLoadBalancerHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + + buf.WriteString(fmt.Sprintf("%s-", m["elb_name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["container_name"].(string))) + buf.WriteString(fmt.Sprintf("%d-", m["container_port"].(int))) + + if s := m["target_group_arn"].(string); s != "" { + buf.WriteString(fmt.Sprintf("%s-", s)) + } + + return hashcode.String(buf.String()) +} + +func buildFamilyAndRevisionFromARN(arn string) string { + return strings.Split(arn, "/")[1] +} + +// Expects the following ARNs: +// arn:aws:iam::0123456789:role/EcsService +// arn:aws:ecs:us-west-2:0123456789:cluster/radek-cluster +func getNameFromARN(arn string) string { + return strings.Split(arn, "/")[1] +} + +func parseTaskDefinition(taskDefinition string) (string, string, error) { + matches := taskDefinitionRE.FindAllStringSubmatch(taskDefinition, 2) + + if len(matches) == 0 || len(matches[0]) != 3 { + return "", "", fmt.Errorf( + "Invalid task definition format, family:rev or ARN expected (%#v)", + taskDefinition) + } + + return matches[0][1], matches[0][2], nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_task_definition.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_task_definition.go new file mode 100644 index 000000000..fa082472b --- /dev/null +++ 
b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ecs_task_definition.go @@ -0,0 +1,264 @@ +package aws + +import ( + "bytes" + "crypto/sha1" + "encoding/hex" + "fmt" + "log" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ecs" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsEcsTaskDefinition() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsEcsTaskDefinitionCreate, + Read: resourceAwsEcsTaskDefinitionRead, + Delete: resourceAwsEcsTaskDefinitionDelete, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "family": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "revision": { + Type: schema.TypeInt, + Computed: true, + }, + + "container_definitions": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + StateFunc: func(v interface{}) string { + hash := sha1.Sum([]byte(v.(string))) + return hex.EncodeToString(hash[:]) + }, + ValidateFunc: validateAwsEcsTaskDefinitionContainerDefinitions, + }, + + "task_role_arn": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "network_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateAwsEcsTaskDefinitionNetworkMode, + }, + + "volume": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "host_path": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, + Set: resourceAwsEcsTaskDefinitionVolumeHash, + }, + + "placement_constraints": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + MaxItems: 10, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "expression": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + }, + }, + }, + }, + } +} + +func validateAwsEcsTaskDefinitionNetworkMode(v interface{}, k string) (ws []string, errors []error) { + value := strings.ToLower(v.(string)) + validTypes := map[string]struct{}{ + "bridge": {}, + "host": {}, + "none": {}, + } + + if _, ok := validTypes[value]; !ok { + errors = append(errors, fmt.Errorf("ECS Task Definition network_mode %q is invalid, must be `bridge`, `host` or `none`", value)) + } + return +} + +func validateAwsEcsTaskDefinitionContainerDefinitions(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + _, err := expandEcsContainerDefinitions(value) + if err != nil { + errors = append(errors, fmt.Errorf("ECS Task Definition container_definitions is invalid: %s", err)) + } + return +} + +func resourceAwsEcsTaskDefinitionCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ecsconn + + rawDefinitions := d.Get("container_definitions").(string) + definitions, err := expandEcsContainerDefinitions(rawDefinitions) + if err != nil { + return err + } + + input := ecs.RegisterTaskDefinitionInput{ + ContainerDefinitions: definitions, + Family: aws.String(d.Get("family").(string)), + } + + if v, ok := d.GetOk("task_role_arn"); ok { + input.TaskRoleArn = aws.String(v.(string)) + } + + if v, ok := d.GetOk("network_mode"); ok { + input.NetworkMode = aws.String(v.(string)) + } + + if v, ok := d.GetOk("volume"); ok { + volumes, err 
:= expandEcsVolumes(v.(*schema.Set).List())
+		if err != nil {
+			return err
+		}
+		input.Volumes = volumes
+	}
+
+	constraints := d.Get("placement_constraints").(*schema.Set).List()
+	if len(constraints) > 0 {
+		var pc []*ecs.TaskDefinitionPlacementConstraint
+		for _, raw := range constraints {
+			p := raw.(map[string]interface{})
+			t := p["type"].(string)
+			e := p["expression"].(string)
+			if err := validateAwsEcsPlacementConstraint(t, e); err != nil {
+				return err
+			}
+			pc = append(pc, &ecs.TaskDefinitionPlacementConstraint{
+				Type:       aws.String(t),
+				Expression: aws.String(e),
+			})
+		}
+		input.PlacementConstraints = pc
+	}
+
+	log.Printf("[DEBUG] Registering ECS task definition: %s", input)
+	out, err := conn.RegisterTaskDefinition(&input)
+	if err != nil {
+		return err
+	}
+
+	taskDefinition := *out.TaskDefinition
+
+	log.Printf("[DEBUG] ECS task definition registered: %q (rev. %d)",
+		*taskDefinition.TaskDefinitionArn, *taskDefinition.Revision)
+
+	d.SetId(*taskDefinition.Family)
+	d.Set("arn", taskDefinition.TaskDefinitionArn)
+
+	return resourceAwsEcsTaskDefinitionRead(d, meta)
+}
+
+func resourceAwsEcsTaskDefinitionRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ecsconn
+
+	log.Printf("[DEBUG] Reading task definition %s", d.Id())
+	out, err := conn.DescribeTaskDefinition(&ecs.DescribeTaskDefinitionInput{
+		TaskDefinition: aws.String(d.Get("arn").(string)),
+	})
+	if err != nil {
+		return err
+	}
+	log.Printf("[DEBUG] Received task definition %s", out)
+
+	taskDefinition := out.TaskDefinition
+
+	d.SetId(*taskDefinition.Family)
+	d.Set("arn", taskDefinition.TaskDefinitionArn)
+	d.Set("family", taskDefinition.Family)
+	d.Set("revision", taskDefinition.Revision)
+	d.Set("container_definitions", taskDefinition.ContainerDefinitions)
+	d.Set("task_role_arn", taskDefinition.TaskRoleArn)
+	d.Set("network_mode", taskDefinition.NetworkMode)
+	d.Set("volume", flattenEcsVolumes(taskDefinition.Volumes))
+	if err := d.Set("placement_constraints", flattenPlacementConstraints(taskDefinition.PlacementConstraints)); err != nil {
+		log.Printf("[ERR] Error setting placement_constraints for (%s): %s", d.Id(), err)
+	}
+
+	return nil
+}
+
+func flattenPlacementConstraints(pcs []*ecs.TaskDefinitionPlacementConstraint) []map[string]interface{} {
+	if len(pcs) == 0 {
+		return nil
+	}
+	results := make([]map[string]interface{}, 0)
+	for _, pc := range pcs {
+		c := make(map[string]interface{})
+		c["type"] = *pc.Type
+		if pc.Expression != nil {
+			c["expression"] = *pc.Expression
+		}
+		results = append(results, c)
+	}
+	return results
+}
+
+func resourceAwsEcsTaskDefinitionDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ecsconn
+
+	_, err := conn.DeregisterTaskDefinition(&ecs.DeregisterTaskDefinitionInput{
+		TaskDefinition: aws.String(d.Get("arn").(string)),
+	})
+	if err != nil {
+		return err
+	}
+
+	log.Printf("[DEBUG] Task definition %q deregistered.", d.Get("arn").(string))
+
+	return nil
+}
+
+func resourceAwsEcsTaskDefinitionVolumeHash(v interface{}) int {
+	var buf bytes.Buffer
+	m := v.(map[string]interface{})
+	buf.WriteString(fmt.Sprintf("%s-", m["name"].(string)))
+	buf.WriteString(fmt.Sprintf("%s-", m["host_path"].(string)))
+
+	return hashcode.String(buf.String())
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_efs_file_system.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_efs_file_system.go
new file mode 100644
index 000000000..445242d4d
--- /dev/null
+++ 
b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_efs_file_system.go @@ -0,0 +1,274 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/efs" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsEfsFileSystem() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsEfsFileSystemCreate, + Read: resourceAwsEfsFileSystemRead, + Update: resourceAwsEfsFileSystemUpdate, + Delete: resourceAwsEfsFileSystemDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "creation_token": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateMaxLength(64), + }, + + "reference_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Deprecated: "Please use attribute `creation_token' instead. This attribute might be removed in future releases.", + ValidateFunc: validateReferenceName, + }, + + "performance_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validatePerformanceModeType, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsEfsFileSystemCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).efsconn + + creationToken := "" + if v, ok := d.GetOk("creation_token"); ok { + creationToken = v.(string) + } else { + if v, ok := d.GetOk("reference_name"); ok { + creationToken = resource.PrefixedUniqueId(fmt.Sprintf("%s-", v.(string))) + log.Printf("[WARN] Using deprecated `reference_name' attribute.") + } else { + creationToken = resource.UniqueId() + } + } + + createOpts := &efs.CreateFileSystemInput{ + CreationToken: aws.String(creationToken), + } + + if v, ok := d.GetOk("performance_mode"); ok { + createOpts.PerformanceMode = aws.String(v.(string)) + } + + log.Printf("[DEBUG] EFS file system create options: %#v", *createOpts) + fs, err := conn.CreateFileSystem(createOpts) + if err != nil { + return fmt.Errorf("Error creating EFS file system: %s", err) + } + + d.SetId(*fs.FileSystemId) + log.Printf("[INFO] EFS file system ID: %s", d.Id()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating"}, + Target: []string{"available"}, + Refresh: func() (interface{}, string, error) { + resp, err := conn.DescribeFileSystems(&efs.DescribeFileSystemsInput{ + FileSystemId: aws.String(d.Id()), + }) + if err != nil { + return nil, "error", err + } + + if hasEmptyFileSystems(resp) { + return nil, "not-found", fmt.Errorf("EFS file system %q could not be found.", d.Id()) + } + + fs := resp.FileSystems[0] + log.Printf("[DEBUG] current status of %q: %q", *fs.FileSystemId, *fs.LifeCycleState) + return fs, *fs.LifeCycleState, nil + }, + Timeout: 10 * time.Minute, + Delay: 2 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for EFS file system (%q) to create: %s", + d.Id(), err.Error()) + } + log.Printf("[DEBUG] EFS file system %q created.", d.Id()) + + return resourceAwsEfsFileSystemUpdate(d, meta) +} + +func resourceAwsEfsFileSystemUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).efsconn + err := setTagsEFS(conn, d) + if err != nil { + return fmt.Errorf("Error setting EC2 tags for EFS file system (%q): %s", + d.Id(), 
err.Error())
+	}
+
+	return resourceAwsEfsFileSystemRead(d, meta)
+}
+
+func resourceAwsEfsFileSystemRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).efsconn
+
+	resp, err := conn.DescribeFileSystems(&efs.DescribeFileSystemsInput{
+		FileSystemId: aws.String(d.Id()),
+	})
+	if err != nil {
+		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "FileSystemNotFound" {
+			log.Printf("[WARN] EFS file system (%s) could not be found.", d.Id())
+			d.SetId("")
+			return nil
+		}
+		return err
+	}
+
+	if hasEmptyFileSystems(resp) {
+		return fmt.Errorf("EFS file system %q could not be found.", d.Id())
+	}
+
+	tags := make([]*efs.Tag, 0)
+	var marker string
+	for {
+		params := &efs.DescribeTagsInput{
+			FileSystemId: aws.String(d.Id()),
+		}
+		if marker != "" {
+			params.Marker = aws.String(marker)
+		}
+
+		tagsResp, err := conn.DescribeTags(params)
+		if err != nil {
+			return fmt.Errorf("Error retrieving tags for EFS file system (%q): %s",
+				d.Id(), err.Error())
+		}
+
+		for _, tag := range tagsResp.Tags {
+			tags = append(tags, tag)
+		}
+
+		if tagsResp.NextMarker != nil {
+			marker = *tagsResp.NextMarker
+		} else {
+			break
+		}
+	}
+
+	err = d.Set("tags", tagsToMapEFS(tags))
+	if err != nil {
+		return err
+	}
+
+	var fs *efs.FileSystemDescription
+	for _, f := range resp.FileSystems {
+		if d.Id() == *f.FileSystemId {
+			fs = f
+			break
+		}
+	}
+	if fs == nil {
+		log.Printf("[WARN] EFS (%s) not found, removing from state", d.Id())
+		d.SetId("")
+		return nil
+	}
+
+	d.Set("creation_token", fs.CreationToken)
+	d.Set("performance_mode", fs.PerformanceMode)
+
+	return nil
+}
+
+func resourceAwsEfsFileSystemDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).efsconn
+
+	log.Printf("[DEBUG] Deleting EFS file system: %s", d.Id())
+	_, err := conn.DeleteFileSystem(&efs.DeleteFileSystemInput{
+		FileSystemId: aws.String(d.Id()),
+	})
+	if err != nil {
+		if efsErr, ok := err.(awserr.Error); ok && efsErr.Code() == "FileSystemNotFound" {
+			return nil
+		}
+		return fmt.Errorf("Error deleting EFS file system (%q): %s", d.Id(), err.Error())
+	}
+
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"available", "deleting"},
+		Target:  []string{},
+		Refresh: func() (interface{}, string, error) {
+			resp, err := conn.DescribeFileSystems(&efs.DescribeFileSystemsInput{
+				FileSystemId: aws.String(d.Id()),
+			})
+			if err != nil {
+				efsErr, ok := err.(awserr.Error)
+				if ok && efsErr.Code() == "FileSystemNotFound" {
+					return nil, "", nil
+				}
+				return nil, "error", err
+			}
+
+			if hasEmptyFileSystems(resp) {
+				return nil, "", nil
+			}
+
+			fs := resp.FileSystems[0]
+			log.Printf("[DEBUG] current status of %q: %q", *fs.FileSystemId, *fs.LifeCycleState)
+			return fs, *fs.LifeCycleState, nil
+		},
+		Timeout:    10 * time.Minute,
+		Delay:      2 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf("Error waiting for EFS file system (%q) to delete: %s",
+			d.Id(), err.Error())
+	}
+
+	log.Printf("[DEBUG] EFS file system %q deleted.", d.Id())
+
+	return nil
+}
+
+func validateReferenceName(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	creationToken := resource.PrefixedUniqueId(fmt.Sprintf("%s-", value))
+	if len(creationToken) > 64 {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot take the Creation Token over the limit of 64 characters: %q", k, value))
+	}
+	return
+}
+
+func validatePerformanceModeType(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if value != efs.PerformanceModeGeneralPurpose && value != efs.PerformanceModeMaxIo {
+		errors = append(errors, fmt.Errorf(
+			"%q contains an invalid Performance Mode %q. 
Valid modes are either %q or %q.", + k, value, efs.PerformanceModeGeneralPurpose, efs.PerformanceModeMaxIo)) + } + return +} + +func hasEmptyFileSystems(fs *efs.DescribeFileSystemsOutput) bool { + if fs != nil && len(fs.FileSystems) > 0 { + return false + } + return true +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_efs_mount_target.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_efs_mount_target.go new file mode 100644 index 000000000..501447808 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_efs_mount_target.go @@ -0,0 +1,298 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/efs" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsEfsMountTarget() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsEfsMountTargetCreate, + Read: resourceAwsEfsMountTargetRead, + Update: resourceAwsEfsMountTargetUpdate, + Delete: resourceAwsEfsMountTargetDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "file_system_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + + "security_groups": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Computed: true, + Optional: true, + }, + + "subnet_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "network_interface_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "dns_name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsEfsMountTargetCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).efsconn + + fsId := d.Get("file_system_id").(string) + subnetId := d.Get("subnet_id").(string) + + // CreateMountTarget would return the same Mount Target ID + // to parallel requests if they both include the same AZ + // and we would end up managing the same MT as 2 resources. + // So we make it fail by calling 1 request per AZ at a time. 
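+	// Serializing on file system + AZ is sufficient because EFS allows at most
+	// one mount target per file system per Availability Zone, so two concurrent
+	// CreateMountTarget calls can only collide when they share both.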
+ az, err := getAzFromSubnetId(subnetId, meta.(*AWSClient).ec2conn) + if err != nil { + return fmt.Errorf("Failed getting Availability Zone from subnet ID (%s): %s", subnetId, err) + } + mtKey := "efs-mt-" + fsId + "-" + az + awsMutexKV.Lock(mtKey) + defer awsMutexKV.Unlock(mtKey) + + input := efs.CreateMountTargetInput{ + FileSystemId: aws.String(fsId), + SubnetId: aws.String(subnetId), + } + + if v, ok := d.GetOk("ip_address"); ok { + input.IpAddress = aws.String(v.(string)) + } + if v, ok := d.GetOk("security_groups"); ok { + input.SecurityGroups = expandStringList(v.(*schema.Set).List()) + } + + log.Printf("[DEBUG] Creating EFS mount target: %#v", input) + + mt, err := conn.CreateMountTarget(&input) + if err != nil { + return err + } + + d.SetId(*mt.MountTargetId) + log.Printf("[INFO] EFS mount target ID: %s", d.Id()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating"}, + Target: []string{"available"}, + Refresh: func() (interface{}, string, error) { + resp, err := conn.DescribeMountTargets(&efs.DescribeMountTargetsInput{ + MountTargetId: aws.String(d.Id()), + }) + if err != nil { + return nil, "error", err + } + + if hasEmptyMountTargets(resp) { + return nil, "error", fmt.Errorf("EFS mount target %q could not be found.", d.Id()) + } + + mt := resp.MountTargets[0] + + log.Printf("[DEBUG] Current status of %q: %q", *mt.MountTargetId, *mt.LifeCycleState) + return mt, *mt.LifeCycleState, nil + }, + Timeout: 10 * time.Minute, + Delay: 2 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for EFS mount target (%s) to create: %s", d.Id(), err) + } + + log.Printf("[DEBUG] EFS mount target created: %s", *mt.MountTargetId) + + return resourceAwsEfsMountTargetRead(d, meta) +} + +func resourceAwsEfsMountTargetUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).efsconn + + if d.HasChange("security_groups") { + input := efs.ModifyMountTargetSecurityGroupsInput{ + MountTargetId: aws.String(d.Id()), + SecurityGroups: expandStringList(d.Get("security_groups").(*schema.Set).List()), + } + _, err := conn.ModifyMountTargetSecurityGroups(&input) + if err != nil { + return err + } + } + + return resourceAwsEfsMountTargetRead(d, meta) +} + +func resourceAwsEfsMountTargetRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).efsconn + resp, err := conn.DescribeMountTargets(&efs.DescribeMountTargetsInput{ + MountTargetId: aws.String(d.Id()), + }) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "MountTargetNotFound" { + // The EFS mount target could not be found, + // which would indicate that it might be + // already deleted. 
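+		// Clearing the ID and returning nil is the canonical Terraform pattern
+		// for signalling that the remote object is gone; the next plan will then
+		// propose recreating the mount target.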
+ log.Printf("[WARN] EFS mount target %q could not be found.", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("Error reading EFS mount target %s: %s", d.Id(), err) + } + + if hasEmptyMountTargets(resp) { + return fmt.Errorf("EFS mount target %q could not be found.", d.Id()) + } + + mt := resp.MountTargets[0] + + log.Printf("[DEBUG] Found EFS mount target: %#v", mt) + + d.SetId(*mt.MountTargetId) + d.Set("file_system_id", mt.FileSystemId) + d.Set("ip_address", mt.IpAddress) + d.Set("subnet_id", mt.SubnetId) + d.Set("network_interface_id", mt.NetworkInterfaceId) + + sgResp, err := conn.DescribeMountTargetSecurityGroups(&efs.DescribeMountTargetSecurityGroupsInput{ + MountTargetId: aws.String(d.Id()), + }) + if err != nil { + return err + } + + err = d.Set("security_groups", schema.NewSet(schema.HashString, flattenStringList(sgResp.SecurityGroups))) + if err != nil { + return err + } + + // DNS name per http://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-dns-name.html + _, err = getAzFromSubnetId(*mt.SubnetId, meta.(*AWSClient).ec2conn) + if err != nil { + return fmt.Errorf("Failed getting Availability Zone from subnet ID (%s): %s", *mt.SubnetId, err) + } + + region := meta.(*AWSClient).region + err = d.Set("dns_name", resourceAwsEfsMountTargetDnsName(*mt.FileSystemId, region)) + if err != nil { + return err + } + + return nil +} + +func getAzFromSubnetId(subnetId string, conn *ec2.EC2) (string, error) { + input := ec2.DescribeSubnetsInput{ + SubnetIds: []*string{aws.String(subnetId)}, + } + out, err := conn.DescribeSubnets(&input) + if err != nil { + return "", err + } + + if l := len(out.Subnets); l != 1 { + return "", fmt.Errorf("Expected exactly 1 subnet returned for %q, got: %d", subnetId, l) + } + + return *out.Subnets[0].AvailabilityZone, nil +} + +func resourceAwsEfsMountTargetDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).efsconn + + log.Printf("[DEBUG] Deleting EFS mount target %q", d.Id()) + _, err := conn.DeleteMountTarget(&efs.DeleteMountTargetInput{ + MountTargetId: aws.String(d.Id()), + }) + if err != nil { + return err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"available", "deleting", "deleted"}, + Target: []string{}, + Refresh: func() (interface{}, string, error) { + resp, err := conn.DescribeMountTargets(&efs.DescribeMountTargetsInput{ + MountTargetId: aws.String(d.Id()), + }) + if err != nil { + awsErr, ok := err.(awserr.Error) + if !ok { + return nil, "error", err + } + + if awsErr.Code() == "MountTargetNotFound" { + return nil, "", nil + } + + return nil, "error", awsErr + } + + if hasEmptyMountTargets(resp) { + return nil, "", nil + } + + mt := resp.MountTargets[0] + + log.Printf("[DEBUG] Current status of %q: %q", *mt.MountTargetId, *mt.LifeCycleState) + return mt, *mt.LifeCycleState, nil + }, + Timeout: 10 * time.Minute, + Delay: 2 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for EFS mount target (%q) to delete: %s", + d.Id(), err.Error()) + } + + log.Printf("[DEBUG] EFS mount target %q deleted.", d.Id()) + + return nil +} + +func resourceAwsEfsMountTargetDnsName(fileSystemId, region string) string { + return fmt.Sprintf("%s.efs.%s.amazonaws.com", fileSystemId, region) +} + +func hasEmptyMountTargets(mto *efs.DescribeMountTargetsOutput) bool { + if mto != nil && len(mto.MountTargets) > 0 { + return false + } + return true +} diff --git 
a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_egress_only_internet_gateway.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_egress_only_internet_gateway.go
new file mode 100644
index 000000000..0d5256a15
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_egress_only_internet_gateway.go
@@ -0,0 +1,129 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsEgressOnlyInternetGateway() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsEgressOnlyInternetGatewayCreate,
+		Read:   resourceAwsEgressOnlyInternetGatewayRead,
+		Delete: resourceAwsEgressOnlyInternetGatewayDelete,
+
+		Schema: map[string]*schema.Schema{
+			"vpc_id": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+		},
+	}
+}
+
+func resourceAwsEgressOnlyInternetGatewayCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	resp, err := conn.CreateEgressOnlyInternetGateway(&ec2.CreateEgressOnlyInternetGatewayInput{
+		VpcId: aws.String(d.Get("vpc_id").(string)),
+	})
+	if err != nil {
+		return fmt.Errorf("Error creating egress internet gateway: %s", err)
+	}
+
+	d.SetId(*resp.EgressOnlyInternetGateway.EgressOnlyInternetGatewayId)
+
+	err = resource.Retry(5*time.Minute, func() *resource.RetryError {
+		igRaw, _, err := EIGWStateRefreshFunc(conn, d.Id())()
+		if err != nil {
+			return resource.NonRetryableError(err)
+		}
+		if igRaw != nil {
+			return nil
+		}
+		// RetryableError must be given a non-nil error; passing nil would
+		// make the retry loop report success before the gateway is visible.
+		return resource.RetryableError(fmt.Errorf("Egress Only Internet Gateway (%s) not yet visible", d.Id()))
+	})
+
+	if err != nil {
+		return errwrap.Wrapf("{{err}}", err)
+	}
+
+	return resourceAwsEgressOnlyInternetGatewayRead(d, meta)
+}
+
+func EIGWStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		resp, err := conn.DescribeEgressOnlyInternetGateways(&ec2.DescribeEgressOnlyInternetGatewaysInput{
+			EgressOnlyInternetGatewayIds: []*string{aws.String(id)},
+		})
+		if err != nil {
+			ec2err, ok := err.(awserr.Error)
+			if ok && ec2err.Code() == "InvalidEgressInternetGatewayID.NotFound" {
+				resp = nil
+			} else {
+				log.Printf("[ERROR] Error on EIGWStateRefreshFunc: %s", err)
+				return nil, "", err
+			}
+		}
+
+		// Guard against a nil response before dereferencing it; resp is set
+		// to nil above when the gateway was not found.
+		if resp == nil || len(resp.EgressOnlyInternetGateways) < 1 {
+			// Sometimes AWS just has consistency issues and doesn't see
+			// our instance yet. Return an empty state.
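+			// A nil object with an empty state string is the conventional
+			// "not found yet" signal for a resource.StateRefreshFunc: the
+			// retry loop above sees igRaw == nil and keeps polling instead
+			// of treating it as an error.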
+ return nil, "", nil + } + + ig := resp.EgressOnlyInternetGateways[0] + return ig, "available", nil + } +} + +func resourceAwsEgressOnlyInternetGatewayRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + resp, err := conn.DescribeEgressOnlyInternetGateways(&ec2.DescribeEgressOnlyInternetGatewaysInput{ + EgressOnlyInternetGatewayIds: []*string{aws.String(d.Id())}, + }) + if err != nil { + return fmt.Errorf("Error describing egress internet gateway: %s", err) + } + + found := false + for _, igw := range resp.EgressOnlyInternetGateways { + if *igw.EgressOnlyInternetGatewayId == d.Id() { + found = true + } + } + + if !found { + log.Printf("[Error] Cannot find Egress Only Internet Gateway: %q", d.Id()) + d.SetId("") + return nil + } + + return nil +} + +func resourceAwsEgressOnlyInternetGatewayDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + _, err := conn.DeleteEgressOnlyInternetGateway(&ec2.DeleteEgressOnlyInternetGatewayInput{ + EgressOnlyInternetGatewayId: aws.String(d.Id()), + }) + if err != nil { + return fmt.Errorf("Error deleting egress internet gateway: %s", err) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_eip.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_eip.go new file mode 100644 index 000000000..1cd136782 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_eip.go @@ -0,0 +1,326 @@ +package aws + +import ( + "fmt" + "log" + "net" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsEip() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsEipCreate, + Read: resourceAwsEipRead, + Update: resourceAwsEipUpdate, + Delete: resourceAwsEipDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "vpc": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "instance": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "network_interface": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "allocation_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "association_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "domain": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "public_ip": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "private_ip": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "associate_with_private_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func resourceAwsEipCreate(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn + + // By default, we're not in a VPC + domainOpt := "" + if v := d.Get("vpc"); v != nil && v.(bool) { + domainOpt = "vpc" + } + + allocOpts := &ec2.AllocateAddressInput{ + Domain: aws.String(domainOpt), + } + + log.Printf("[DEBUG] EIP create configuration: %#v", allocOpts) + allocResp, err := ec2conn.AllocateAddress(allocOpts) + if err != nil { + return fmt.Errorf("Error creating EIP: %s", err) + } + + // The domain 
tells us if we're in a VPC or not
+	d.Set("domain", allocResp.Domain)
+
+	// Assign the EIP's (unique) allocation ID for use later;
+	// the EIP API has a conditional unique ID (really), so
+	// if we're in a VPC we need to save the ID as such, otherwise
+	// it defaults to using the public IP
+	log.Printf("[DEBUG] EIP Allocate: %#v", allocResp)
+	if d.Get("domain").(string) == "vpc" {
+		d.SetId(*allocResp.AllocationId)
+	} else {
+		d.SetId(*allocResp.PublicIp)
+	}
+
+	log.Printf("[INFO] EIP ID: %s (domain: %v)", d.Id(), *allocResp.Domain)
+	return resourceAwsEipUpdate(d, meta)
+}
+
+func resourceAwsEipRead(d *schema.ResourceData, meta interface{}) error {
+	ec2conn := meta.(*AWSClient).ec2conn
+
+	domain := resourceAwsEipDomain(d)
+	id := d.Id()
+
+	req := &ec2.DescribeAddressesInput{}
+
+	if domain == "vpc" {
+		req.AllocationIds = []*string{aws.String(id)}
+	} else {
+		req.PublicIps = []*string{aws.String(id)}
+	}
+
+	log.Printf(
+		"[DEBUG] EIP describe configuration: %s (domain: %s)",
+		req, domain)
+
+	describeAddresses, err := ec2conn.DescribeAddresses(req)
+	if err != nil {
+		if ec2err, ok := err.(awserr.Error); ok && (ec2err.Code() == "InvalidAllocationID.NotFound" || ec2err.Code() == "InvalidAddress.NotFound") {
+			d.SetId("")
+			return nil
+		}
+
+		return fmt.Errorf("Error retrieving EIP: %s", err)
+	}
+
+	// Verify AWS returned our EIP: exactly one address whose identifier
+	// (allocation ID in a VPC, public IP otherwise) matches ours.
+	if len(describeAddresses.Addresses) != 1 ||
+		(domain == "vpc" && *describeAddresses.Addresses[0].AllocationId != id) ||
+		(domain != "vpc" && *describeAddresses.Addresses[0].PublicIp != id) {
+		return fmt.Errorf("Unable to find EIP: %#v", describeAddresses.Addresses)
+	}
+
+	address := describeAddresses.Addresses[0]
+
+	d.Set("association_id", address.AssociationId)
+	if address.InstanceId != nil {
+		d.Set("instance", address.InstanceId)
+	} else {
+		d.Set("instance", "")
+	}
+	if address.NetworkInterfaceId != nil {
+		d.Set("network_interface", address.NetworkInterfaceId)
+	} else {
+		d.Set("network_interface", "")
+	}
+	d.Set("private_ip", address.PrivateIpAddress)
+	d.Set("public_ip", address.PublicIp)
+
+	// On import, domain was never set (it always is when we create), so set
+	// the 'vpc' attribute based on whether we're in a VPC.
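+	// For example (values illustrative only): a VPC EIP is tracked by its
+	// allocation ID, e.g. "eipalloc-0123abcd", while an EC2-Classic EIP is
+	// tracked by its public IP, e.g. "203.0.113.10".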
+	if address.Domain != nil {
+		d.Set("vpc", *address.Domain == "vpc")
+	}
+
+	d.Set("domain", address.Domain)
+
+	// Force ID to be an Allocation ID if we're on a VPC.
+	// This allows users to import the EIP based on the IP if they are in a VPC
+	if *address.Domain == "vpc" && net.ParseIP(id) != nil {
+		log.Printf("[DEBUG] Re-assigning EIP ID (%s) to its Allocation ID (%s)", d.Id(), *address.AllocationId)
+		d.SetId(*address.AllocationId)
+	}
+
+	return nil
+}
+
+func resourceAwsEipUpdate(d *schema.ResourceData, meta interface{}) error {
+	ec2conn := meta.(*AWSClient).ec2conn
+
+	domain := resourceAwsEipDomain(d)
+
+	// Associate to instance or interface if specified
+	v_instance, ok_instance := d.GetOk("instance")
+	v_interface, ok_interface := d.GetOk("network_interface")
+
+	if ok_instance || ok_interface {
+		instanceId := v_instance.(string)
+		networkInterfaceId := v_interface.(string)
+
+		assocOpts := &ec2.AssociateAddressInput{
+			InstanceId: aws.String(instanceId),
+			PublicIp:   aws.String(d.Id()),
+		}
+
+		// more unique ID conditionals
+		if domain == "vpc" {
+			var privateIpAddress *string
+			if v := d.Get("associate_with_private_ip").(string); v != "" {
+				privateIpAddress = aws.String(v)
+			}
+			assocOpts = &ec2.AssociateAddressInput{
+				NetworkInterfaceId: aws.String(networkInterfaceId),
+				InstanceId:         aws.String(instanceId),
+				AllocationId:       aws.String(d.Id()),
+				PrivateIpAddress:   privateIpAddress,
+			}
+		}
+
+		log.Printf("[DEBUG] EIP associate configuration: %s (domain: %s)", assocOpts, domain)
+
+		err := resource.Retry(5*time.Minute, func() *resource.RetryError {
+			_, err := ec2conn.AssociateAddress(assocOpts)
+			if err != nil {
+				if awsErr, ok := err.(awserr.Error); ok {
+					if awsErr.Code() == "InvalidAllocationID.NotFound" {
+						return resource.RetryableError(awsErr)
+					}
+				}
+				return resource.NonRetryableError(err)
+			}
+			return nil
+		})
+		if err != nil {
+			// Prevent saving instance if association failed
+			// e.g. missing internet gateway in VPC
+			d.Set("instance", "")
+			d.Set("network_interface", "")
+			return fmt.Errorf("Failure associating EIP: %s", err)
+		}
+	}
+
+	return resourceAwsEipRead(d, meta)
+}
+
+func resourceAwsEipDelete(d *schema.ResourceData, meta interface{}) error {
+	ec2conn := meta.(*AWSClient).ec2conn
+
+	if err := resourceAwsEipRead(d, meta); err != nil {
+		return err
+	}
+	if d.Id() == "" {
+		// This might happen from the read
+		return nil
+	}
+
+	// If we are attached to an instance or interface, detach first.
+	if d.Get("instance").(string) != "" || d.Get("association_id").(string) != "" {
+		log.Printf("[DEBUG] Disassociating EIP: %s", d.Id())
+		var err error
+		switch resourceAwsEipDomain(d) {
+		case "vpc":
+			_, err = ec2conn.DisassociateAddress(&ec2.DisassociateAddressInput{
+				AssociationId: aws.String(d.Get("association_id").(string)),
+			})
+		case "standard":
+			_, err = ec2conn.DisassociateAddress(&ec2.DisassociateAddressInput{
+				PublicIp: aws.String(d.Get("public_ip").(string)),
+			})
+		}
+
+		if err != nil {
+			// First check if the association ID is not found. If this
+			// is the case, then it was already disassociated somehow,
+			// and that is okay. The most common reason for this is that
+			// the instance or ENI it was attached to was destroyed.
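+			// Swallowing the NotFound error below keeps the disassociate
+			// step idempotent: a missing association means there is nothing
+			// left to detach, so the release can proceed.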
+ if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidAssociationID.NotFound" { + err = nil + } + } + + if err != nil { + return err + } + } + + domain := resourceAwsEipDomain(d) + return resource.Retry(3*time.Minute, func() *resource.RetryError { + var err error + switch domain { + case "vpc": + log.Printf( + "[DEBUG] EIP release (destroy) address allocation: %v", + d.Id()) + _, err = ec2conn.ReleaseAddress(&ec2.ReleaseAddressInput{ + AllocationId: aws.String(d.Id()), + }) + case "standard": + log.Printf("[DEBUG] EIP release (destroy) address: %v", d.Id()) + _, err = ec2conn.ReleaseAddress(&ec2.ReleaseAddressInput{ + PublicIp: aws.String(d.Id()), + }) + } + + if err == nil { + return nil + } + if _, ok := err.(awserr.Error); !ok { + return resource.NonRetryableError(err) + } + + return resource.RetryableError(err) + }) +} + +func resourceAwsEipDomain(d *schema.ResourceData) string { + if v, ok := d.GetOk("domain"); ok { + return v.(string) + } else if strings.Contains(d.Id(), "eipalloc") { + // We have to do this for backwards compatibility since TF 0.1 + // didn't have the "domain" computed attribute. + return "vpc" + } + + return "standard" +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_eip_association.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_eip_association.go new file mode 100644 index 000000000..b3db8655d --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_eip_association.go @@ -0,0 +1,163 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsEipAssociation() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsEipAssociationCreate, + Read: resourceAwsEipAssociationRead, + Delete: resourceAwsEipAssociationDelete, + + Schema: map[string]*schema.Schema{ + "allocation_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "allow_reassociation": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + + "instance_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "network_interface_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "private_ip_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "public_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsEipAssociationCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + request := &ec2.AssociateAddressInput{} + + if v, ok := d.GetOk("allocation_id"); ok { + request.AllocationId = aws.String(v.(string)) + } + if v, ok := d.GetOk("allow_reassociation"); ok { + request.AllowReassociation = aws.Bool(v.(bool)) + } + if v, ok := d.GetOk("instance_id"); ok { + request.InstanceId = aws.String(v.(string)) + } + if v, ok := d.GetOk("network_interface_id"); ok { + request.NetworkInterfaceId = aws.String(v.(string)) + } + if v, ok := d.GetOk("private_ip_address"); ok { + request.PrivateIpAddress = aws.String(v.(string)) + } + if v, ok := d.GetOk("public_ip"); ok { + request.PublicIp = aws.String(v.(string)) + } + + 
log.Printf("[DEBUG] EIP association configuration: %#v", request) + + resp, err := conn.AssociateAddress(request) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + return fmt.Errorf("[WARN] Error attaching EIP, message: \"%s\", code: \"%s\"", + awsErr.Message(), awsErr.Code()) + } + return err + } + + d.SetId(*resp.AssociationId) + + return resourceAwsEipAssociationRead(d, meta) +} + +func resourceAwsEipAssociationRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + request := &ec2.DescribeAddressesInput{ + Filters: []*ec2.Filter{ + &ec2.Filter{ + Name: aws.String("association-id"), + Values: []*string{aws.String(d.Id())}, + }, + }, + } + + response, err := conn.DescribeAddresses(request) + if err != nil { + return fmt.Errorf("Error reading EC2 Elastic IP %s: %#v", d.Get("allocation_id").(string), err) + } + + if response.Addresses == nil || len(response.Addresses) == 0 { + log.Printf("[INFO] EIP Association ID Not Found. Refreshing from state") + d.SetId("") + return nil + } + + return readAwsEipAssociation(d, response.Addresses[0]) +} + +func resourceAwsEipAssociationDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + opts := &ec2.DisassociateAddressInput{ + AssociationId: aws.String(d.Id()), + } + + _, err := conn.DisassociateAddress(opts) + if err != nil { + return fmt.Errorf("Error deleting Elastic IP association: %s", err) + } + + return nil +} + +func readAwsEipAssociation(d *schema.ResourceData, address *ec2.Address) error { + if err := d.Set("allocation_id", address.AllocationId); err != nil { + return err + } + if err := d.Set("instance_id", address.InstanceId); err != nil { + return err + } + if err := d.Set("network_interface_id", address.NetworkInterfaceId); err != nil { + return err + } + if err := d.Set("private_ip_address", address.PrivateIpAddress); err != nil { + return err + } + if err := d.Set("public_ip", address.PublicIp); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_application.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_application.go new file mode 100644 index 000000000..212332526 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_application.go @@ -0,0 +1,152 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elasticbeanstalk" + "github.com/hashicorp/terraform/helper/resource" +) + +func resourceAwsElasticBeanstalkApplication() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsElasticBeanstalkApplicationCreate, + Read: resourceAwsElasticBeanstalkApplicationRead, + Update: resourceAwsElasticBeanstalkApplicationUpdate, + Delete: resourceAwsElasticBeanstalkApplicationDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + }, + } +} + +func resourceAwsElasticBeanstalkApplicationCreate(d *schema.ResourceData, meta interface{}) error { + beanstalkConn := meta.(*AWSClient).elasticbeanstalkconn + 
+	// Get the name and description
+	name := d.Get("name").(string)
+	description := d.Get("description").(string)
+
+	log.Printf("[DEBUG] Elastic Beanstalk application create: %s, description: %s", name, description)
+
+	req := &elasticbeanstalk.CreateApplicationInput{
+		ApplicationName: aws.String(name),
+		Description:     aws.String(description),
+	}
+
+	_, err := beanstalkConn.CreateApplication(req)
+	if err != nil {
+		return err
+	}
+
+	d.SetId(name)
+
+	return resourceAwsElasticBeanstalkApplicationRead(d, meta)
+}
+
+func resourceAwsElasticBeanstalkApplicationUpdate(d *schema.ResourceData, meta interface{}) error {
+	beanstalkConn := meta.(*AWSClient).elasticbeanstalkconn
+
+	if d.HasChange("description") {
+		if err := resourceAwsElasticBeanstalkApplicationDescriptionUpdate(beanstalkConn, d); err != nil {
+			return err
+		}
+	}
+
+	return resourceAwsElasticBeanstalkApplicationRead(d, meta)
+}
+
+func resourceAwsElasticBeanstalkApplicationDescriptionUpdate(beanstalkConn *elasticbeanstalk.ElasticBeanstalk, d *schema.ResourceData) error {
+	name := d.Get("name").(string)
+	description := d.Get("description").(string)
+
+	log.Printf("[DEBUG] Elastic Beanstalk application: %s, update description: %s", name, description)
+
+	_, err := beanstalkConn.UpdateApplication(&elasticbeanstalk.UpdateApplicationInput{
+		ApplicationName: aws.String(name),
+		Description:     aws.String(description),
+	})
+
+	return err
+}
+
+func resourceAwsElasticBeanstalkApplicationRead(d *schema.ResourceData, meta interface{}) error {
+	a, err := getBeanstalkApplication(d, meta)
+	if err != nil {
+		return err
+	}
+	if a == nil {
+		return nil
+	}
+
+	d.Set("name", a.ApplicationName)
+	d.Set("description", a.Description)
+	return nil
+}
+
+func resourceAwsElasticBeanstalkApplicationDelete(d *schema.ResourceData, meta interface{}) error {
+	beanstalkConn := meta.(*AWSClient).elasticbeanstalkconn
+
+	a, err := getBeanstalkApplication(d, meta)
+	if err != nil {
+		return err
+	}
+	_, err = beanstalkConn.DeleteApplication(&elasticbeanstalk.DeleteApplicationInput{
+		ApplicationName: aws.String(d.Id()),
+	})
+	if err != nil {
+		return err
+	}
+
+	return resource.Retry(10*time.Second, func() *resource.RetryError {
+		if a, _ = getBeanstalkApplication(d, meta); a != nil {
+			return resource.RetryableError(
+				fmt.Errorf("Beanstalk Application still exists"))
+		}
+		return nil
+	})
+}
+
+func getBeanstalkApplication(
+	d *schema.ResourceData,
+	meta interface{}) (*elasticbeanstalk.ApplicationDescription, error) {
+	conn := meta.(*AWSClient).elasticbeanstalkconn
+
+	resp, err := conn.DescribeApplications(&elasticbeanstalk.DescribeApplicationsInput{
+		ApplicationNames: []*string{aws.String(d.Id())},
+	})
+
+	if err != nil {
+		if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidBeanstalkAppID.NotFound" {
+			log.Printf("[Err] Error reading Elastic Beanstalk Application (%s): Application not found", d.Id())
+			d.SetId("")
+			return nil, nil
+		}
+		return nil, err
+	}
+
+	switch {
+	case len(resp.Applications) > 1:
+		return nil, fmt.Errorf("Error %d Applications matched, expected 1", len(resp.Applications))
+	case len(resp.Applications) == 0:
+		d.SetId("")
+		return nil, nil
+	default:
+		return resp.Applications[0], nil
+	}
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_application_version.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_application_version.go
new file mode 100644
index 000000000..9125225a3
--- /dev/null
+++
b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_application_version.go @@ -0,0 +1,202 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elasticbeanstalk" + "time" +) + +func resourceAwsElasticBeanstalkApplicationVersion() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsElasticBeanstalkApplicationVersionCreate, + Read: resourceAwsElasticBeanstalkApplicationVersionRead, + Update: resourceAwsElasticBeanstalkApplicationVersionUpdate, + Delete: resourceAwsElasticBeanstalkApplicationVersionDelete, + + Schema: map[string]*schema.Schema{ + "application": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "bucket": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "key": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "force_delete": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + } +} + +func resourceAwsElasticBeanstalkApplicationVersionCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticbeanstalkconn + + application := d.Get("application").(string) + description := d.Get("description").(string) + bucket := d.Get("bucket").(string) + key := d.Get("key").(string) + name := d.Get("name").(string) + + s3Location := elasticbeanstalk.S3Location{ + S3Bucket: aws.String(bucket), + S3Key: aws.String(key), + } + + createOpts := elasticbeanstalk.CreateApplicationVersionInput{ + ApplicationName: aws.String(application), + Description: aws.String(description), + SourceBundle: &s3Location, + VersionLabel: aws.String(name), + } + + log.Printf("[DEBUG] Elastic Beanstalk Application Version create opts: %s", createOpts) + _, err := conn.CreateApplicationVersion(&createOpts) + if err != nil { + return err + } + + d.SetId(name) + log.Printf("[INFO] Elastic Beanstalk Application Version Label: %s", name) + + return resourceAwsElasticBeanstalkApplicationVersionRead(d, meta) +} + +func resourceAwsElasticBeanstalkApplicationVersionRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticbeanstalkconn + + resp, err := conn.DescribeApplicationVersions(&elasticbeanstalk.DescribeApplicationVersionsInput{ + VersionLabels: []*string{aws.String(d.Id())}, + }) + + if err != nil { + return err + } + + if len(resp.ApplicationVersions) == 0 { + log.Printf("[DEBUG] Elastic Beanstalk application version read: application version not found") + + d.SetId("") + + return nil + } else if len(resp.ApplicationVersions) != 1 { + return fmt.Errorf("Error reading application version properties: found %d application versions, expected 1", len(resp.ApplicationVersions)) + } + + if err := d.Set("description", resp.ApplicationVersions[0].Description); err != nil { + return err + } + + return nil +} + +func resourceAwsElasticBeanstalkApplicationVersionUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticbeanstalkconn + + if d.HasChange("description") { + if err := resourceAwsElasticBeanstalkApplicationVersionDescriptionUpdate(conn, d); err != nil { + return err + } + } + + return 
resourceAwsElasticBeanstalkApplicationVersionRead(d, meta) + +} + +func resourceAwsElasticBeanstalkApplicationVersionDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticbeanstalkconn + + application := d.Get("application").(string) + name := d.Id() + + if d.Get("force_delete").(bool) == false { + environments, err := versionUsedBy(application, name, conn) + if err != nil { + return err + } + + if len(environments) > 1 { + return fmt.Errorf("Unable to delete Application Version, it is currently in use by the following environments: %s.", environments) + } + } + _, err := conn.DeleteApplicationVersion(&elasticbeanstalk.DeleteApplicationVersionInput{ + ApplicationName: aws.String(application), + VersionLabel: aws.String(name), + DeleteSourceBundle: aws.Bool(false), + }) + + if err != nil { + if awserr, ok := err.(awserr.Error); ok { + // application version is pending delete, or no longer exists. + if awserr.Code() == "InvalidParameterValue" { + d.SetId("") + return nil + } + } + return err + } + + d.SetId("") + return nil +} + +func resourceAwsElasticBeanstalkApplicationVersionDescriptionUpdate(conn *elasticbeanstalk.ElasticBeanstalk, d *schema.ResourceData) error { + application := d.Get("application").(string) + description := d.Get("description").(string) + name := d.Get("name").(string) + + log.Printf("[DEBUG] Elastic Beanstalk application version: %s, update description: %s", name, description) + + _, err := conn.UpdateApplicationVersion(&elasticbeanstalk.UpdateApplicationVersionInput{ + ApplicationName: aws.String(application), + Description: aws.String(description), + VersionLabel: aws.String(name), + }) + + return err +} + +func versionUsedBy(applicationName, versionLabel string, conn *elasticbeanstalk.ElasticBeanstalk) ([]string, error) { + now := time.Now() + resp, err := conn.DescribeEnvironments(&elasticbeanstalk.DescribeEnvironmentsInput{ + ApplicationName: aws.String(applicationName), + VersionLabel: aws.String(versionLabel), + IncludeDeleted: aws.Bool(true), + IncludedDeletedBackTo: aws.Time(now.Add(-1 * time.Minute)), + }) + + if err != nil { + return nil, err + } + + var environmentIDs []string + for _, environment := range resp.Environments { + environmentIDs = append(environmentIDs, *environment.EnvironmentId) + } + + return environmentIDs, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_configuration_template.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_configuration_template.go new file mode 100644 index 000000000..346fcd5ff --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_configuration_template.go @@ -0,0 +1,240 @@ +package aws + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elasticbeanstalk" +) + +func resourceAwsElasticBeanstalkConfigurationTemplate() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsElasticBeanstalkConfigurationTemplateCreate, + Read: resourceAwsElasticBeanstalkConfigurationTemplateRead, + Update: resourceAwsElasticBeanstalkConfigurationTemplateUpdate, + Delete: resourceAwsElasticBeanstalkConfigurationTemplateDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + 
"application": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "environment_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "setting": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: resourceAwsElasticBeanstalkOptionSetting(), + Set: optionSettingValueHash, + }, + "solution_stack_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsElasticBeanstalkConfigurationTemplateCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticbeanstalkconn + + // Get the relevant properties + name := d.Get("name").(string) + appName := d.Get("application").(string) + + optionSettings := gatherOptionSettings(d) + + opts := elasticbeanstalk.CreateConfigurationTemplateInput{ + ApplicationName: aws.String(appName), + TemplateName: aws.String(name), + OptionSettings: optionSettings, + } + + if attr, ok := d.GetOk("description"); ok { + opts.Description = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("environment_id"); ok { + opts.EnvironmentId = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("solution_stack_name"); ok { + opts.SolutionStackName = aws.String(attr.(string)) + } + + log.Printf("[DEBUG] Elastic Beanstalk configuration template create opts: %s", opts) + if _, err := conn.CreateConfigurationTemplate(&opts); err != nil { + return fmt.Errorf("Error creating Elastic Beanstalk configuration template: %s", err) + } + + d.SetId(name) + + return resourceAwsElasticBeanstalkConfigurationTemplateRead(d, meta) +} + +func resourceAwsElasticBeanstalkConfigurationTemplateRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticbeanstalkconn + + log.Printf("[DEBUG] Elastic Beanstalk configuration template read: %s", d.Get("name").(string)) + + resp, err := conn.DescribeConfigurationSettings(&elasticbeanstalk.DescribeConfigurationSettingsInput{ + TemplateName: aws.String(d.Id()), + ApplicationName: aws.String(d.Get("application").(string)), + }) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "InvalidParameterValue" && strings.Contains(awsErr.Message(), "No Configuration Template named") { + log.Printf("[WARN] No Configuration Template named (%s) found", d.Id()) + d.SetId("") + return nil + } + } + return err + } + + if len(resp.ConfigurationSettings) != 1 { + log.Printf("[DEBUG] Elastic Beanstalk unexpected describe configuration template response: %+v", resp) + return fmt.Errorf("Error reading application properties: found %d applications, expected 1", len(resp.ConfigurationSettings)) + } + + d.Set("description", resp.ConfigurationSettings[0].Description) + return nil +} + +func resourceAwsElasticBeanstalkConfigurationTemplateUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticbeanstalkconn + + log.Printf("[DEBUG] Elastic Beanstalk configuration template update: %s", d.Get("name").(string)) + + if d.HasChange("description") { + if err := resourceAwsElasticBeanstalkConfigurationTemplateDescriptionUpdate(conn, d); err != nil { + return err + } + } + + if d.HasChange("setting") { + if err := resourceAwsElasticBeanstalkConfigurationTemplateOptionSettingsUpdate(conn, d); err != nil { + return err + } + } + + return resourceAwsElasticBeanstalkConfigurationTemplateRead(d, meta) +} + +func 
resourceAwsElasticBeanstalkConfigurationTemplateDescriptionUpdate(conn *elasticbeanstalk.ElasticBeanstalk, d *schema.ResourceData) error {
+	_, err := conn.UpdateConfigurationTemplate(&elasticbeanstalk.UpdateConfigurationTemplateInput{
+		ApplicationName: aws.String(d.Get("application").(string)),
+		TemplateName:    aws.String(d.Get("name").(string)),
+		Description:     aws.String(d.Get("description").(string)),
+	})
+
+	return err
+}
+
+func resourceAwsElasticBeanstalkConfigurationTemplateOptionSettingsUpdate(conn *elasticbeanstalk.ElasticBeanstalk, d *schema.ResourceData) error {
+	if d.HasChange("setting") {
+		_, err := conn.ValidateConfigurationSettings(&elasticbeanstalk.ValidateConfigurationSettingsInput{
+			ApplicationName: aws.String(d.Get("application").(string)),
+			TemplateName:    aws.String(d.Get("name").(string)),
+			OptionSettings:  gatherOptionSettings(d),
+		})
+		if err != nil {
+			return err
+		}
+
+		o, n := d.GetChange("setting")
+		if o == nil {
+			o = new(schema.Set)
+		}
+		if n == nil {
+			n = new(schema.Set)
+		}
+
+		os := o.(*schema.Set)
+		ns := n.(*schema.Set)
+
+		rm := extractOptionSettings(os.Difference(ns))
+		add := extractOptionSettings(ns.Difference(os))
+
+		// Additions and removals of options are done in a single API call, so we
+		// can't do our normal "remove these" and then later "add these", re-adding
+		// any updated settings.
+		// Because of this, we need to remove any settings in the "removable"
+		// settings that are also found in the "add" settings, otherwise they
+		// conflict. Here we loop through all the initial removables from the set
+		// difference, and we build up a slice of settings not found in the "add"
+		// set
+		var remove []*elasticbeanstalk.ConfigurationOptionSetting
+		for _, r := range rm {
+			updated := false
+			for _, a := range add {
+				if *r.Namespace == *a.Namespace && *r.OptionName == *a.OptionName {
+					updated = true
+					break
+				}
+			}
+			// Only queue the setting for removal if it is not also being
+			// added/updated in this same call.
+			if !updated {
+				remove = append(remove, r)
+			}
+		}
+
+		req := &elasticbeanstalk.UpdateConfigurationTemplateInput{
+			ApplicationName: aws.String(d.Get("application").(string)),
+			TemplateName:    aws.String(d.Get("name").(string)),
+			OptionSettings:  add,
+		}
+
+		for _, elem := range remove {
+			req.OptionsToRemove = append(req.OptionsToRemove, &elasticbeanstalk.OptionSpecification{
+				Namespace:  elem.Namespace,
+				OptionName: elem.OptionName,
+			})
+		}
+
+		log.Printf("[DEBUG] Update Configuration Template request: %s", req)
+		if _, err := conn.UpdateConfigurationTemplate(req); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func resourceAwsElasticBeanstalkConfigurationTemplateDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).elasticbeanstalkconn
+
+	application := d.Get("application").(string)
+
+	_, err := conn.DeleteConfigurationTemplate(&elasticbeanstalk.DeleteConfigurationTemplateInput{
+		ApplicationName: aws.String(application),
+		TemplateName:    aws.String(d.Id()),
+	})
+
+	return err
+}
+
+func gatherOptionSettings(d *schema.ResourceData) []*elasticbeanstalk.ConfigurationOptionSetting {
+	optionSettingsSet, ok := d.Get("setting").(*schema.Set)
+	if !ok || optionSettingsSet == nil {
+		optionSettingsSet = new(schema.Set)
+	}
+
+	return extractOptionSettings(optionSettingsSet)
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_environment.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_environment.go
new file mode 100644
index 000000000..fa1e2562b
--- /dev/null
+++
b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_environment.go @@ -0,0 +1,913 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "sort" + "strings" + "time" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/elasticbeanstalk" +) + +func resourceAwsElasticBeanstalkOptionSetting() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "namespace": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "resource": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func resourceAwsElasticBeanstalkEnvironment() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsElasticBeanstalkEnvironmentCreate, + Read: resourceAwsElasticBeanstalkEnvironmentRead, + Update: resourceAwsElasticBeanstalkEnvironmentUpdate, + Delete: resourceAwsElasticBeanstalkEnvironmentDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + SchemaVersion: 1, + MigrateState: resourceAwsElasticBeanstalkEnvironmentMigrateState, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "application": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "version_label": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "cname": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "cname_prefix": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "tier": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "WebServer", + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + switch value { + case + "Worker", + "WebServer": + return + } + errors = append(errors, fmt.Errorf("%s is not a valid tier. 
Valid options are WebServer or Worker", value))
+					return
+				},
+				ForceNew: true,
+			},
+			"setting": &schema.Schema{
+				Type:     schema.TypeSet,
+				Optional: true,
+				Elem:     resourceAwsElasticBeanstalkOptionSetting(),
+				Set:      optionSettingValueHash,
+			},
+			"all_settings": &schema.Schema{
+				Type:     schema.TypeSet,
+				Computed: true,
+				Elem:     resourceAwsElasticBeanstalkOptionSetting(),
+				Set:      optionSettingValueHash,
+			},
+			"solution_stack_name": &schema.Schema{
+				Type:          schema.TypeString,
+				Optional:      true,
+				Computed:      true,
+				ConflictsWith: []string{"template_name"},
+			},
+			"template_name": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+			"wait_for_ready_timeout": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				Default:  "20m",
+				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
+					value := v.(string)
+					duration, err := time.ParseDuration(value)
+					if err != nil {
+						errors = append(errors, fmt.Errorf(
+							"%q cannot be parsed as a duration: %s", k, err))
+					}
+					if err == nil && duration <= 0 {
+						errors = append(errors, fmt.Errorf(
+							"%q must be greater than zero", k))
+					}
+					return
+				},
+			},
+			"poll_interval": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
+					value := v.(string)
+					duration, err := time.ParseDuration(value)
+					if err != nil {
+						errors = append(errors, fmt.Errorf(
+							"%q cannot be parsed as a duration: %s", k, err))
+					}
+					if err == nil && (duration < 10*time.Second || duration > 60*time.Second) {
+						errors = append(errors, fmt.Errorf(
+							"%q must be between 10s and 60s", k))
+					}
+					return
+				},
+			},
+			"autoscaling_groups": &schema.Schema{
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+			"instances": &schema.Schema{
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+			"launch_configurations": &schema.Schema{
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+			"load_balancers": &schema.Schema{
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+			"queues": &schema.Schema{
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+			"triggers": &schema.Schema{
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+
+			"tags": tagsSchema(),
+		},
+	}
+}
+
+func resourceAwsElasticBeanstalkEnvironmentCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).elasticbeanstalkconn
+
+	// Get values from config
+	name := d.Get("name").(string)
+	cnamePrefix := d.Get("cname_prefix").(string)
+	tier := d.Get("tier").(string)
+	app := d.Get("application").(string)
+	desc := d.Get("description").(string)
+	version := d.Get("version_label").(string)
+	settings := d.Get("setting").(*schema.Set)
+	solutionStack := d.Get("solution_stack_name").(string)
+	templateName := d.Get("template_name").(string)
+
+	// TODO set tags
+	// Note: at time of writing, you cannot view or edit Tags after creation
+	// d.Set("tags", tagsToMap(instance.Tags))
+	createOpts := elasticbeanstalk.CreateEnvironmentInput{
+		EnvironmentName: aws.String(name),
+		ApplicationName: aws.String(app),
+		OptionSettings:  extractOptionSettings(settings),
+		Tags:            tagsFromMapBeanstalk(d.Get("tags").(map[string]interface{})),
+	}
+
+	if desc != "" {
+		createOpts.Description = aws.String(desc)
+	}
+
+	if cnamePrefix != "" {
+		if tier != "WebServer" {
+
return fmt.Errorf("Cannot set cname_prefix for tier: %s.", tier) + } + createOpts.CNAMEPrefix = aws.String(cnamePrefix) + } + + if tier != "" { + var tierType string + + switch tier { + case "WebServer": + tierType = "Standard" + case "Worker": + tierType = "SQS/HTTP" + } + environmentTier := elasticbeanstalk.EnvironmentTier{ + Name: aws.String(tier), + Type: aws.String(tierType), + } + createOpts.Tier = &environmentTier + } + + if solutionStack != "" { + createOpts.SolutionStackName = aws.String(solutionStack) + } + + if templateName != "" { + createOpts.TemplateName = aws.String(templateName) + } + + if version != "" { + createOpts.VersionLabel = aws.String(version) + } + + // Get the current time to filter getBeanstalkEnvironmentErrors messages + t := time.Now() + log.Printf("[DEBUG] Elastic Beanstalk Environment create opts: %s", createOpts) + resp, err := conn.CreateEnvironment(&createOpts) + if err != nil { + return err + } + + // Assign the application name as the resource ID + d.SetId(*resp.EnvironmentId) + + waitForReadyTimeOut, err := time.ParseDuration(d.Get("wait_for_ready_timeout").(string)) + if err != nil { + return err + } + + pollInterval, err := time.ParseDuration(d.Get("poll_interval").(string)) + if err != nil { + pollInterval = 0 + log.Printf("[WARN] Error parsing poll_interval, using default backoff") + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"Launching", "Updating"}, + Target: []string{"Ready"}, + Refresh: environmentStateRefreshFunc(conn, d.Id(), t), + Timeout: waitForReadyTimeOut, + Delay: 10 * time.Second, + PollInterval: pollInterval, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for Elastic Beanstalk Environment (%s) to become ready: %s", + d.Id(), err) + } + + envErrors, err := getBeanstalkEnvironmentErrors(conn, d.Id(), t) + if err != nil { + return err + } + if envErrors != nil { + return envErrors + } + + return resourceAwsElasticBeanstalkEnvironmentRead(d, meta) +} + +func resourceAwsElasticBeanstalkEnvironmentUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticbeanstalkconn + + envId := d.Id() + + var hasChange bool + + updateOpts := elasticbeanstalk.UpdateEnvironmentInput{ + EnvironmentId: aws.String(envId), + } + + if d.HasChange("description") { + hasChange = true + updateOpts.Description = aws.String(d.Get("description").(string)) + } + + if d.HasChange("solution_stack_name") { + hasChange = true + if v, ok := d.GetOk("solution_stack_name"); ok { + updateOpts.SolutionStackName = aws.String(v.(string)) + } + } + + if d.HasChange("setting") { + hasChange = true + o, n := d.GetChange("setting") + if o == nil { + o = &schema.Set{F: optionSettingValueHash} + } + if n == nil { + n = &schema.Set{F: optionSettingValueHash} + } + + os := o.(*schema.Set) + ns := n.(*schema.Set) + + rm := extractOptionSettings(os.Difference(ns)) + add := extractOptionSettings(ns.Difference(os)) + + // Additions and removals of options are done in a single API call, so we + // can't do our normal "remove these" and then later "add these", re-adding + // any updated settings. + // Because of this, we need to exclude any settings in the "removable" + // settings that are also found in the "add" settings, otherwise they + // conflict. 
Here we loop through all the initial removables from the set + // difference, and create a new slice `remove` that contains those settings + // found in `rm` but not in `add` + var remove []*elasticbeanstalk.ConfigurationOptionSetting + if len(add) > 0 { + for _, r := range rm { + var update = false + for _, a := range add { + // ResourceNames are optional. Some defaults come with it, some do + // not. We need to guard against nil/empty in state as well as + // nil/empty from the API + if a.ResourceName != nil { + if r.ResourceName == nil { + continue + } + if *r.ResourceName != *a.ResourceName { + continue + } + } + if *r.Namespace == *a.Namespace && *r.OptionName == *a.OptionName { + log.Printf("[DEBUG] Updating Beanstalk setting (%s::%s) \"%s\" => \"%s\"", *a.Namespace, *a.OptionName, *r.Value, *a.Value) + update = true + break + } + } + // Only remove options that are not updates + if !update { + remove = append(remove, r) + } + } + } else { + remove = rm + } + + for _, elem := range remove { + updateOpts.OptionsToRemove = append(updateOpts.OptionsToRemove, &elasticbeanstalk.OptionSpecification{ + Namespace: elem.Namespace, + OptionName: elem.OptionName, + }) + } + + updateOpts.OptionSettings = add + } + + if d.HasChange("template_name") { + hasChange = true + if v, ok := d.GetOk("template_name"); ok { + updateOpts.TemplateName = aws.String(v.(string)) + } + } + + if d.HasChange("version_label") { + hasChange = true + updateOpts.VersionLabel = aws.String(d.Get("version_label").(string)) + } + + if hasChange { + // Get the current time to filter getBeanstalkEnvironmentErrors messages + t := time.Now() + log.Printf("[DEBUG] Elastic Beanstalk Environment update opts: %s", updateOpts) + _, err := conn.UpdateEnvironment(&updateOpts) + if err != nil { + return err + } + + waitForReadyTimeOut, err := time.ParseDuration(d.Get("wait_for_ready_timeout").(string)) + if err != nil { + return err + } + pollInterval, err := time.ParseDuration(d.Get("poll_interval").(string)) + if err != nil { + pollInterval = 0 + log.Printf("[WARN] Error parsing poll_interval, using default backoff") + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"Launching", "Updating"}, + Target: []string{"Ready"}, + Refresh: environmentStateRefreshFunc(conn, d.Id(), t), + Timeout: waitForReadyTimeOut, + Delay: 10 * time.Second, + PollInterval: pollInterval, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for Elastic Beanstalk Environment (%s) to become ready: %s", + d.Id(), err) + } + + envErrors, err := getBeanstalkEnvironmentErrors(conn, d.Id(), t) + if err != nil { + return err + } + if envErrors != nil { + return envErrors + } + } + + return resourceAwsElasticBeanstalkEnvironmentRead(d, meta) +} + +func resourceAwsElasticBeanstalkEnvironmentRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticbeanstalkconn + + envId := d.Id() + + log.Printf("[DEBUG] Elastic Beanstalk environment read %s: id %s", d.Get("name").(string), d.Id()) + + resp, err := conn.DescribeEnvironments(&elasticbeanstalk.DescribeEnvironmentsInput{ + EnvironmentIds: []*string{aws.String(envId)}, + }) + + if err != nil { + return err + } + + if len(resp.Environments) == 0 { + log.Printf("[DEBUG] Elastic Beanstalk environment properties: could not find environment %s", d.Id()) + + d.SetId("") + return nil + } else if len(resp.Environments) != 1 { + return fmt.Errorf("Error reading application properties: found %d environments, 
expected 1", len(resp.Environments)) + } + + env := resp.Environments[0] + + if *env.Status == "Terminated" { + log.Printf("[DEBUG] Elastic Beanstalk environment %s was terminated", d.Id()) + + d.SetId("") + return nil + } + + resources, err := conn.DescribeEnvironmentResources(&elasticbeanstalk.DescribeEnvironmentResourcesInput{ + EnvironmentId: aws.String(envId), + }) + + if err != nil { + return err + } + + if err := d.Set("name", env.EnvironmentName); err != nil { + return err + } + + if err := d.Set("application", env.ApplicationName); err != nil { + return err + } + + if err := d.Set("description", env.Description); err != nil { + return err + } + + if err := d.Set("cname", env.CNAME); err != nil { + return err + } + + if err := d.Set("version_label", env.VersionLabel); err != nil { + return err + } + + if err := d.Set("tier", *env.Tier.Name); err != nil { + return err + } + + if env.CNAME != nil { + beanstalkCnamePrefixRegexp := regexp.MustCompile(`(^[^.]+)(.\w{2}-\w{4,9}-\d)?.elasticbeanstalk.com$`) + var cnamePrefix string + cnamePrefixMatch := beanstalkCnamePrefixRegexp.FindStringSubmatch(*env.CNAME) + + if cnamePrefixMatch == nil { + cnamePrefix = "" + } else { + cnamePrefix = cnamePrefixMatch[1] + } + + if err := d.Set("cname_prefix", cnamePrefix); err != nil { + return err + } + } else { + if err := d.Set("cname_prefix", ""); err != nil { + return err + } + } + + if err := d.Set("solution_stack_name", env.SolutionStackName); err != nil { + return err + } + + if err := d.Set("autoscaling_groups", flattenBeanstalkAsg(resources.EnvironmentResources.AutoScalingGroups)); err != nil { + return err + } + + if err := d.Set("instances", flattenBeanstalkInstances(resources.EnvironmentResources.Instances)); err != nil { + return err + } + if err := d.Set("launch_configurations", flattenBeanstalkLc(resources.EnvironmentResources.LaunchConfigurations)); err != nil { + return err + } + if err := d.Set("load_balancers", flattenBeanstalkElb(resources.EnvironmentResources.LoadBalancers)); err != nil { + return err + } + if err := d.Set("queues", flattenBeanstalkSqs(resources.EnvironmentResources.Queues)); err != nil { + return err + } + if err := d.Set("triggers", flattenBeanstalkTrigger(resources.EnvironmentResources.Triggers)); err != nil { + return err + } + + return resourceAwsElasticBeanstalkEnvironmentSettingsRead(d, meta) +} + +func fetchAwsElasticBeanstalkEnvironmentSettings(d *schema.ResourceData, meta interface{}) (*schema.Set, error) { + conn := meta.(*AWSClient).elasticbeanstalkconn + + app := d.Get("application").(string) + name := d.Get("name").(string) + + resp, err := conn.DescribeConfigurationSettings(&elasticbeanstalk.DescribeConfigurationSettingsInput{ + ApplicationName: aws.String(app), + EnvironmentName: aws.String(name), + }) + + if err != nil { + return nil, err + } + + if len(resp.ConfigurationSettings) != 1 { + return nil, fmt.Errorf("Error reading environment settings: received %d settings groups, expected 1", len(resp.ConfigurationSettings)) + } + + settings := &schema.Set{F: optionSettingValueHash} + for _, optionSetting := range resp.ConfigurationSettings[0].OptionSettings { + m := map[string]interface{}{} + + if optionSetting.Namespace != nil { + m["namespace"] = *optionSetting.Namespace + } else { + return nil, fmt.Errorf("Error reading environment settings: option setting with no namespace: %v", optionSetting) + } + + if optionSetting.OptionName != nil { + m["name"] = *optionSetting.OptionName + } else { + return nil, fmt.Errorf("Error reading environment 
settings: option setting with no name: %v", optionSetting) + } + + if *optionSetting.Namespace == "aws:autoscaling:scheduledaction" && optionSetting.ResourceName != nil { + m["resource"] = *optionSetting.ResourceName + } + + if optionSetting.Value != nil { + switch *optionSetting.OptionName { + case "SecurityGroups": + m["value"] = dropGeneratedSecurityGroup(*optionSetting.Value, meta) + case "Subnets", "ELBSubnets": + m["value"] = sortValues(*optionSetting.Value) + default: + m["value"] = *optionSetting.Value + } + } + + settings.Add(m) + } + + return settings, nil +} + +func resourceAwsElasticBeanstalkEnvironmentSettingsRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Elastic Beanstalk environment settings read %s: id %s", d.Get("name").(string), d.Id()) + + allSettings, err := fetchAwsElasticBeanstalkEnvironmentSettings(d, meta) + if err != nil { + return err + } + + settings := d.Get("setting").(*schema.Set) + + log.Printf("[DEBUG] Elastic Beanstalk allSettings: %s", allSettings.GoString()) + log.Printf("[DEBUG] Elastic Beanstalk settings: %s", settings.GoString()) + + // perform the set operation with only name/namespace as keys, excluding value + // this is so we override things in the settings resource data key with updated values + // from the api. we skip values we didn't know about before because there are so many + // defaults set by the eb api that we would delete many useful defaults. + // + // there is likely a better way to do this + allSettingsKeySet := schema.NewSet(optionSettingKeyHash, allSettings.List()) + settingsKeySet := schema.NewSet(optionSettingKeyHash, settings.List()) + updatedSettingsKeySet := allSettingsKeySet.Intersection(settingsKeySet) + + log.Printf("[DEBUG] Elastic Beanstalk updatedSettingsKeySet: %s", updatedSettingsKeySet.GoString()) + + updatedSettings := schema.NewSet(optionSettingValueHash, updatedSettingsKeySet.List()) + + log.Printf("[DEBUG] Elastic Beanstalk updatedSettings: %s", updatedSettings.GoString()) + + if err := d.Set("all_settings", allSettings.List()); err != nil { + return err + } + + if err := d.Set("setting", updatedSettings.List()); err != nil { + return err + } + + return nil +} + +func resourceAwsElasticBeanstalkEnvironmentDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticbeanstalkconn + + opts := elasticbeanstalk.TerminateEnvironmentInput{ + EnvironmentId: aws.String(d.Id()), + TerminateResources: aws.Bool(true), + } + + // Get the current time to filter getBeanstalkEnvironmentErrors messages + t := time.Now() + log.Printf("[DEBUG] Elastic Beanstalk Environment terminate opts: %s", opts) + _, err := conn.TerminateEnvironment(&opts) + + if err != nil { + return err + } + + waitForReadyTimeOut, err := time.ParseDuration(d.Get("wait_for_ready_timeout").(string)) + if err != nil { + return err + } + pollInterval, err := time.ParseDuration(d.Get("poll_interval").(string)) + if err != nil { + pollInterval = 0 + log.Printf("[WARN] Error parsing poll_interval, using default backoff") + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"Terminating"}, + Target: []string{"Terminated"}, + Refresh: environmentStateRefreshFunc(conn, d.Id(), t), + Timeout: waitForReadyTimeOut, + Delay: 10 * time.Second, + PollInterval: pollInterval, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for Elastic Beanstalk Environment (%s) to become terminated: %s", + d.Id(), err) + } + + envErrors, 
err := getBeanstalkEnvironmentErrors(conn, d.Id(), t) + if err != nil { + return err + } + if envErrors != nil { + return envErrors + } + + return nil +} + +// environmentStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch +// the creation of the Beanstalk Environment +func environmentStateRefreshFunc(conn *elasticbeanstalk.ElasticBeanstalk, environmentId string, t time.Time) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := conn.DescribeEnvironments(&elasticbeanstalk.DescribeEnvironmentsInput{ + EnvironmentIds: []*string{aws.String(environmentId)}, + }) + if err != nil { + log.Printf("[Err] Error waiting for Elastic Beanstalk Environment state: %s", err) + return -1, "failed", fmt.Errorf("[Err] Error waiting for Elastic Beanstalk Environment state: %s", err) + } + + if resp == nil || len(resp.Environments) == 0 { + // Sometimes AWS just has consistency issues and doesn't see + // our instance yet. Return an empty state. + return nil, "", nil + } + + var env *elasticbeanstalk.EnvironmentDescription + for _, e := range resp.Environments { + if environmentId == *e.EnvironmentId { + env = e + } + } + + if env == nil { + return -1, "failed", fmt.Errorf("[Err] Error finding Elastic Beanstalk Environment, environment not found") + } + + envErrors, err := getBeanstalkEnvironmentErrors(conn, environmentId, t) + if err != nil { + return -1, "failed", err + } + if envErrors != nil { + return -1, "failed", envErrors + } + + return env, *env.Status, nil + } +} + +// we use the following two functions to allow us to split out defaults +// as they become overridden from within the template +func optionSettingValueHash(v interface{}) int { + rd := v.(map[string]interface{}) + namespace := rd["namespace"].(string) + optionName := rd["name"].(string) + var resourceName string + if v, ok := rd["resource"].(string); ok { + resourceName = v + } + value, _ := rd["value"].(string) + hk := fmt.Sprintf("%s:%s%s=%s", namespace, optionName, resourceName, sortValues(value)) + log.Printf("[DEBUG] Elastic Beanstalk optionSettingValueHash(%#v): %s: hk=%s,hc=%d", v, optionName, hk, hashcode.String(hk)) + return hashcode.String(hk) +} + +func optionSettingKeyHash(v interface{}) int { + rd := v.(map[string]interface{}) + namespace := rd["namespace"].(string) + optionName := rd["name"].(string) + var resourceName string + if v, ok := rd["resource"].(string); ok { + resourceName = v + } + hk := fmt.Sprintf("%s:%s%s", namespace, optionName, resourceName) + log.Printf("[DEBUG] Elastic Beanstalk optionSettingKeyHash(%#v): %s: hk=%s,hc=%d", v, optionName, hk, hashcode.String(hk)) + return hashcode.String(hk) +} + +func sortValues(v string) string { + values := strings.Split(v, ",") + sort.Strings(values) + return strings.Join(values, ",") +} + +func extractOptionSettings(s *schema.Set) []*elasticbeanstalk.ConfigurationOptionSetting { + settings := []*elasticbeanstalk.ConfigurationOptionSetting{} + + if s != nil { + for _, setting := range s.List() { + optionSetting := elasticbeanstalk.ConfigurationOptionSetting{ + Namespace: aws.String(setting.(map[string]interface{})["namespace"].(string)), + OptionName: aws.String(setting.(map[string]interface{})["name"].(string)), + Value: aws.String(setting.(map[string]interface{})["value"].(string)), + } + if *optionSetting.Namespace == "aws:autoscaling:scheduledaction" { + if v, ok := setting.(map[string]interface{})["resource"].(string); ok && v != "" { + optionSetting.ResourceName = aws.String(v) + } + } + settings = 
append(settings, &optionSetting) + } + } + + return settings +} + +func dropGeneratedSecurityGroup(settingValue string, meta interface{}) string { + conn := meta.(*AWSClient).ec2conn + + groups := strings.Split(settingValue, ",") + + // Check to see if groups are ec2-classic or vpc security groups + ec2Classic := true + beanstalkSGRegexp := "sg-[0-9a-fA-F]{8}" + for _, g := range groups { + if ok, _ := regexp.MatchString(beanstalkSGRegexp, g); ok { + ec2Classic = false + break + } + } + + var resp *ec2.DescribeSecurityGroupsOutput + var err error + + if ec2Classic { + resp, err = conn.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{ + GroupNames: aws.StringSlice(groups), + }) + } else { + resp, err = conn.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{ + GroupIds: aws.StringSlice(groups), + }) + } + + if err != nil { + log.Printf("[DEBUG] Elastic Beanstalk error describing SecurityGroups: %v", err) + return settingValue + } + + log.Printf("[DEBUG] Elastic Beanstalk using ec2-classic security-groups: %t", ec2Classic) + var legitGroups []string + for _, group := range resp.SecurityGroups { + log.Printf("[DEBUG] Elastic Beanstalk SecurityGroup: %v", *group.GroupName) + if !strings.HasPrefix(*group.GroupName, "awseb") { + if ec2Classic { + legitGroups = append(legitGroups, *group.GroupName) + } else { + legitGroups = append(legitGroups, *group.GroupId) + } + } + } + + sort.Strings(legitGroups) + + return strings.Join(legitGroups, ",") +} + +type beanstalkEnvironmentError struct { + eventDate *time.Time + environmentID string + message *string +} + +func (e beanstalkEnvironmentError) Error() string { + return e.eventDate.String() + " (" + e.environmentID + ") : " + *e.message +} + +type beanstalkEnvironmentErrors []*beanstalkEnvironmentError + +func (e beanstalkEnvironmentErrors) Len() int { return len(e) } +func (e beanstalkEnvironmentErrors) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e beanstalkEnvironmentErrors) Less(i, j int) bool { return e[i].eventDate.Before(*e[j].eventDate) } + +func getBeanstalkEnvironmentErrors(conn *elasticbeanstalk.ElasticBeanstalk, environmentId string, t time.Time) (*multierror.Error, error) { + environmentErrors, err := conn.DescribeEvents(&elasticbeanstalk.DescribeEventsInput{ + EnvironmentId: aws.String(environmentId), + Severity: aws.String("ERROR"), + StartTime: aws.Time(t), + }) + + if err != nil { + return nil, fmt.Errorf("[Err] Unable to get Elastic Beanstalk Evironment events: %s", err) + } + + var events beanstalkEnvironmentErrors + for _, event := range environmentErrors.Events { + e := &beanstalkEnvironmentError{ + eventDate: event.EventDate, + environmentID: environmentId, + message: event.Message, + } + events = append(events, e) + } + sort.Sort(beanstalkEnvironmentErrors(events)) + + var result *multierror.Error + for _, event := range events { + result = multierror.Append(result, event) + } + + return result, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_environment_migrate.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_environment_migrate.go new file mode 100644 index 000000000..31cd5c777 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_beanstalk_environment_migrate.go @@ -0,0 +1,35 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/terraform" +) + +func resourceAwsElasticBeanstalkEnvironmentMigrateState( + v int, is 
*terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + switch v { + case 0: + log.Println("[INFO] Found AWS Elastic Beanstalk Environment State v0; migrating to v1") + return migrateBeanstalkEnvironmentStateV0toV1(is) + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateBeanstalkEnvironmentStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + if is.Empty() || is.Attributes == nil { + log.Println("[DEBUG] Empty Elastic Beanstalk Environment State; nothing to migrate.") + return is, nil + } + + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + if is.Attributes["tier"] == "" { + is.Attributes["tier"] = "WebServer" + } + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_transcoder_pipeline.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_transcoder_pipeline.go new file mode 100644 index 000000000..6a0d88352 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_transcoder_pipeline.go @@ -0,0 +1,481 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elastictranscoder" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsElasticTranscoderPipeline() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsElasticTranscoderPipelineCreate, + Read: resourceAwsElasticTranscoderPipelineRead, + Update: resourceAwsElasticTranscoderPipelineUpdate, + Delete: resourceAwsElasticTranscoderPipelineDelete, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "aws_kms_key_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, + }, + + // ContentConfig also requires ThumbnailConfig + "content_config": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + // elastictranscoder.PipelineOutputConfig + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Optional: true, + // AWS may insert the bucket name here taken from output_bucket + Computed: true, + }, + "storage_class": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + "content_config_permissions": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "access": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "grantee": { + Type: schema.TypeString, + Optional: true, + }, + "grantee_type": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + "input_bucket": { + Type: schema.TypeString, + Required: true, + }, + + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[.0-9A-Za-z-_]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only alphanumeric characters, hyphens, underscores, and periods allowed in %q", k)) + } + if len(value) > 40 { + errors = append(errors, fmt.Errorf("%q cannot be longer than 40 characters", k)) + } + return + }, + }, + + 
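+ // The four topics below are SNS topic ARNs, one per pipeline event
+ // type; expandETNotifications below maps them onto the matching
+ // elastictranscoder.Notifications fields.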
"notifications": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "completed": { + Type: schema.TypeString, + Optional: true, + }, + "error": { + Type: schema.TypeString, + Optional: true, + }, + "progressing": { + Type: schema.TypeString, + Optional: true, + }, + "warning": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + // The output_bucket must be set, or both of content_config.bucket + // and thumbnail_config.bucket. + // This is set as Computed, because the API may or may not return + // this as set based on the other 2 configurations. + "output_bucket": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "role": { + Type: schema.TypeString, + Required: true, + }, + + "thumbnail_config": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + // elastictranscoder.PipelineOutputConfig + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Optional: true, + // AWS may insert the bucket name here taken from output_bucket + Computed: true, + }, + "storage_class": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + "thumbnail_config_permissions": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "access": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "grantee": { + Type: schema.TypeString, + Optional: true, + }, + "grantee_type": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + } +} + +func resourceAwsElasticTranscoderPipelineCreate(d *schema.ResourceData, meta interface{}) error { + elastictranscoderconn := meta.(*AWSClient).elastictranscoderconn + + req := &elastictranscoder.CreatePipelineInput{ + AwsKmsKeyArn: getStringPtr(d, "aws_kms_key_arn"), + ContentConfig: expandETPiplineOutputConfig(d, "content_config"), + InputBucket: aws.String(d.Get("input_bucket").(string)), + Notifications: expandETNotifications(d), + OutputBucket: getStringPtr(d, "output_bucket"), + Role: getStringPtr(d, "role"), + ThumbnailConfig: expandETPiplineOutputConfig(d, "thumbnail_config"), + } + + if name, ok := d.GetOk("name"); ok { + req.Name = aws.String(name.(string)) + } else { + name := resource.PrefixedUniqueId("tf-et-") + d.Set("name", name) + req.Name = aws.String(name) + } + + if (req.OutputBucket == nil && (req.ContentConfig == nil || req.ContentConfig.Bucket == nil)) || + (req.OutputBucket != nil && req.ContentConfig != nil && req.ContentConfig.Bucket != nil) { + return fmt.Errorf("[ERROR] you must specify only one of output_bucket or content_config.bucket") + } + + log.Printf("[DEBUG] Elastic Transcoder Pipeline create opts: %s", req) + resp, err := elastictranscoderconn.CreatePipeline(req) + if err != nil { + return fmt.Errorf("Error creating Elastic Transcoder Pipeline: %s", err) + } + + d.SetId(*resp.Pipeline.Id) + + for _, w := range resp.Warnings { + log.Printf("[WARN] Elastic Transcoder Pipeline %v: %v", *w.Code, *w.Message) + } + + return resourceAwsElasticTranscoderPipelineRead(d, meta) +} + +func expandETNotifications(d *schema.ResourceData) *elastictranscoder.Notifications { + set, ok := d.GetOk("notifications") + if !ok { + return nil + } + + s := set.(*schema.Set).List() + if s == nil || len(s) == 0 { + return nil + } + + if s[0] == nil { + log.Printf("[ERR] First element of Notifications set is nil") + return nil + } + + rN := 
s[0].(map[string]interface{}) + + return &elastictranscoder.Notifications{ + Completed: aws.String(rN["completed"].(string)), + Error: aws.String(rN["error"].(string)), + Progressing: aws.String(rN["progressing"].(string)), + Warning: aws.String(rN["warning"].(string)), + } +} + +func flattenETNotifications(n *elastictranscoder.Notifications) []map[string]interface{} { + if n == nil { + return nil + } + + allEmpty := func(s ...*string) bool { + for _, s := range s { + if s != nil && *s != "" { + return false + } + } + return true + } + + // the API always returns a Notifications value, even when all fields are nil + if allEmpty(n.Completed, n.Error, n.Progressing, n.Warning) { + return nil + } + + m := setMap(make(map[string]interface{})) + + m.SetString("completed", n.Completed) + m.SetString("error", n.Error) + m.SetString("progressing", n.Progressing) + m.SetString("warning", n.Warning) + return m.MapList() +} + +func expandETPiplineOutputConfig(d *schema.ResourceData, key string) *elastictranscoder.PipelineOutputConfig { + set, ok := d.GetOk(key) + if !ok { + return nil + } + + s := set.(*schema.Set) + if s == nil || s.Len() == 0 { + return nil + } + + cc := s.List()[0].(map[string]interface{}) + + cfg := &elastictranscoder.PipelineOutputConfig{ + Bucket: getStringPtr(cc, "bucket"), + StorageClass: getStringPtr(cc, "storage_class"), + } + + switch key { + case "content_config": + cfg.Permissions = expandETPermList(d.Get("content_config_permissions").(*schema.Set)) + case "thumbnail_config": + cfg.Permissions = expandETPermList(d.Get("thumbnail_config_permissions").(*schema.Set)) + } + + return cfg +} + +func flattenETPipelineOutputConfig(cfg *elastictranscoder.PipelineOutputConfig) []map[string]interface{} { + m := setMap(make(map[string]interface{})) + + m.SetString("bucket", cfg.Bucket) + m.SetString("storage_class", cfg.StorageClass) + + return m.MapList() +} + +func expandETPermList(permissions *schema.Set) []*elastictranscoder.Permission { + var perms []*elastictranscoder.Permission + + for _, p := range permissions.List() { + perm := &elastictranscoder.Permission{ + Access: getStringPtrList(p.(map[string]interface{}), "access"), + Grantee: getStringPtr(p, "grantee"), + GranteeType: getStringPtr(p, "grantee_type"), + } + perms = append(perms, perm) + } + return perms +} + +func flattenETPermList(perms []*elastictranscoder.Permission) []map[string]interface{} { + var set []map[string]interface{} + + for _, p := range perms { + m := setMap(make(map[string]interface{})) + m.Set("access", flattenStringList(p.Access)) + m.SetString("grantee", p.Grantee) + m.SetString("grantee_type", p.GranteeType) + + set = append(set, m) + } + return set +} + +func resourceAwsElasticTranscoderPipelineUpdate(d *schema.ResourceData, meta interface{}) error { + elastictranscoderconn := meta.(*AWSClient).elastictranscoderconn + + req := &elastictranscoder.UpdatePipelineInput{ + Id: aws.String(d.Id()), + } + + if d.HasChange("aws_kms_key_arn") { + req.AwsKmsKeyArn = getStringPtr(d, "aws_kms_key_arn") + } + + if d.HasChange("content_config") { + req.ContentConfig = expandETPiplineOutputConfig(d, "content_config") + } + + if d.HasChange("input_bucket") { + req.InputBucket = getStringPtr(d, "input_bucket") + } + + if d.HasChange("name") { + req.Name = getStringPtr(d, "name") + } + + if d.HasChange("notifications") { + req.Notifications = expandETNotifications(d) + } + + if d.HasChange("role") { + req.Role = getStringPtr(d, "role") + } + + if d.HasChange("thumbnail_config") { + req.ThumbnailConfig = 
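+ // same expander as content_config; the key argument selects which
+ // *_permissions set gets attached to the output config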
expandETPiplineOutputConfig(d, "thumbnail_config") + } + + log.Printf("[DEBUG] Updating Elastic Transcoder Pipeline: %#v", req) + output, err := elastictranscoderconn.UpdatePipeline(req) + if err != nil { + return fmt.Errorf("Error updating Elastic Transcoder pipeline: %s", err) + } + + for _, w := range output.Warnings { + log.Printf("[WARN] Elastic Transcoder Pipeline %v: %v", *w.Code, *w.Message) + } + + return resourceAwsElasticTranscoderPipelineRead(d, meta) +} + +func resourceAwsElasticTranscoderPipelineRead(d *schema.ResourceData, meta interface{}) error { + elastictranscoderconn := meta.(*AWSClient).elastictranscoderconn + + resp, err := elastictranscoderconn.ReadPipeline(&elastictranscoder.ReadPipelineInput{ + Id: aws.String(d.Id()), + }) + + if err != nil { + if err, ok := err.(awserr.Error); ok && err.Code() == "ResourceNotFoundException" { + d.SetId("") + return nil + } + return err + } + + log.Printf("[DEBUG] Elastic Transcoder Pipeline Read response: %#v", resp) + + pipeline := resp.Pipeline + + d.Set("arn", *pipeline.Arn) + + if arn := pipeline.AwsKmsKeyArn; arn != nil { + d.Set("aws_kms_key_arn", *arn) + } + + if pipeline.ContentConfig != nil { + err := d.Set("content_config", flattenETPipelineOutputConfig(pipeline.ContentConfig)) + if err != nil { + return fmt.Errorf("error setting content_config: %s", err) + } + + if pipeline.ContentConfig.Permissions != nil { + err := d.Set("content_config_permissions", flattenETPermList(pipeline.ContentConfig.Permissions)) + if err != nil { + return fmt.Errorf("error setting content_config_permissions: %s", err) + } + } + } + + d.Set("input_bucket", *pipeline.InputBucket) + d.Set("name", *pipeline.Name) + + notifications := flattenETNotifications(pipeline.Notifications) + if notifications != nil { + if err := d.Set("notifications", notifications); err != nil { + return fmt.Errorf("error setting notifications: %s", err) + } + } + + d.Set("role", *pipeline.Role) + + if pipeline.ThumbnailConfig != nil { + err := d.Set("thumbnail_config", flattenETPipelineOutputConfig(pipeline.ThumbnailConfig)) + if err != nil { + return fmt.Errorf("error setting thumbnail_config: %s", err) + } + + if pipeline.ThumbnailConfig.Permissions != nil { + err := d.Set("thumbnail_config_permissions", flattenETPermList(pipeline.ThumbnailConfig.Permissions)) + if err != nil { + return fmt.Errorf("error setting thumbnail_config_permissions: %s", err) + } + } + } + + if pipeline.OutputBucket != nil { + d.Set("output_bucket", *pipeline.OutputBucket) + } + + return nil +} + +func resourceAwsElasticTranscoderPipelineDelete(d *schema.ResourceData, meta interface{}) error { + elastictranscoderconn := meta.(*AWSClient).elastictranscoderconn + + log.Printf("[DEBUG] Elastic Transcoder Delete Pipeline: %s", d.Id()) + _, err := elastictranscoderconn.DeletePipeline(&elastictranscoder.DeletePipelineInput{ + Id: aws.String(d.Id()), + }) + if err != nil { + return fmt.Errorf("error deleting Elastic Transcoder Pipeline: %s", err) + } + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_transcoder_preset.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_transcoder_preset.go new file mode 100644 index 000000000..72639fedf --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elastic_transcoder_preset.go @@ -0,0 +1,639 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + 
"github.com/aws/aws-sdk-go/service/elastictranscoder" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsElasticTranscoderPreset() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsElasticTranscoderPresetCreate, + Read: resourceAwsElasticTranscoderPresetRead, + Delete: resourceAwsElasticTranscoderPresetDelete, + + Schema: map[string]*schema.Schema{ + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "audio": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + // elastictranscoder.AudioParameters + Schema: map[string]*schema.Schema{ + "audio_packing_mode": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "bit_rate": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "channels": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "codec": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "sample_rate": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + "audio_codec_options": &schema.Schema{ + Type: schema.TypeSet, + MaxItems: 1, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bit_depth": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "bit_order": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "profile": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "signed": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + + "container": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "thumbnails": &schema.Schema{ + Type: schema.TypeSet, + MaxItems: 1, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + // elastictranscoder.Thumbnails + Schema: map[string]*schema.Schema{ + "aspect_ratio": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "format": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "interval": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "max_height": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "max_width": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "padding_policy": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "resolution:": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "sizing_policy": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "video": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + // elastictranscoder.VideoParameters + Schema: map[string]*schema.Schema{ + "aspect_ratio": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: 
true, + }, + "bit_rate": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "codec": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "display_aspect_ratio": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "fixed_gop": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "frame_rate": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "keyframes_max_dist": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "max_frame_rate": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "30", + ForceNew: true, + }, + "max_height": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "max_width": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "padding_policy": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "resolution": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "sizing_policy": &schema.Schema{ + Type: schema.TypeString, + Default: "Fit", + Optional: true, + ForceNew: true, + }, + }, + }, + }, + + "video_watermarks": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + // elastictranscoder.PresetWatermark + Schema: map[string]*schema.Schema{ + "horizontal_align": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "horizontal_offset": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "max_height": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "max_width": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "opacity": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "sizing_policy": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "target": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "vertical_align": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "vertical_offset": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + + "video_codec_options": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsElasticTranscoderPresetCreate(d *schema.ResourceData, meta interface{}) error { + elastictranscoderconn := meta.(*AWSClient).elastictranscoderconn + + req := &elastictranscoder.CreatePresetInput{ + Audio: expandETAudioParams(d), + Container: aws.String(d.Get("container").(string)), + Description: getStringPtr(d, "description"), + Thumbnails: expandETThumbnails(d), + Video: exapandETVideoParams(d), + } + + if name, ok := d.GetOk("name"); ok { + req.Name = aws.String(name.(string)) + } else { + name := resource.PrefixedUniqueId("tf-et-preset-") + d.Set("name", name) + req.Name = aws.String(name) + } + + log.Printf("[DEBUG] Elastic Transcoder Preset create opts: %s", req) + resp, err := elastictranscoderconn.CreatePreset(req) + if err != nil { + return fmt.Errorf("Error creating Elastic Transcoder Preset: %s", err) + } + + if resp.Warning != nil && *resp.Warning != "" { + log.Printf("[WARN] Elastic 
Transcoder Preset: %s", *resp.Warning) + } + + d.SetId(*resp.Preset.Id) + d.Set("arn", *resp.Preset.Arn) + + return nil +} + +func expandETThumbnails(d *schema.ResourceData) *elastictranscoder.Thumbnails { + set, ok := d.GetOk("thumbnails") + if !ok { + return nil + } + + s := set.(*schema.Set) + if s == nil || s.Len() == 0 { + return nil + } + t := s.List()[0].(map[string]interface{}) + + return &elastictranscoder.Thumbnails{ + AspectRatio: getStringPtr(t, "aspect_ratio"), + Format: getStringPtr(t, "format"), + Interval: getStringPtr(t, "interval"), + MaxHeight: getStringPtr(t, "max_height"), + MaxWidth: getStringPtr(t, "max_width"), + PaddingPolicy: getStringPtr(t, "padding_policy"), + Resolution: getStringPtr(t, "resolution"), + SizingPolicy: getStringPtr(t, "sizing_policy"), + } +} + +func expandETAudioParams(d *schema.ResourceData) *elastictranscoder.AudioParameters { + set, ok := d.GetOk("audio") + if !ok { + return nil + } + + s := set.(*schema.Set) + if s == nil || s.Len() == 0 { + return nil + } + audio := s.List()[0].(map[string]interface{}) + + return &elastictranscoder.AudioParameters{ + AudioPackingMode: getStringPtr(audio, "audio_packing_mode"), + BitRate: getStringPtr(audio, "bit_rate"), + Channels: getStringPtr(audio, "channels"), + Codec: getStringPtr(audio, "codec"), + CodecOptions: expandETAudioCodecOptions(d), + SampleRate: getStringPtr(audio, "sample_rate"), + } +} + +func expandETAudioCodecOptions(d *schema.ResourceData) *elastictranscoder.AudioCodecOptions { + s := d.Get("audio_codec_options").(*schema.Set) + if s == nil || s.Len() == 0 { + return nil + } + + codec := s.List()[0].(map[string]interface{}) + + codecOpts := &elastictranscoder.AudioCodecOptions{ + BitDepth: getStringPtr(codec, "bit_depth"), + BitOrder: getStringPtr(codec, "bit_order"), + Profile: getStringPtr(codec, "profile"), + Signed: getStringPtr(codec, "signed"), + } + + return codecOpts +} + +func exapandETVideoParams(d *schema.ResourceData) *elastictranscoder.VideoParameters { + s := d.Get("video").(*schema.Set) + if s == nil || s.Len() == 0 { + return nil + } + p := s.List()[0].(map[string]interface{}) + + return &elastictranscoder.VideoParameters{ + AspectRatio: getStringPtr(p, "aspect_ratio"), + BitRate: getStringPtr(p, "bit_rate"), + Codec: getStringPtr(p, "codec"), + CodecOptions: stringMapToPointers(d.Get("video_codec_options").(map[string]interface{})), + DisplayAspectRatio: getStringPtr(p, "display_aspect_ratio"), + FixedGOP: getStringPtr(p, "fixed_gop"), + FrameRate: getStringPtr(p, "frame_rate"), + KeyframesMaxDist: getStringPtr(p, "keyframes_max_dist"), + MaxFrameRate: getStringPtr(p, "max_frame_rate"), + MaxHeight: getStringPtr(p, "max_height"), + MaxWidth: getStringPtr(p, "max_width"), + PaddingPolicy: getStringPtr(p, "padding_policy"), + Resolution: getStringPtr(p, "resolution"), + SizingPolicy: getStringPtr(p, "sizing_policy"), + Watermarks: expandETVideoWatermarks(d), + } +} + +func expandETVideoWatermarks(d *schema.ResourceData) []*elastictranscoder.PresetWatermark { + s := d.Get("video_watermarks").(*schema.Set) + if s == nil || s.Len() == 0 { + return nil + } + var watermarks []*elastictranscoder.PresetWatermark + + for _, w := range s.List() { + watermark := &elastictranscoder.PresetWatermark{ + HorizontalAlign: getStringPtr(w, "horizontal_align"), + HorizontalOffset: getStringPtr(w, "horizontal_offset"), + Id: getStringPtr(w, "id"), + MaxHeight: getStringPtr(w, "max_height"), + MaxWidth: getStringPtr(w, "max_width"), + Opacity: getStringPtr(w, "opacity"), + SizingPolicy: 
getStringPtr(w, "sizing_policy"), + Target: getStringPtr(w, "target"), + VerticalAlign: getStringPtr(w, "vertical_align"), + VerticalOffset: getStringPtr(w, "vertical_offset"), + } + watermarks = append(watermarks, watermark) + } + + return watermarks +} + +func resourceAwsElasticTranscoderPresetRead(d *schema.ResourceData, meta interface{}) error { + elastictranscoderconn := meta.(*AWSClient).elastictranscoderconn + + resp, err := elastictranscoderconn.ReadPreset(&elastictranscoder.ReadPresetInput{ + Id: aws.String(d.Id()), + }) + + if err != nil { + if err, ok := err.(awserr.Error); ok && err.Code() == "ResourceNotFoundException" { + d.SetId("") + return nil + } + return err + } + + log.Printf("[DEBUG] Elastic Transcoder Preset Read response: %#v", resp) + + preset := resp.Preset + d.Set("arn", *preset.Arn) + + if preset.Audio != nil { + err := d.Set("audio", flattenETAudioParameters(preset.Audio)) + if err != nil { + return err + } + + if preset.Audio.CodecOptions != nil { + d.Set("audio_codec_options", flattenETAudioCodecOptions(preset.Audio.CodecOptions)) + } + } + + d.Set("container", *preset.Container) + d.Set("name", *preset.Name) + + if preset.Thumbnails != nil { + err := d.Set("thumbnails", flattenETThumbnails(preset.Thumbnails)) + if err != nil { + return err + } + } + + d.Set("type", *preset.Type) + + if preset.Video != nil { + err := d.Set("video", flattenETVideoParams(preset.Video)) + if err != nil { + return err + } + + if preset.Video.CodecOptions != nil { + d.Set("video_codec_options", flattenETVideoCodecOptions(preset.Video.CodecOptions)) + } + + if preset.Video.Watermarks != nil { + d.Set("video_watermarks", flattenETWatermarks(preset.Video.Watermarks)) + } + } + + return nil +} + +func flattenETAudioParameters(audio *elastictranscoder.AudioParameters) []map[string]interface{} { + m := setMap(make(map[string]interface{})) + + m.SetString("audio_packing_mode", audio.AudioPackingMode) + m.SetString("bit_rate", audio.BitRate) + m.SetString("channels", audio.Channels) + m.SetString("codec", audio.Codec) + m.SetString("sample_rate", audio.SampleRate) + + return m.MapList() +} + +func flattenETAudioCodecOptions(opts *elastictranscoder.AudioCodecOptions) []map[string]interface{} { + if opts == nil { + return nil + } + + m := setMap(make(map[string]interface{})) + + m.SetString("bit_depth", opts.BitDepth) + m.SetString("bit_order", opts.BitOrder) + m.SetString("profile", opts.Profile) + m.SetString("signed", opts.Signed) + + return m.MapList() +} + +func flattenETThumbnails(thumbs *elastictranscoder.Thumbnails) []map[string]interface{} { + m := setMap(make(map[string]interface{})) + + m.SetString("aspect_ratio", thumbs.AspectRatio) + m.SetString("format", thumbs.Format) + m.SetString("interval", thumbs.Interval) + m.SetString("max_height", thumbs.MaxHeight) + m.SetString("max_width", thumbs.MaxWidth) + m.SetString("padding_policy", thumbs.PaddingPolicy) + m.SetString("resolution", thumbs.Resolution) + m.SetString("sizing_policy", thumbs.SizingPolicy) + + return m.MapList() +} + +func flattenETVideoParams(video *elastictranscoder.VideoParameters) []map[string]interface{} { + m := setMap(make(map[string]interface{})) + + m.SetString("aspect_ratio", video.AspectRatio) + m.SetString("bit_rate", video.BitRate) + m.SetString("codec", video.Codec) + m.SetString("display_aspect_ratio", video.DisplayAspectRatio) + m.SetString("fixed_gop", video.FixedGOP) + m.SetString("frame_rate", video.FrameRate) + m.SetString("keyframes_max_dist", video.KeyframesMaxDist) + 
m.SetString("max_frame_rate", video.MaxFrameRate) + m.SetString("max_height", video.MaxHeight) + m.SetString("max_width", video.MaxWidth) + m.SetString("padding_policy", video.PaddingPolicy) + m.SetString("resolution", video.Resolution) + m.SetString("sizing_policy", video.SizingPolicy) + + return m.MapList() +} + +func flattenETVideoCodecOptions(opts map[string]*string) []map[string]interface{} { + codecOpts := setMap(make(map[string]interface{})) + + for k, v := range opts { + codecOpts.SetString(k, v) + } + + return codecOpts.MapList() +} + +func flattenETWatermarks(watermarks []*elastictranscoder.PresetWatermark) []map[string]interface{} { + var watermarkSet []map[string]interface{} + + for _, w := range watermarks { + watermark := setMap(make(map[string]interface{})) + + watermark.SetString("horizontal_align", w.HorizontalAlign) + watermark.SetString("horizontal_offset", w.HorizontalOffset) + watermark.SetString("id", w.Id) + watermark.SetString("max_height", w.MaxHeight) + watermark.SetString("max_width", w.MaxWidth) + watermark.SetString("opacity", w.Opacity) + watermark.SetString("sizing_policy", w.SizingPolicy) + watermark.SetString("target", w.Target) + watermark.SetString("vertical_align", w.VerticalAlign) + watermark.SetString("vertical_offset", w.VerticalOffset) + + watermarkSet = append(watermarkSet, watermark.Map()) + } + + return watermarkSet +} + +func resourceAwsElasticTranscoderPresetDelete(d *schema.ResourceData, meta interface{}) error { + elastictranscoderconn := meta.(*AWSClient).elastictranscoderconn + + log.Printf("[DEBUG] Elastic Transcoder Delete Preset: %s", d.Id()) + _, err := elastictranscoderconn.DeletePreset(&elastictranscoder.DeletePresetInput{ + Id: aws.String(d.Id()), + }) + + if err != nil { + return fmt.Errorf("error deleting Elastic Transcoder Preset: %s", err) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_cluster.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_cluster.go new file mode 100644 index 000000000..a2bec932d --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_cluster.go @@ -0,0 +1,687 @@ +package aws + +import ( + "fmt" + "log" + "sort" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsElastiCacheCommonSchema() map[string]*schema.Schema { + + return map[string]*schema.Schema{ + "availability_zones": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "node_type": { + Type: schema.TypeString, + Required: true, + }, + "engine": { + Type: schema.TypeString, + Required: true, + }, + "engine_version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "parameter_group_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "subnet_group_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "security_group_names": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "security_group_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: 
&schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + // A single-element string list containing an Amazon Resource Name (ARN) that + // uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot + // file will be used to populate the node group. + // + // See also: + // https://github.com/aws/aws-sdk-go/blob/4862a174f7fc92fb523fc39e68f00b87d91d2c3d/service/elasticache/api.go#L2079 + "snapshot_arns": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "snapshot_window": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateOnceADayWindowFormat, + }, + "snapshot_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "maintenance_window": { + Type: schema.TypeString, + Optional: true, + Computed: true, + StateFunc: func(val interface{}) string { + // Elasticache always changes the maintenance + // to lowercase + return strings.ToLower(val.(string)) + }, + ValidateFunc: validateOnceAWeekWindowFormat, + }, + "port": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "notification_topic_arn": { + Type: schema.TypeString, + Optional: true, + }, + + "snapshot_retention_limit": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + value := v.(int) + if value > 35 { + es = append(es, fmt.Errorf( + "snapshot retention limit cannot be more than 35 days")) + } + return + }, + }, + + "apply_immediately": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "tags": tagsSchema(), + } +} + +func resourceAwsElasticacheCluster() *schema.Resource { + resourceSchema := resourceAwsElastiCacheCommonSchema() + + resourceSchema["cluster_id"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + StateFunc: func(val interface{}) string { + // Elasticache normalizes cluster ids to lowercase, + // so we have to do this too or else we can end up + // with non-converging diffs. 
+ return strings.ToLower(val.(string)) + }, + ValidateFunc: validateElastiCacheClusterId, + } + + resourceSchema["num_cache_nodes"] = &schema.Schema{ + Type: schema.TypeInt, + Required: true, + } + + resourceSchema["az_mode"] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + } + + resourceSchema["availability_zone"] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + } + + resourceSchema["configuration_endpoint"] = &schema.Schema{ + Type: schema.TypeString, + Computed: true, + } + + resourceSchema["cluster_address"] = &schema.Schema{ + Type: schema.TypeString, + Computed: true, + } + + resourceSchema["replication_group_id"] = &schema.Schema{ + Type: schema.TypeString, + Computed: true, + } + + resourceSchema["cache_nodes"] = &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "address": { + Type: schema.TypeString, + Computed: true, + }, + "port": { + Type: schema.TypeInt, + Computed: true, + }, + "availability_zone": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + } + + return &schema.Resource{ + Create: resourceAwsElasticacheClusterCreate, + Read: resourceAwsElasticacheClusterRead, + Update: resourceAwsElasticacheClusterUpdate, + Delete: resourceAwsElasticacheClusterDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: resourceSchema, + } +} + +func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticacheconn + + clusterId := d.Get("cluster_id").(string) + nodeType := d.Get("node_type").(string) // e.g) cache.m1.small + numNodes := int64(d.Get("num_cache_nodes").(int)) // 2 + engine := d.Get("engine").(string) // memcached + engineVersion := d.Get("engine_version").(string) // 1.4.14 + port := int64(d.Get("port").(int)) // e.g) 11211 + subnetGroupName := d.Get("subnet_group_name").(string) + securityNameSet := d.Get("security_group_names").(*schema.Set) + securityIdSet := d.Get("security_group_ids").(*schema.Set) + + securityNames := expandStringList(securityNameSet.List()) + securityIds := expandStringList(securityIdSet.List()) + tags := tagsFromMapEC(d.Get("tags").(map[string]interface{})) + + req := &elasticache.CreateCacheClusterInput{ + CacheClusterId: aws.String(clusterId), + CacheNodeType: aws.String(nodeType), + NumCacheNodes: aws.Int64(numNodes), + Engine: aws.String(engine), + EngineVersion: aws.String(engineVersion), + Port: aws.Int64(port), + CacheSubnetGroupName: aws.String(subnetGroupName), + CacheSecurityGroupNames: securityNames, + SecurityGroupIds: securityIds, + Tags: tags, + } + + // parameter groups are optional and can be defaulted by AWS + if v, ok := d.GetOk("parameter_group_name"); ok { + req.CacheParameterGroupName = aws.String(v.(string)) + } + + if v, ok := d.GetOk("snapshot_retention_limit"); ok { + req.SnapshotRetentionLimit = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("snapshot_window"); ok { + req.SnapshotWindow = aws.String(v.(string)) + } + + if v, ok := d.GetOk("maintenance_window"); ok { + req.PreferredMaintenanceWindow = aws.String(v.(string)) + } + + if v, ok := d.GetOk("notification_topic_arn"); ok { + req.NotificationTopicArn = aws.String(v.(string)) + } + + snaps := d.Get("snapshot_arns").(*schema.Set).List() + if len(snaps) > 0 { + s := expandStringList(snaps) + 
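+ // a single-element list naming the Redis RDB file in S3 that seeds
+ // the new node group (see the snapshot_arns schema comment above)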
req.SnapshotArns = s + log.Printf("[DEBUG] Restoring Redis cluster from S3 snapshot: %#v", s) + } + + if v, ok := d.GetOk("snapshot_name"); ok { + req.SnapshotName = aws.String(v.(string)) + } + + if v, ok := d.GetOk("az_mode"); ok { + req.AZMode = aws.String(v.(string)) + } + + if v, ok := d.GetOk("availability_zone"); ok { + req.PreferredAvailabilityZone = aws.String(v.(string)) + } + + preferred_azs := d.Get("availability_zones").(*schema.Set).List() + if len(preferred_azs) > 0 { + azs := expandStringList(preferred_azs) + req.PreferredAvailabilityZones = azs + } + + if v, ok := d.GetOk("replication_group_id"); ok { + req.ReplicationGroupId = aws.String(v.(string)) + } + + resp, err := conn.CreateCacheCluster(req) + if err != nil { + return fmt.Errorf("Error creating Elasticache: %s", err) + } + + // Assign the cluster id as the resource ID + // Elasticache always retains the id in lower case, so we have to + // mimic that or else we won't be able to refresh a resource whose + // name contained uppercase characters. + d.SetId(strings.ToLower(*resp.CacheCluster.CacheClusterId)) + + pending := []string{"creating", "modifying", "restoring", "snapshotting"} + stateConf := &resource.StateChangeConf{ + Pending: pending, + Target: []string{"available"}, + Refresh: cacheClusterStateRefreshFunc(conn, d.Id(), "available", pending), + Timeout: 40 * time.Minute, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + log.Printf("[DEBUG] Waiting for state to become available: %v", d.Id()) + _, sterr := stateConf.WaitForState() + if sterr != nil { + return fmt.Errorf("Error waiting for elasticache (%s) to be created: %s", d.Id(), sterr) + } + + return resourceAwsElasticacheClusterRead(d, meta) +} + +func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticacheconn + req := &elasticache.DescribeCacheClustersInput{ + CacheClusterId: aws.String(d.Id()), + ShowCacheNodeInfo: aws.Bool(true), + } + + res, err := conn.DescribeCacheClusters(req) + if err != nil { + if eccErr, ok := err.(awserr.Error); ok && eccErr.Code() == "CacheClusterNotFound" { + log.Printf("[WARN] ElastiCache Cluster (%s) not found", d.Id()) + d.SetId("") + return nil + } + + return err + } + + if len(res.CacheClusters) == 1 { + c := res.CacheClusters[0] + d.Set("cluster_id", c.CacheClusterId) + d.Set("node_type", c.CacheNodeType) + d.Set("num_cache_nodes", c.NumCacheNodes) + d.Set("engine", c.Engine) + d.Set("engine_version", c.EngineVersion) + if c.ConfigurationEndpoint != nil { + d.Set("port", c.ConfigurationEndpoint.Port) + d.Set("configuration_endpoint", aws.String(fmt.Sprintf("%s:%d", *c.ConfigurationEndpoint.Address, *c.ConfigurationEndpoint.Port))) + d.Set("cluster_address", aws.String(fmt.Sprintf("%s", *c.ConfigurationEndpoint.Address))) + } + + if c.ReplicationGroupId != nil { + d.Set("replication_group_id", c.ReplicationGroupId) + } + + d.Set("subnet_group_name", c.CacheSubnetGroupName) + d.Set("security_group_names", flattenElastiCacheSecurityGroupNames(c.CacheSecurityGroups)) + d.Set("security_group_ids", flattenElastiCacheSecurityGroupIds(c.SecurityGroups)) + if c.CacheParameterGroup != nil { + d.Set("parameter_group_name", c.CacheParameterGroup.CacheParameterGroupName) + } + d.Set("maintenance_window", c.PreferredMaintenanceWindow) + d.Set("snapshot_window", c.SnapshotWindow) + d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit) + if c.NotificationConfiguration != nil { + if *c.NotificationConfiguration.TopicStatus == "active" { + 
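+ // only an active topic is surfaced in state; on update, an empty
+ // ARN flips the topic status back to "inactive" (see below)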
d.Set("notification_topic_arn", c.NotificationConfiguration.TopicArn) + } + } + d.Set("availability_zone", c.PreferredAvailabilityZone) + + if err := setCacheNodeData(d, c); err != nil { + return err + } + // list tags for resource + // set tags + arn, err := buildECARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region) + if err != nil { + log.Printf("[DEBUG] Error building ARN for ElastiCache Cluster, not setting Tags for cluster %s", *c.CacheClusterId) + } else { + resp, err := conn.ListTagsForResource(&elasticache.ListTagsForResourceInput{ + ResourceName: aws.String(arn), + }) + + if err != nil { + log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn) + } + + var et []*elasticache.Tag + if len(resp.TagList) > 0 { + et = resp.TagList + } + d.Set("tags", tagsToMapEC(et)) + } + } + + return nil +} + +func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticacheconn + arn, err := buildECARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region) + if err != nil { + log.Printf("[DEBUG] Error building ARN for ElastiCache Cluster, not updating Tags for cluster %s", d.Id()) + } else { + if err := setTagsEC(conn, d, arn); err != nil { + return err + } + } + + req := &elasticache.ModifyCacheClusterInput{ + CacheClusterId: aws.String(d.Id()), + ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)), + } + + requestUpdate := false + if d.HasChange("security_group_ids") { + if attr := d.Get("security_group_ids").(*schema.Set); attr.Len() > 0 { + req.SecurityGroupIds = expandStringList(attr.List()) + requestUpdate = true + } + } + + if d.HasChange("parameter_group_name") { + req.CacheParameterGroupName = aws.String(d.Get("parameter_group_name").(string)) + requestUpdate = true + } + + if d.HasChange("maintenance_window") { + req.PreferredMaintenanceWindow = aws.String(d.Get("maintenance_window").(string)) + requestUpdate = true + } + + if d.HasChange("notification_topic_arn") { + v := d.Get("notification_topic_arn").(string) + req.NotificationTopicArn = aws.String(v) + if v == "" { + inactive := "inactive" + req.NotificationTopicStatus = &inactive + } + requestUpdate = true + } + + if d.HasChange("engine_version") { + req.EngineVersion = aws.String(d.Get("engine_version").(string)) + requestUpdate = true + } + + if d.HasChange("snapshot_window") { + req.SnapshotWindow = aws.String(d.Get("snapshot_window").(string)) + requestUpdate = true + } + + if d.HasChange("node_type") { + req.CacheNodeType = aws.String(d.Get("node_type").(string)) + requestUpdate = true + } + + if d.HasChange("snapshot_retention_limit") { + req.SnapshotRetentionLimit = aws.Int64(int64(d.Get("snapshot_retention_limit").(int))) + requestUpdate = true + } + + if d.HasChange("num_cache_nodes") { + oraw, nraw := d.GetChange("num_cache_nodes") + o := oraw.(int) + n := nraw.(int) + if v, ok := d.GetOk("az_mode"); ok && v.(string) == "cross-az" && n == 1 { + return fmt.Errorf("[WARN] Error updateing Elasticache cluster (%s), error: Cross-AZ mode is not supported in a single cache node.", d.Id()) + } + if n < o { + log.Printf("[INFO] Cluster %s is marked for Decreasing cache nodes from %d to %d", d.Id(), o, n) + nodesToRemove := getCacheNodesToRemove(d, o, o-n) + req.CacheNodeIdsToRemove = nodesToRemove + } + + req.NumCacheNodes = aws.Int64(int64(d.Get("num_cache_nodes").(int))) + requestUpdate = true + + } + + if requestUpdate { + log.Printf("[DEBUG] Modifying ElastiCache Cluster 
(%s), opts:\n%s", d.Id(), req) + _, err := conn.ModifyCacheCluster(req) + if err != nil { + return fmt.Errorf("[WARN] Error updating ElastiCache cluster (%s), error: %s", d.Id(), err) + } + + log.Printf("[DEBUG] Waiting for update: %s", d.Id()) + pending := []string{"modifying", "rebooting cache cluster nodes", "snapshotting"} + stateConf := &resource.StateChangeConf{ + Pending: pending, + Target: []string{"available"}, + Refresh: cacheClusterStateRefreshFunc(conn, d.Id(), "available", pending), + Timeout: 80 * time.Minute, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + _, sterr := stateConf.WaitForState() + if sterr != nil { + return fmt.Errorf("Error waiting for elasticache (%s) to update: %s", d.Id(), sterr) + } + } + + return resourceAwsElasticacheClusterRead(d, meta) +} + +func getCacheNodesToRemove(d *schema.ResourceData, oldNumberOfNodes int, cacheNodesToRemove int) []*string { + nodesIdsToRemove := []*string{} + for i := oldNumberOfNodes; i > oldNumberOfNodes-cacheNodesToRemove && i > 0; i-- { + s := fmt.Sprintf("%04d", i) + nodesIdsToRemove = append(nodesIdsToRemove, &s) + } + + return nodesIdsToRemove +} + +func setCacheNodeData(d *schema.ResourceData, c *elasticache.CacheCluster) error { + sortedCacheNodes := make([]*elasticache.CacheNode, len(c.CacheNodes)) + copy(sortedCacheNodes, c.CacheNodes) + sort.Sort(byCacheNodeId(sortedCacheNodes)) + + cacheNodeData := make([]map[string]interface{}, 0, len(sortedCacheNodes)) + + for _, node := range sortedCacheNodes { + if node.CacheNodeId == nil || node.Endpoint == nil || node.Endpoint.Address == nil || node.Endpoint.Port == nil || node.CustomerAvailabilityZone == nil { + return fmt.Errorf("Unexpected nil pointer in: %s", node) + } + cacheNodeData = append(cacheNodeData, map[string]interface{}{ + "id": *node.CacheNodeId, + "address": *node.Endpoint.Address, + "port": int(*node.Endpoint.Port), + "availability_zone": *node.CustomerAvailabilityZone, + }) + } + + return d.Set("cache_nodes", cacheNodeData) +} + +type byCacheNodeId []*elasticache.CacheNode + +func (b byCacheNodeId) Len() int { return len(b) } +func (b byCacheNodeId) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byCacheNodeId) Less(i, j int) bool { + return b[i].CacheNodeId != nil && b[j].CacheNodeId != nil && + *b[i].CacheNodeId < *b[j].CacheNodeId +} + +func resourceAwsElasticacheClusterDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticacheconn + + req := &elasticache.DeleteCacheClusterInput{ + CacheClusterId: aws.String(d.Id()), + } + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteCacheCluster(req) + if err != nil { + awsErr, ok := err.(awserr.Error) + // The cluster may be just snapshotting, so we retry until it's ready for deletion + if ok && awsErr.Code() == "InvalidCacheClusterState" { + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) + if err != nil { + return err + } + + log.Printf("[DEBUG] Waiting for deletion: %v", d.Id()) + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating", "available", "deleting", "incompatible-parameters", "incompatible-network", "restore-failed", "snapshotting"}, + Target: []string{}, + Refresh: cacheClusterStateRefreshFunc(conn, d.Id(), "", []string{}), + Timeout: 40 * time.Minute, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + _, sterr := stateConf.WaitForState() + if sterr != nil { + return fmt.Errorf("Error waiting for elasticache (%s) 
to delete: %s", d.Id(), sterr) + } + + d.SetId("") + + return nil +} + +func cacheClusterStateRefreshFunc(conn *elasticache.ElastiCache, clusterID, givenState string, pending []string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := conn.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{ + CacheClusterId: aws.String(clusterID), + ShowCacheNodeInfo: aws.Bool(true), + }) + if err != nil { + apierr := err.(awserr.Error) + log.Printf("[DEBUG] message: %v, code: %v", apierr.Message(), apierr.Code()) + if apierr.Message() == fmt.Sprintf("CacheCluster not found: %v", clusterID) { + log.Printf("[DEBUG] Detect deletion") + return nil, "", nil + } + + log.Printf("[ERROR] CacheClusterStateRefreshFunc: %s", err) + return nil, "", err + } + + if len(resp.CacheClusters) == 0 { + return nil, "", fmt.Errorf("[WARN] Error: no Cache Clusters found for id (%s)", clusterID) + } + + var c *elasticache.CacheCluster + for _, cluster := range resp.CacheClusters { + if *cluster.CacheClusterId == clusterID { + log.Printf("[DEBUG] Found matching ElastiCache cluster: %s", *cluster.CacheClusterId) + c = cluster + } + } + + if c == nil { + return nil, "", fmt.Errorf("[WARN] Error: no matching Elastic Cache cluster for id (%s)", clusterID) + } + + log.Printf("[DEBUG] ElastiCache Cluster (%s) status: %v", clusterID, *c.CacheClusterStatus) + + // return the current state if it's in the pending array + for _, p := range pending { + log.Printf("[DEBUG] ElastiCache: checking pending state (%s) for cluster (%s), cluster status: %s", pending, clusterID, *c.CacheClusterStatus) + s := *c.CacheClusterStatus + if p == s { + log.Printf("[DEBUG] Return with status: %v", *c.CacheClusterStatus) + return c, p, nil + } + } + + // return given state if it's not in pending + if givenState != "" { + log.Printf("[DEBUG] ElastiCache: checking given state (%s) of cluster (%s) against cluster status (%s)", givenState, clusterID, *c.CacheClusterStatus) + // check to make sure we have the node count we're expecting + if int64(len(c.CacheNodes)) != *c.NumCacheNodes { + log.Printf("[DEBUG] Node count is not what is expected: %d found, %d expected", len(c.CacheNodes), *c.NumCacheNodes) + return nil, "creating", nil + } + + log.Printf("[DEBUG] Node count matched (%d)", len(c.CacheNodes)) + // loop the nodes and check their status as well + for _, n := range c.CacheNodes { + log.Printf("[DEBUG] Checking cache node for status: %s", n) + if n.CacheNodeStatus != nil && *n.CacheNodeStatus != "available" { + log.Printf("[DEBUG] Node (%s) is not yet available, status: %s", *n.CacheNodeId, *n.CacheNodeStatus) + return nil, "creating", nil + } + log.Printf("[DEBUG] Cache node not in expected state") + } + log.Printf("[DEBUG] ElastiCache returning given state (%s), cluster: %s", givenState, c) + return c, givenState, nil + } + log.Printf("[DEBUG] current status: %v", *c.CacheClusterStatus) + return c, *c.CacheClusterStatus, nil + } +} + +func buildECARN(identifier, partition, accountid, region string) (string, error) { + if partition == "" { + return "", fmt.Errorf("Unable to construct ElastiCache ARN because of missing AWS partition") + } + if accountid == "" { + return "", fmt.Errorf("Unable to construct ElastiCache ARN because of missing AWS Account ID") + } + arn := fmt.Sprintf("arn:%s:elasticache:%s:%s:cluster:%s", partition, region, accountid, identifier) + return arn, nil + +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_parameter_group.go 
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_parameter_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_parameter_group.go new file mode 100644 index 000000000..1cd689387 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_parameter_group.go @@ -0,0 +1,203 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elasticache" +) + +func resourceAwsElasticacheParameterGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsElasticacheParameterGroupCreate, + Read: resourceAwsElasticacheParameterGroupRead, + Update: resourceAwsElasticacheParameterGroupUpdate, + Delete: resourceAwsElasticacheParameterGroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "family": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "Managed by Terraform", + }, + "parameter": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: false, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + Set: resourceAwsElasticacheParameterHash, + }, + }, + } +} + +func resourceAwsElasticacheParameterGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticacheconn + + createOpts := elasticache.CreateCacheParameterGroupInput{ + CacheParameterGroupName: aws.String(d.Get("name").(string)), + CacheParameterGroupFamily: aws.String(d.Get("family").(string)), + Description: aws.String(d.Get("description").(string)), + } + + log.Printf("[DEBUG] Create Cache Parameter Group: %#v", createOpts) + _, err := conn.CreateCacheParameterGroup(&createOpts) + if err != nil { + return fmt.Errorf("Error creating Cache Parameter Group: %s", err) + } + + d.Partial(true) + d.SetPartial("name") + d.SetPartial("family") + d.SetPartial("description") + d.Partial(false) + + d.SetId(*createOpts.CacheParameterGroupName) + log.Printf("[INFO] Cache Parameter Group ID: %s", d.Id()) + + return resourceAwsElasticacheParameterGroupUpdate(d, meta) +} + +func resourceAwsElasticacheParameterGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticacheconn + + describeOpts := elasticache.DescribeCacheParameterGroupsInput{ + CacheParameterGroupName: aws.String(d.Id()), + } + + describeResp, err := conn.DescribeCacheParameterGroups(&describeOpts) + if err != nil { + return err + } + + if len(describeResp.CacheParameterGroups) != 1 || + *describeResp.CacheParameterGroups[0].CacheParameterGroupName != d.Id() { + return fmt.Errorf("Unable to find Parameter Group: %#v", describeResp.CacheParameterGroups) + } + + d.Set("name", describeResp.CacheParameterGroups[0].CacheParameterGroupName) + d.Set("family", describeResp.CacheParameterGroups[0].CacheParameterGroupFamily) + d.Set("description", describeResp.CacheParameterGroups[0].Description) + + // Only include user 
customized parameters, as there are hundreds of system/default ones + describeParametersOpts := elasticache.DescribeCacheParametersInput{ + CacheParameterGroupName: aws.String(d.Id()), + Source: aws.String("user"), + } + + describeParametersResp, err := conn.DescribeCacheParameters(&describeParametersOpts) + if err != nil { + return err + } + + d.Set("parameter", flattenElastiCacheParameters(describeParametersResp.Parameters)) + + return nil +} + +func resourceAwsElasticacheParameterGroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticacheconn + + d.Partial(true) + + if d.HasChange("parameter") { + o, n := d.GetChange("parameter") + if o == nil { + o = new(schema.Set) + } + if n == nil { + n = new(schema.Set) + } + + os := o.(*schema.Set) + ns := n.(*schema.Set) + + // Expand the "parameter" set to the aws-sdk-go compatible []*elasticache.ParameterNameValue + parameters, err := expandElastiCacheParameters(ns.Difference(os).List()) + if err != nil { + return err + } + + if len(parameters) > 0 { + modifyOpts := elasticache.ModifyCacheParameterGroupInput{ + CacheParameterGroupName: aws.String(d.Get("name").(string)), + ParameterNameValues: parameters, + } + + log.Printf("[DEBUG] Modify Cache Parameter Group: %#v", modifyOpts) + _, err = conn.ModifyCacheParameterGroup(&modifyOpts) + if err != nil { + return fmt.Errorf("Error modifying Cache Parameter Group: %s", err) + } + } + d.SetPartial("parameter") + } + + d.Partial(false) + + return resourceAwsElasticacheParameterGroupRead(d, meta) +} + +func resourceAwsElasticacheParameterGroupDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticacheconn + + return resource.Retry(3*time.Minute, func() *resource.RetryError { + deleteOpts := elasticache.DeleteCacheParameterGroupInput{ + CacheParameterGroupName: aws.String(d.Id()), + } + _, err := conn.DeleteCacheParameterGroup(&deleteOpts) + if err != nil { + awsErr, ok := err.(awserr.Error) + if ok && awsErr.Code() == "CacheParameterGroupNotFoundFault" { + d.SetId("") + return nil + } + if ok && awsErr.Code() == "InvalidCacheParameterGroupState" { + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) +} + +func resourceAwsElasticacheParameterHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["value"].(string))) + + return hashcode.String(buf.String()) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_replication_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_replication_group.go new file mode 100644 index 000000000..ff739f2ce --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_replication_group.go @@ -0,0 +1,534 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsElasticacheReplicationGroup() *schema.Resource { + + resourceSchema := resourceAwsElastiCacheCommonSchema() + + resourceSchema["replication_group_id"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: 
validateAwsElastiCacheReplicationGroupId, + } + + resourceSchema["automatic_failover_enabled"] = &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + } + + resourceSchema["auto_minor_version_upgrade"] = &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + } + + resourceSchema["replication_group_description"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + } + + resourceSchema["number_cache_clusters"] = &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + } + + resourceSchema["primary_endpoint_address"] = &schema.Schema{ + Type: schema.TypeString, + Computed: true, + } + + resourceSchema["configuration_endpoint_address"] = &schema.Schema{ + Type: schema.TypeString, + Computed: true, + } + + resourceSchema["cluster_mode"] = &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "replicas_per_node_group": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "num_node_groups": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + }, + }, + } + + resourceSchema["engine"].Required = false + resourceSchema["engine"].Optional = true + resourceSchema["engine"].Default = "redis" + resourceSchema["engine"].ValidateFunc = validateAwsElastiCacheReplicationGroupEngine + + return &schema.Resource{ + Create: resourceAwsElasticacheReplicationGroupCreate, + Read: resourceAwsElasticacheReplicationGroupRead, + Update: resourceAwsElasticacheReplicationGroupUpdate, + Delete: resourceAwsElasticacheReplicationGroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: resourceSchema, + } +} + +func resourceAwsElasticacheReplicationGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticacheconn + + tags := tagsFromMapEC(d.Get("tags").(map[string]interface{})) + params := &elasticache.CreateReplicationGroupInput{ + ReplicationGroupId: aws.String(d.Get("replication_group_id").(string)), + ReplicationGroupDescription: aws.String(d.Get("replication_group_description").(string)), + AutomaticFailoverEnabled: aws.Bool(d.Get("automatic_failover_enabled").(bool)), + AutoMinorVersionUpgrade: aws.Bool(d.Get("auto_minor_version_upgrade").(bool)), + CacheNodeType: aws.String(d.Get("node_type").(string)), + Engine: aws.String(d.Get("engine").(string)), + Port: aws.Int64(int64(d.Get("port").(int))), + Tags: tags, + } + + if v, ok := d.GetOk("engine_version"); ok { + params.EngineVersion = aws.String(v.(string)) + } + + preferred_azs := d.Get("availability_zones").(*schema.Set).List() + if len(preferred_azs) > 0 { + azs := expandStringList(preferred_azs) + params.PreferredCacheClusterAZs = azs + } + + if v, ok := d.GetOk("parameter_group_name"); ok { + params.CacheParameterGroupName = aws.String(v.(string)) + } + + if v, ok := d.GetOk("subnet_group_name"); ok { + params.CacheSubnetGroupName = aws.String(v.(string)) + } + + security_group_names := d.Get("security_group_names").(*schema.Set).List() + if len(security_group_names) > 0 { + params.CacheSecurityGroupNames = expandStringList(security_group_names) + } + + security_group_ids := d.Get("security_group_ids").(*schema.Set).List() + if len(security_group_ids) > 0 { + params.SecurityGroupIds = expandStringList(security_group_ids) + } + + snaps := d.Get("snapshot_arns").(*schema.Set).List() + if len(snaps) > 0 { + params.SnapshotArns = expandStringList(snaps) + } + + if 
v, ok := d.GetOk("maintenance_window"); ok { + params.PreferredMaintenanceWindow = aws.String(v.(string)) + } + + if v, ok := d.GetOk("notification_topic_arn"); ok { + params.NotificationTopicArn = aws.String(v.(string)) + } + + if v, ok := d.GetOk("snapshot_retention_limit"); ok { + params.SnapshotRetentionLimit = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("snapshot_window"); ok { + params.SnapshotWindow = aws.String(v.(string)) + } + + if v, ok := d.GetOk("snapshot_name"); ok { + params.SnapshotName = aws.String(v.(string)) + } + + clusterMode, clusterModeOk := d.GetOk("cluster_mode") + cacheClusters, cacheClustersOk := d.GetOk("number_cache_clusters") + + if !clusterModeOk && !cacheClustersOk || clusterModeOk && cacheClustersOk { + return fmt.Errorf("Either `number_cache_clusters` or `cluster_mode` must be set") + } + + if clusterModeOk { + clusterModeAttributes := clusterMode.(*schema.Set).List() + attributes := clusterModeAttributes[0].(map[string]interface{}) + + if v, ok := attributes["num_node_groups"]; ok { + params.NumNodeGroups = aws.Int64(int64(v.(int))) + } + + if v, ok := attributes["replicas_per_node_group"]; ok { + params.ReplicasPerNodeGroup = aws.Int64(int64(v.(int))) + } + } + + if cacheClustersOk { + params.NumCacheClusters = aws.Int64(int64(cacheClusters.(int))) + } + + resp, err := conn.CreateReplicationGroup(params) + if err != nil { + return fmt.Errorf("Error creating Elasticache Replication Group: %s", err) + } + + d.SetId(*resp.ReplicationGroup.ReplicationGroupId) + + pending := []string{"creating", "modifying", "restoring", "snapshotting"} + stateConf := &resource.StateChangeConf{ + Pending: pending, + Target: []string{"available"}, + Refresh: cacheReplicationGroupStateRefreshFunc(conn, d.Id(), "available", pending), + Timeout: 40 * time.Minute, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + log.Printf("[DEBUG] Waiting for state to become available: %v", d.Id()) + _, sterr := stateConf.WaitForState() + if sterr != nil { + return fmt.Errorf("Error waiting for elasticache replication group (%s) to be created: %s", d.Id(), sterr) + } + + return resourceAwsElasticacheReplicationGroupRead(d, meta) +} + +func resourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticacheconn + req := &elasticache.DescribeReplicationGroupsInput{ + ReplicationGroupId: aws.String(d.Id()), + } + + res, err := conn.DescribeReplicationGroups(req) + if err != nil { + if eccErr, ok := err.(awserr.Error); ok && eccErr.Code() == "ReplicationGroupNotFoundFault" { + log.Printf("[WARN] Elasticache Replication Group (%s) not found", d.Id()) + d.SetId("") + return nil + } + + return err + } + + var rgp *elasticache.ReplicationGroup + for _, r := range res.ReplicationGroups { + if *r.ReplicationGroupId == d.Id() { + rgp = r + } + } + + if rgp == nil { + log.Printf("[WARN] Replication Group (%s) not found", d.Id()) + d.SetId("") + return nil + } + + if *rgp.Status == "deleting" { + log.Printf("[WARN] The Replication Group %q is currently in the `deleting` state", d.Id()) + d.SetId("") + return nil + } + + if rgp.AutomaticFailover != nil { + switch strings.ToLower(*rgp.AutomaticFailover) { + case "disabled", "disabling": + d.Set("automatic_failover_enabled", false) + case "enabled", "enabling": + d.Set("automatic_failover_enabled", true) + default: + log.Printf("Unknown AutomaticFailover state %s", *rgp.AutomaticFailover) + } + } + + d.Set("replication_group_description", rgp.Description) + 
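+ // (Editor's note, illustrative: number_cache_clusters is not returned by the
+ // API as a standalone attribute; it is derived from the member list below.
+ // For example, a replication group whose MemberClusters are
+ // ["mygroup-001", "mygroup-002", "mygroup-003"] reads back as
+ // number_cache_clusters = 3.)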
d.Set("number_cache_clusters", len(rgp.MemberClusters)) + d.Set("replication_group_id", rgp.ReplicationGroupId) + + if rgp.NodeGroups != nil { + if len(rgp.NodeGroups[0].NodeGroupMembers) == 0 { + return nil + } + + cacheCluster := *rgp.NodeGroups[0].NodeGroupMembers[0] + + res, err := conn.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{ + CacheClusterId: cacheCluster.CacheClusterId, + ShowCacheNodeInfo: aws.Bool(true), + }) + if err != nil { + return err + } + + if len(res.CacheClusters) == 0 { + return nil + } + + c := res.CacheClusters[0] + d.Set("node_type", c.CacheNodeType) + d.Set("engine", c.Engine) + d.Set("engine_version", c.EngineVersion) + d.Set("subnet_group_name", c.CacheSubnetGroupName) + d.Set("security_group_names", flattenElastiCacheSecurityGroupNames(c.CacheSecurityGroups)) + d.Set("security_group_ids", flattenElastiCacheSecurityGroupIds(c.SecurityGroups)) + + if c.CacheParameterGroup != nil { + d.Set("parameter_group_name", c.CacheParameterGroup.CacheParameterGroupName) + } + + d.Set("maintenance_window", c.PreferredMaintenanceWindow) + d.Set("snapshot_window", rgp.SnapshotWindow) + d.Set("snapshot_retention_limit", rgp.SnapshotRetentionLimit) + + if rgp.ConfigurationEndpoint != nil { + d.Set("port", rgp.ConfigurationEndpoint.Port) + d.Set("configuration_endpoint_address", rgp.ConfigurationEndpoint.Address) + } else { + d.Set("port", rgp.NodeGroups[0].PrimaryEndpoint.Port) + d.Set("primary_endpoint_address", rgp.NodeGroups[0].PrimaryEndpoint.Address) + } + + d.Set("auto_minor_version_upgrade", c.AutoMinorVersionUpgrade) + } + + return nil +} + +func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticacheconn + + requestUpdate := false + params := &elasticache.ModifyReplicationGroupInput{ + ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)), + ReplicationGroupId: aws.String(d.Id()), + } + + if d.HasChange("replication_group_description") { + params.ReplicationGroupDescription = aws.String(d.Get("replication_group_description").(string)) + requestUpdate = true + } + + if d.HasChange("automatic_failover_enabled") { + params.AutomaticFailoverEnabled = aws.Bool(d.Get("automatic_failover_enabled").(bool)) + requestUpdate = true + } + + if d.HasChange("auto_minor_version_upgrade") { + params.AutoMinorVersionUpgrade = aws.Bool(d.Get("auto_minor_version_upgrade").(bool)) + requestUpdate = true + } + + if d.HasChange("security_group_ids") { + if attr := d.Get("security_group_ids").(*schema.Set); attr.Len() > 0 { + params.SecurityGroupIds = expandStringList(attr.List()) + requestUpdate = true + } + } + + if d.HasChange("security_group_names") { + if attr := d.Get("security_group_names").(*schema.Set); attr.Len() > 0 { + params.CacheSecurityGroupNames = expandStringList(attr.List()) + requestUpdate = true + } + } + + if d.HasChange("maintenance_window") { + params.PreferredMaintenanceWindow = aws.String(d.Get("maintenance_window").(string)) + requestUpdate = true + } + + if d.HasChange("notification_topic_arn") { + params.NotificationTopicArn = aws.String(d.Get("notification_topic_arn").(string)) + requestUpdate = true + } + + if d.HasChange("parameter_group_name") { + params.CacheParameterGroupName = aws.String(d.Get("parameter_group_name").(string)) + requestUpdate = true + } + + if d.HasChange("engine_version") { + params.EngineVersion = aws.String(d.Get("engine_version").(string)) + requestUpdate = true + } + + if d.HasChange("snapshot_retention_limit") { + // This is a real 
+ // Hack: when snapshots are first enabled, default the snapshotting cluster + // ID to the first cluster in the replication group ("<id>-001"). + o, _ := d.GetChange("snapshot_retention_limit") + if o.(int) == 0 { + params.SnapshottingClusterId = aws.String(fmt.Sprintf("%s-001", d.Id())) + } + + params.SnapshotRetentionLimit = aws.Int64(int64(d.Get("snapshot_retention_limit").(int))) + requestUpdate = true + } + + if d.HasChange("snapshot_window") { + params.SnapshotWindow = aws.String(d.Get("snapshot_window").(string)) + requestUpdate = true + } + + if d.HasChange("node_type") { + params.CacheNodeType = aws.String(d.Get("node_type").(string)) + requestUpdate = true + } + + if requestUpdate { + _, err := conn.ModifyReplicationGroup(params) + if err != nil { + return fmt.Errorf("Error updating Elasticache replication group: %s", err) + } + + pending := []string{"creating", "modifying", "snapshotting"} + stateConf := &resource.StateChangeConf{ + Pending: pending, + Target: []string{"available"}, + Refresh: cacheReplicationGroupStateRefreshFunc(conn, d.Id(), "available", pending), + Timeout: 40 * time.Minute, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + log.Printf("[DEBUG] Waiting for state to become available: %v", d.Id()) + _, sterr := stateConf.WaitForState() + if sterr != nil { + return fmt.Errorf("Error waiting for ElastiCache replication group (%s) to be updated: %s", d.Id(), sterr) + } + } + return resourceAwsElasticacheReplicationGroupRead(d, meta) +} + +func resourceAwsElasticacheReplicationGroupDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticacheconn + + req := &elasticache.DeleteReplicationGroupInput{ + ReplicationGroupId: aws.String(d.Id()), + } + + _, err := conn.DeleteReplicationGroup(req) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "ReplicationGroupNotFoundFault" { + d.SetId("") + return nil + } + + return fmt.Errorf("Error deleting Elasticache replication group: %s", err) + } + + log.Printf("[DEBUG] Waiting for deletion: %v", d.Id()) + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating", "available", "deleting"}, + Target: []string{}, + Refresh: cacheReplicationGroupStateRefreshFunc(conn, d.Id(), "", []string{}), + Timeout: 40 * time.Minute, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + _, sterr := stateConf.WaitForState() + if sterr != nil { + return fmt.Errorf("Error waiting for replication group (%s) to delete: %s", d.Id(), sterr) + } + + return nil +} + +func cacheReplicationGroupStateRefreshFunc(conn *elasticache.ElastiCache, replicationGroupId, givenState string, pending []string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := conn.DescribeReplicationGroups(&elasticache.DescribeReplicationGroupsInput{ + ReplicationGroupId: aws.String(replicationGroupId), + }) + if err != nil { + if eccErr, ok := err.(awserr.Error); ok && eccErr.Code() == "ReplicationGroupNotFoundFault" { + log.Printf("[DEBUG] Replication Group Not Found") + return nil, "", nil + } + + log.Printf("[ERROR] cacheClusterReplicationGroupStateRefreshFunc: %s", err) + return nil, "", err + } + + if len(resp.ReplicationGroups) == 0 { + return nil, "", fmt.Errorf("no Cache Replication Groups found for id (%s)", replicationGroupId) + } + + var rg *elasticache.ReplicationGroup + for _, replicationGroup := range resp.ReplicationGroups { + if *replicationGroup.ReplicationGroupId == replicationGroupId { + log.Printf("[DEBUG] Found matching ElastiCache Replication Group: %s", 
*replicationGroup.ReplicationGroupId) + rg = replicationGroup + } + } + + if rg == nil { + return nil, "", fmt.Errorf("no matching ElastiCache Replication Group for id (%s)", replicationGroupId) + } + + log.Printf("[DEBUG] ElastiCache Replication Group (%s) status: %v", replicationGroupId, *rg.Status) + + // return the current state if it's in the pending array + for _, p := range pending { + log.Printf("[DEBUG] ElastiCache: checking pending state (%s) for Replication Group (%s), Replication Group status: %s", pending, replicationGroupId, *rg.Status) + s := *rg.Status + if p == s { + log.Printf("[DEBUG] Return with status: %v", *rg.Status) + // return the group itself, consistent with cacheClusterStateRefreshFunc + return rg, p, nil + } + } + + return rg, *rg.Status, nil + } +} + +func validateAwsElastiCacheReplicationGroupEngine(v interface{}, k string) (ws []string, errors []error) { + if strings.ToLower(v.(string)) != "redis" { + errors = append(errors, fmt.Errorf("The only acceptable Engine type when using Replication Groups is Redis")) + } + return +} + +func validateAwsElastiCacheReplicationGroupId(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if (len(value) < 1) || (len(value) > 20) { + errors = append(errors, fmt.Errorf( + "%q must contain from 1 to 20 alphanumeric characters or hyphens", k)) + } + if !regexp.MustCompile(`^[0-9a-zA-Z-]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only alphanumeric characters and hyphens allowed in %q", k)) + } + if !regexp.MustCompile(`^[a-z]`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "first character of %q must be a letter", k)) + } + if regexp.MustCompile(`--`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q cannot contain two consecutive hyphens", k)) + } + if regexp.MustCompile(`-$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q cannot end with a hyphen", k)) + } + return +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_security_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_security_group.go new file mode 100644 index 000000000..07676e513 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_security_group.go @@ -0,0 +1,144 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsElasticacheSecurityGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsElasticacheSecurityGroupCreate, + Read: resourceAwsElasticacheSecurityGroupRead, + Delete: resourceAwsElasticacheSecurityGroupDelete, + + Schema: map[string]*schema.Schema{ + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "Managed by Terraform", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "security_group_names": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } +} + +func resourceAwsElasticacheSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticacheconn + + name := d.Get("name").(string) + desc := d.Get("description").(string) + 
d.Get("security_group_names").(*schema.Set) + + names := make([]string, nameSet.Len()) + for i, name := range nameSet.List() { + names[i] = name.(string) + } + + log.Printf("[DEBUG] Cache security group create: name: %s, description: %s, security_group_names: %v", name, desc, names) + res, err := conn.CreateCacheSecurityGroup(&elasticache.CreateCacheSecurityGroupInput{ + Description: aws.String(desc), + CacheSecurityGroupName: aws.String(name), + }) + if err != nil { + return fmt.Errorf("Error creating CacheSecurityGroup: %s", err) + } + + for _, n := range names { + log.Printf("[DEBUG] Authorize cache security group ingress name: %v, ec2 security group name: %v", name, n) + _, err = conn.AuthorizeCacheSecurityGroupIngress(&elasticache.AuthorizeCacheSecurityGroupIngressInput{ + CacheSecurityGroupName: aws.String(name), + EC2SecurityGroupName: aws.String(n), + EC2SecurityGroupOwnerId: aws.String(*res.CacheSecurityGroup.OwnerId), + }) + if err != nil { + log.Printf("[ERROR] Failed to authorize: %v", err) + _, err := conn.DeleteCacheSecurityGroup(&elasticache.DeleteCacheSecurityGroupInput{ + CacheSecurityGroupName: aws.String(d.Id()), + }) + log.Printf("[ERROR] Revert cache security group: %v", err) + } + } + + d.SetId(name) + + return nil +} + +func resourceAwsElasticacheSecurityGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticacheconn + req := &elasticache.DescribeCacheSecurityGroupsInput{ + CacheSecurityGroupName: aws.String(d.Get("name").(string)), + } + + res, err := conn.DescribeCacheSecurityGroups(req) + if err != nil { + return err + } + if len(res.CacheSecurityGroups) == 0 { + return fmt.Errorf("Error missing %v", d.Get("name")) + } + + var group *elasticache.CacheSecurityGroup + for _, g := range res.CacheSecurityGroups { + log.Printf("[DEBUG] CacheSecurityGroupName: %v, id: %v", g.CacheSecurityGroupName, d.Id()) + if *g.CacheSecurityGroupName == d.Id() { + group = g + } + } + if group == nil { + return fmt.Errorf("Error retrieving cache security group: %v", res) + } + + d.Set("name", group.CacheSecurityGroupName) + d.Set("description", group.Description) + + return nil +} + +func resourceAwsElasticacheSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticacheconn + + log.Printf("[DEBUG] Cache security group delete: %s", d.Id()) + + return resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteCacheSecurityGroup(&elasticache.DeleteCacheSecurityGroupInput{ + CacheSecurityGroupName: aws.String(d.Id()), + }) + if err != nil { + apierr, ok := err.(awserr.Error) + if !ok { + return resource.RetryableError(err) + } + log.Printf("[DEBUG] APIError.Code: %v", apierr.Code()) + switch apierr.Code() { + case "InvalidCacheSecurityGroupState": + return resource.RetryableError(err) + case "DependencyViolation": + // If it is a dependency violation, we want to retry + return resource.RetryableError(err) + default: + return resource.NonRetryableError(err) + } + } + return nil + }) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_subnet_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_subnet_group.go new file mode 100644 index 000000000..efae2e703 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticache_subnet_group.go @@ -0,0 +1,176 @@ +package aws + +import ( + "fmt" + "log" + "strings" + "time" + + 
"github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsElasticacheSubnetGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsElasticacheSubnetGroupCreate, + Read: resourceAwsElasticacheSubnetGroupRead, + Update: resourceAwsElasticacheSubnetGroupUpdate, + Delete: resourceAwsElasticacheSubnetGroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "Managed by Terraform", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + StateFunc: func(val interface{}) string { + // Elasticache normalizes subnet names to lowercase, + // so we have to do this too or else we can end up + // with non-converging diffs. + return strings.ToLower(val.(string)) + }, + }, + "subnet_ids": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } +} + +func resourceAwsElasticacheSubnetGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticacheconn + + // Get the group properties + name := d.Get("name").(string) + desc := d.Get("description").(string) + subnetIdsSet := d.Get("subnet_ids").(*schema.Set) + + log.Printf("[DEBUG] Cache subnet group create: name: %s, description: %s", name, desc) + + subnetIds := expandStringList(subnetIdsSet.List()) + + req := &elasticache.CreateCacheSubnetGroupInput{ + CacheSubnetGroupDescription: aws.String(desc), + CacheSubnetGroupName: aws.String(name), + SubnetIds: subnetIds, + } + + _, err := conn.CreateCacheSubnetGroup(req) + if err != nil { + return fmt.Errorf("Error creating CacheSubnetGroup: %s", err) + } + + // Assign the group name as the resource ID + // Elasticache always retains the name in lower case, so we have to + // mimic that or else we won't be able to refresh a resource whose + // name contained uppercase characters. + d.SetId(strings.ToLower(name)) + + return nil +} + +func resourceAwsElasticacheSubnetGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticacheconn + req := &elasticache.DescribeCacheSubnetGroupsInput{ + CacheSubnetGroupName: aws.String(d.Get("name").(string)), + } + + res, err := conn.DescribeCacheSubnetGroups(req) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "CacheSubnetGroupNotFoundFault" { + // Update state to indicate the db subnet no longer exists. 
+ log.Printf("[WARN] Elasticache Subnet Group (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + return err + } + if len(res.CacheSubnetGroups) == 0 { + return fmt.Errorf("Error missing %v", d.Get("name")) + } + + var group *elasticache.CacheSubnetGroup + for _, g := range res.CacheSubnetGroups { + log.Printf("[DEBUG] %v %v", g.CacheSubnetGroupName, d.Id()) + if *g.CacheSubnetGroupName == d.Id() { + group = g + } + } + if group == nil { + return fmt.Errorf("Error retrieving cache subnet group: %v", res) + } + + ids := make([]string, len(group.Subnets)) + for i, s := range group.Subnets { + ids[i] = *s.SubnetIdentifier + } + + d.Set("name", group.CacheSubnetGroupName) + d.Set("description", group.CacheSubnetGroupDescription) + d.Set("subnet_ids", ids) + + return nil +} + +func resourceAwsElasticacheSubnetGroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticacheconn + if d.HasChange("subnet_ids") || d.HasChange("description") { + var subnets []*string + if v := d.Get("subnet_ids"); v != nil { + for _, v := range v.(*schema.Set).List() { + subnets = append(subnets, aws.String(v.(string))) + } + } + log.Printf("[DEBUG] Updating ElastiCache Subnet Group") + + _, err := conn.ModifyCacheSubnetGroup(&elasticache.ModifyCacheSubnetGroupInput{ + CacheSubnetGroupName: aws.String(d.Get("name").(string)), + CacheSubnetGroupDescription: aws.String(d.Get("description").(string)), + SubnetIds: subnets, + }) + if err != nil { + return err + } + } + + return resourceAwsElasticacheSubnetGroupRead(d, meta) +} +func resourceAwsElasticacheSubnetGroupDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).elasticacheconn + + log.Printf("[DEBUG] Cache subnet group delete: %s", d.Id()) + + return resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteCacheSubnetGroup(&elasticache.DeleteCacheSubnetGroupInput{ + CacheSubnetGroupName: aws.String(d.Id()), + }) + if err != nil { + apierr, ok := err.(awserr.Error) + if !ok { + return resource.RetryableError(err) + } + log.Printf("[DEBUG] APIError.Code: %v", apierr.Code()) + switch apierr.Code() { + case "DependencyViolation": + // If it is a dependency violation, we want to retry + return resource.RetryableError(err) + default: + return resource.NonRetryableError(err) + } + } + return nil + }) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticsearch_domain.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticsearch_domain.go new file mode 100644 index 000000000..c931b119e --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticsearch_domain.go @@ -0,0 +1,467 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsElasticSearchDomain() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsElasticSearchDomainCreate, + Read: resourceAwsElasticSearchDomainRead, + Update: resourceAwsElasticSearchDomainUpdate, + Delete: resourceAwsElasticSearchDomainDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsElasticSearchDomainImport, + }, + + Schema: map[string]*schema.Schema{ + 
"access_policies": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateJsonString, + DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, + }, + "advanced_options": { + Type: schema.TypeMap, + Optional: true, + Computed: true, + }, + "domain_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[a-z][0-9a-z\-]{2,27}$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must start with a lowercase alphabet and be at least 3 and no more than 28 characters long. Valid characters are a-z (lowercase letters), 0-9, and - (hyphen).", k)) + } + return + }, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "domain_id": { + Type: schema.TypeString, + Computed: true, + }, + "endpoint": { + Type: schema.TypeString, + Computed: true, + }, + "ebs_options": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ebs_enabled": { + Type: schema.TypeBool, + Required: true, + }, + "iops": { + Type: schema.TypeInt, + Optional: true, + }, + "volume_size": { + Type: schema.TypeInt, + Optional: true, + }, + "volume_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + }, + }, + "cluster_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dedicated_master_count": { + Type: schema.TypeInt, + Optional: true, + }, + "dedicated_master_enabled": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "dedicated_master_type": { + Type: schema.TypeString, + Optional: true, + }, + "instance_count": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + }, + "instance_type": { + Type: schema.TypeString, + Optional: true, + Default: "m3.medium.elasticsearch", + }, + "zone_awareness_enabled": { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + "snapshot_options": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "automated_snapshot_start_hour": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + "elasticsearch_version": { + Type: schema.TypeString, + Optional: true, + Default: "1.5", + ForceNew: true, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsElasticSearchDomainImport( + d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("domain_name", d.Id()) + return []*schema.ResourceData{d}, nil +} + +func resourceAwsElasticSearchDomainCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).esconn + + input := elasticsearch.CreateElasticsearchDomainInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + ElasticsearchVersion: aws.String(d.Get("elasticsearch_version").(string)), + } + + if v, ok := d.GetOk("access_policies"); ok { + input.AccessPolicies = aws.String(v.(string)) + } + + if v, ok := d.GetOk("advanced_options"); ok { + input.AdvancedOptions = stringMapToPointers(v.(map[string]interface{})) + } + + if v, ok := d.GetOk("ebs_options"); ok { + options := v.([]interface{}) + + if len(options) > 1 { + return fmt.Errorf("Only a single ebs_options block is expected") + } else if len(options) == 1 { + if options[0] == nil { + return fmt.Errorf("At least one field is expected inside ebs_options") + } + + s := options[0].(map[string]interface{}) 
+ input.EBSOptions = expandESEBSOptions(s) + } + } + + if v, ok := d.GetOk("cluster_config"); ok { + config := v.([]interface{}) + + if len(config) > 1 { + return fmt.Errorf("Only a single cluster_config block is expected") + } else if len(config) == 1 { + if config[0] == nil { + return fmt.Errorf("At least one field is expected inside cluster_config") + } + m := config[0].(map[string]interface{}) + input.ElasticsearchClusterConfig = expandESClusterConfig(m) + } + } + + if v, ok := d.GetOk("snapshot_options"); ok { + options := v.([]interface{}) + + if len(options) > 1 { + return fmt.Errorf("Only a single snapshot_options block is expected") + } else if len(options) == 1 { + if options[0] == nil { + return fmt.Errorf("At least one field is expected inside snapshot_options") + } + + o := options[0].(map[string]interface{}) + + snapshotOptions := elasticsearch.SnapshotOptions{ + AutomatedSnapshotStartHour: aws.Int64(int64(o["automated_snapshot_start_hour"].(int))), + } + + input.SnapshotOptions = &snapshotOptions + } + } + + log.Printf("[DEBUG] Creating ElasticSearch domain: %s", input) + out, err := conn.CreateElasticsearchDomain(&input) + if err != nil { + return err + } + + d.SetId(*out.DomainStatus.ARN) + + log.Printf("[DEBUG] Waiting for ElasticSearch domain %q to be created", d.Id()) + err = resource.Retry(60*time.Minute, func() *resource.RetryError { + out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + }) + if err != nil { + return resource.NonRetryableError(err) + } + + if !*out.DomainStatus.Processing && out.DomainStatus.Endpoint != nil { + return nil + } + + return resource.RetryableError( + fmt.Errorf("%q: Timeout while waiting for the domain to be created", d.Id())) + }) + if err != nil { + return err + } + + tags := tagsFromMapElasticsearchService(d.Get("tags").(map[string]interface{})) + + if err := setTagsElasticsearchService(conn, d, *out.DomainStatus.ARN); err != nil { + return err + } + + d.Set("tags", tagsToMapElasticsearchService(tags)) + d.SetPartial("tags") + d.Partial(false) + + log.Printf("[DEBUG] ElasticSearch domain %q created", d.Id()) + + return resourceAwsElasticSearchDomainRead(d, meta) +} + +func resourceAwsElasticSearchDomainRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).esconn + + out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + }) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "ResourceNotFoundException" { + log.Printf("[INFO] ElasticSearch Domain %q not found", d.Get("domain_name").(string)) + d.SetId("") + return nil + } + return err + } + + log.Printf("[DEBUG] Received ElasticSearch domain: %s", out) + + ds := out.DomainStatus + + if ds.AccessPolicies != nil && *ds.AccessPolicies != "" { + policies, err := normalizeJsonString(*ds.AccessPolicies) + if err != nil { + return errwrap.Wrapf("access policies contain an invalid JSON: {{err}}", err) + } + d.Set("access_policies", policies) + } + err = d.Set("advanced_options", pointersMapToStringList(ds.AdvancedOptions)) + if err != nil { + return err + } + d.SetId(*ds.ARN) + d.Set("domain_id", ds.DomainId) + d.Set("domain_name", ds.DomainName) + d.Set("elasticsearch_version", ds.ElasticsearchVersion) + if ds.Endpoint != nil { + d.Set("endpoint", *ds.Endpoint) + } + + err = d.Set("ebs_options", flattenESEBSOptions(ds.EBSOptions)) + if err != 
nil { + return err + } + err = d.Set("cluster_config", flattenESClusterConfig(ds.ElasticsearchClusterConfig)) + if err != nil { + return err + } + if ds.SnapshotOptions != nil { + d.Set("snapshot_options", map[string]interface{}{ + "automated_snapshot_start_hour": *ds.SnapshotOptions.AutomatedSnapshotStartHour, + }) + } + + d.Set("arn", ds.ARN) + + listOut, err := conn.ListTags(&elasticsearch.ListTagsInput{ + ARN: ds.ARN, + }) + + if err != nil { + return err + } + var est []*elasticsearch.Tag + if len(listOut.TagList) > 0 { + est = listOut.TagList + } + + d.Set("tags", tagsToMapElasticsearchService(est)) + + return nil +} + +func resourceAwsElasticSearchDomainUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).esconn + + d.Partial(true) + + if err := setTagsElasticsearchService(conn, d, d.Id()); err != nil { + return err + } else { + d.SetPartial("tags") + } + + input := elasticsearch.UpdateElasticsearchDomainConfigInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + } + + if d.HasChange("access_policies") { + input.AccessPolicies = aws.String(d.Get("access_policies").(string)) + } + + if d.HasChange("advanced_options") { + input.AdvancedOptions = stringMapToPointers(d.Get("advanced_options").(map[string]interface{})) + } + + if d.HasChange("ebs_options") { + options := d.Get("ebs_options").([]interface{}) + + if len(options) > 1 { + return fmt.Errorf("Only a single ebs_options block is expected") + } else if len(options) == 1 { + s := options[0].(map[string]interface{}) + input.EBSOptions = expandESEBSOptions(s) + } + } + + if d.HasChange("cluster_config") { + config := d.Get("cluster_config").([]interface{}) + + if len(config) > 1 { + return fmt.Errorf("Only a single cluster_config block is expected") + } else if len(config) == 1 { + m := config[0].(map[string]interface{}) + input.ElasticsearchClusterConfig = expandESClusterConfig(m) + } + } + + if d.HasChange("snapshot_options") { + options := d.Get("snapshot_options").([]interface{}) + + if len(options) > 1 { + return fmt.Errorf("Only a single snapshot_options block is expected") + } else if len(options) == 1 { + o := options[0].(map[string]interface{}) + + snapshotOptions := elasticsearch.SnapshotOptions{ + AutomatedSnapshotStartHour: aws.Int64(int64(o["automated_snapshot_start_hour"].(int))), + } + + input.SnapshotOptions = &snapshotOptions + } + } + + _, err := conn.UpdateElasticsearchDomainConfig(&input) + if err != nil { + return err + } + + err = resource.Retry(60*time.Minute, func() *resource.RetryError { + out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + }) + if err != nil { + return resource.NonRetryableError(err) + } + + if *out.DomainStatus.Processing == false { + return nil + } + + return resource.RetryableError( + fmt.Errorf("%q: Timeout while waiting for changes to be processed", d.Id())) + }) + if err != nil { + return err + } + + d.Partial(false) + + return resourceAwsElasticSearchDomainRead(d, meta) +} + +func resourceAwsElasticSearchDomainDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).esconn + + log.Printf("[DEBUG] Deleting ElasticSearch domain: %q", d.Get("domain_name").(string)) + _, err := conn.DeleteElasticsearchDomain(&elasticsearch.DeleteElasticsearchDomainInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + }) + if err != nil { + return err + } + + log.Printf("[DEBUG] Waiting for ElasticSearch domain %q to be 
deleted", d.Get("domain_name").(string)) + err = resource.Retry(90*time.Minute, func() *resource.RetryError { + out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + }) + + if err != nil { + awsErr, ok := err.(awserr.Error) + if !ok { + return resource.NonRetryableError(err) + } + + if awsErr.Code() == "ResourceNotFoundException" { + return nil + } + + return resource.NonRetryableError(err) + } + + if !*out.DomainStatus.Processing { + return nil + } + + return resource.RetryableError( + fmt.Errorf("%q: Timeout while waiting for the domain to be deleted", d.Id())) + }) + + d.SetId("") + + return err +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticsearch_domain_policy.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticsearch_domain_policy.go new file mode 100644 index 000000000..dfb22c64d --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elasticsearch_domain_policy.go @@ -0,0 +1,127 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsElasticSearchDomainPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsElasticSearchDomainPolicyUpsert, + Read: resourceAwsElasticSearchDomainPolicyRead, + Update: resourceAwsElasticSearchDomainPolicyUpsert, + Delete: resourceAwsElasticSearchDomainPolicyDelete, + + Schema: map[string]*schema.Schema{ + "domain_name": { + Type: schema.TypeString, + Required: true, + }, + "access_policies": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, + }, + }, + } +} + +func resourceAwsElasticSearchDomainPolicyRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).esconn + name := d.Get("domain_name").(string) + out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{ + DomainName: aws.String(name), + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceNotFound" { + log.Printf("[WARN] ElasticSearch Domain %q not found, removing", name) + d.SetId("") + return nil + } + return err + } + + log.Printf("[DEBUG] Received ElasticSearch domain: %s", out) + + ds := out.DomainStatus + d.Set("access_policies", ds.AccessPolicies) + + return nil +} + +func resourceAwsElasticSearchDomainPolicyUpsert(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).esconn + domainName := d.Get("domain_name").(string) + _, err := conn.UpdateElasticsearchDomainConfig(&elasticsearch.UpdateElasticsearchDomainConfigInput{ + DomainName: aws.String(domainName), + AccessPolicies: aws.String(d.Get("access_policies").(string)), + }) + if err != nil { + return err + } + + d.SetId("esd-policy-" + domainName) + + err = resource.Retry(50*time.Minute, func() *resource.RetryError { + out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + }) + if err != nil { + return resource.NonRetryableError(err) + } + + if *out.DomainStatus.Processing == false { + return nil + } + + return resource.RetryableError( + fmt.Errorf("%q: 
Timeout while waiting for changes to be processed", d.Id())) + }) + if err != nil { + return err + } + + return resourceAwsElasticSearchDomainPolicyRead(d, meta) +} + +func resourceAwsElasticSearchDomainPolicyDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).esconn + + _, err := conn.UpdateElasticsearchDomainConfig(&elasticsearch.UpdateElasticsearchDomainConfigInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + AccessPolicies: aws.String(""), + }) + if err != nil { + return err + } + + log.Printf("[DEBUG] Waiting for ElasticSearch domain policy %q to be deleted", d.Get("domain_name").(string)) + err = resource.Retry(60*time.Minute, func() *resource.RetryError { + out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + }) + if err != nil { + return resource.NonRetryableError(err) + } + + if *out.DomainStatus.Processing == false { + return nil + } + + return resource.RetryableError( + fmt.Errorf("%q: Timeout while waiting for policy to be deleted", d.Id())) + }) + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elb.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elb.go new file mode 100644 index 000000000..3878c9611 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elb.go @@ -0,0 +1,976 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "regexp" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/elb" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsElb() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsElbCreate, + Read: resourceAwsElbRead, + Update: resourceAwsElbUpdate, + Delete: resourceAwsElbDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: validateElbName, + }, + "name_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateElbNamePrefix, + }, + + "internal": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "cross_zone_load_balancing": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + + "availability_zones": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Computed: true, + Set: schema.HashString, + }, + + "instances": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Computed: true, + Set: schema.HashString, + }, + + "security_groups": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Computed: true, + Set: schema.HashString, + }, + + "source_security_group": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "source_security_group_id": &schema.Schema{ + Type: schema.TypeString, + 
Computed: true, + }, + + "subnets": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Computed: true, + Set: schema.HashString, + }, + + "idle_timeout": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 60, + ValidateFunc: validateIntegerInRange(1, 3600), + }, + + "connection_draining": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "connection_draining_timeout": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 300, + }, + + "access_logs": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "interval": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 60, + ValidateFunc: validateAccessLogsInterval, + }, + "bucket": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "bucket_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "enabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + }, + }, + }, + + "listener": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ValidateFunc: validateIntegerInRange(1, 65535), + }, + + "instance_protocol": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validateListenerProtocol, + }, + + "lb_port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ValidateFunc: validateIntegerInRange(1, 65535), + }, + + "lb_protocol": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validateListenerProtocol, + }, + + "ssl_certificate_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + Set: resourceAwsElbListenerHash, + }, + + "health_check": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "healthy_threshold": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ValidateFunc: validateIntegerInRange(2, 10), + }, + + "unhealthy_threshold": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ValidateFunc: validateIntegerInRange(2, 10), + }, + + "target": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validateHeathCheckTarget, + }, + + "interval": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ValidateFunc: validateIntegerInRange(5, 300), + }, + + "timeout": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ValidateFunc: validateIntegerInRange(2, 60), + }, + }, + }, + }, + + "dns_name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "zone_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsElbCreate(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + // Expand the "listener" set to aws-sdk-go compat []*elb.Listener + listeners, err := expandListeners(d.Get("listener").(*schema.Set).List()) + if err != nil { + return err + } + + var elbName string + if v, ok := d.GetOk("name"); ok { + elbName = v.(string) + } else { + if v, ok := d.GetOk("name_prefix"); ok { + elbName = resource.PrefixedUniqueId(v.(string)) + } else { + elbName = resource.PrefixedUniqueId("tf-lb-") + } + d.Set("name", elbName) + } + + tags := 
tagsFromMapELB(d.Get("tags").(map[string]interface{})) + // Provision the elb + elbOpts := &elb.CreateLoadBalancerInput{ + LoadBalancerName: aws.String(elbName), + Listeners: listeners, + Tags: tags, + } + + if scheme, ok := d.GetOk("internal"); ok && scheme.(bool) { + elbOpts.Scheme = aws.String("internal") + } + + if v, ok := d.GetOk("availability_zones"); ok { + elbOpts.AvailabilityZones = expandStringList(v.(*schema.Set).List()) + } + + if v, ok := d.GetOk("security_groups"); ok { + elbOpts.SecurityGroups = expandStringList(v.(*schema.Set).List()) + } + + if v, ok := d.GetOk("subnets"); ok { + elbOpts.Subnets = expandStringList(v.(*schema.Set).List()) + } + + log.Printf("[DEBUG] ELB create configuration: %#v", elbOpts) + err = resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := elbconn.CreateLoadBalancer(elbOpts) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + // Check for IAM SSL Cert error due to eventual consistency + if awsErr.Code() == "CertificateNotFound" { + return resource.RetryableError( + fmt.Errorf("Error creating ELB Listener with SSL Cert, retrying: %s", err)) + } + } + return resource.NonRetryableError(err) + } + return nil + }) + + if err != nil { + return err + } + + // Assign the elb's unique identifier for use later + d.SetId(elbName) + log.Printf("[INFO] ELB ID: %s", d.Id()) + + // Enable partial mode and record what we set + d.Partial(true) + d.SetPartial("name") + d.SetPartial("internal") + d.SetPartial("availability_zones") + d.SetPartial("listener") + d.SetPartial("security_groups") + d.SetPartial("subnets") + + d.Set("tags", tagsToMapELB(tags)) + + return resourceAwsElbUpdate(d, meta) +} + +func resourceAwsElbRead(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + elbName := d.Id() + + // Retrieve the ELB properties for updating the state + describeElbOpts := &elb.DescribeLoadBalancersInput{ + LoadBalancerNames: []*string{aws.String(elbName)}, + } + + describeResp, err := elbconn.DescribeLoadBalancers(describeElbOpts) + if err != nil { + if isLoadBalancerNotFound(err) { + // The ELB is gone now, so just remove it from the state + d.SetId("") + return nil + } + + return fmt.Errorf("Error retrieving ELB: %s", err) + } + if len(describeResp.LoadBalancerDescriptions) != 1 { + return fmt.Errorf("Unable to find ELB: %#v", describeResp.LoadBalancerDescriptions) + } + + describeAttrsOpts := &elb.DescribeLoadBalancerAttributesInput{ + LoadBalancerName: aws.String(elbName), + } + describeAttrsResp, err := elbconn.DescribeLoadBalancerAttributes(describeAttrsOpts) + if err != nil { + if isLoadBalancerNotFound(err) { + // The ELB is gone now, so just remove it from the state + d.SetId("") + return nil + } + + return fmt.Errorf("Error retrieving ELB: %s", err) + } + + lbAttrs := describeAttrsResp.LoadBalancerAttributes + + lb := describeResp.LoadBalancerDescriptions[0] + + d.Set("name", lb.LoadBalancerName) + d.Set("dns_name", lb.DNSName) + d.Set("zone_id", lb.CanonicalHostedZoneNameID) + + var scheme bool + if lb.Scheme != nil { + scheme = *lb.Scheme == "internal" + } + d.Set("internal", scheme) + d.Set("availability_zones", flattenStringList(lb.AvailabilityZones)) + d.Set("instances", flattenInstances(lb.Instances)) + d.Set("listener", flattenListeners(lb.ListenerDescriptions)) + d.Set("security_groups", flattenStringList(lb.SecurityGroups)) + if lb.SourceSecurityGroup != nil { + group := lb.SourceSecurityGroup.GroupName + if lb.SourceSecurityGroup.OwnerAlias != nil && 
*lb.SourceSecurityGroup.OwnerAlias != "" { + group = aws.String(*lb.SourceSecurityGroup.OwnerAlias + "/" + *lb.SourceSecurityGroup.GroupName) + } + d.Set("source_security_group", group) + + // Manually look up the ELB Security Group ID, since it's not provided + var elbVpc string + if lb.VPCId != nil { + elbVpc = *lb.VPCId + sgId, err := sourceSGIdByName(meta, *lb.SourceSecurityGroup.GroupName, elbVpc) + if err != nil { + return fmt.Errorf("[WARN] Error looking up ELB Security Group ID: %s", err) + } else { + d.Set("source_security_group_id", sgId) + } + } + } + d.Set("subnets", flattenStringList(lb.Subnets)) + if lbAttrs.ConnectionSettings != nil { + d.Set("idle_timeout", lbAttrs.ConnectionSettings.IdleTimeout) + } + d.Set("connection_draining", lbAttrs.ConnectionDraining.Enabled) + d.Set("connection_draining_timeout", lbAttrs.ConnectionDraining.Timeout) + d.Set("cross_zone_load_balancing", lbAttrs.CrossZoneLoadBalancing.Enabled) + if lbAttrs.AccessLog != nil { + // The AWS API does not allow users to remove access_logs, only disable them. + // During creation of the ELB, Terraform sets the access_logs to disabled, + // so there should not be a case where lbAttrs.AccessLog above is nil. + + // Here we do not record the remove value of access_log if: + // - there is no access_log block in the configuration + // - the remote access_logs are disabled + // + // This indicates there is no access_log in the configuration. + // - externally added access_logs will be enabled, so we'll detect the drift + // - locally added access_logs will be in the config, so we'll add to the + // API/state + // See https://github.com/hashicorp/terraform/issues/10138 + _, n := d.GetChange("access_logs") + elbal := lbAttrs.AccessLog + nl := n.([]interface{}) + if len(nl) == 0 && !*elbal.Enabled { + elbal = nil + } + if err := d.Set("access_logs", flattenAccessLog(elbal)); err != nil { + return err + } + } + + resp, err := elbconn.DescribeTags(&elb.DescribeTagsInput{ + LoadBalancerNames: []*string{lb.LoadBalancerName}, + }) + + var et []*elb.Tag + if len(resp.TagDescriptions) > 0 { + et = resp.TagDescriptions[0].Tags + } + d.Set("tags", tagsToMapELB(et)) + + // There's only one health check, so save that to state as we + // currently can + if *lb.HealthCheck.Target != "" { + d.Set("health_check", flattenHealthCheck(lb.HealthCheck)) + } + + return nil +} + +func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + d.Partial(true) + + if d.HasChange("listener") { + o, n := d.GetChange("listener") + os := o.(*schema.Set) + ns := n.(*schema.Set) + + remove, _ := expandListeners(os.Difference(ns).List()) + add, _ := expandListeners(ns.Difference(os).List()) + + if len(remove) > 0 { + ports := make([]*int64, 0, len(remove)) + for _, listener := range remove { + ports = append(ports, listener.LoadBalancerPort) + } + + deleteListenersOpts := &elb.DeleteLoadBalancerListenersInput{ + LoadBalancerName: aws.String(d.Id()), + LoadBalancerPorts: ports, + } + + log.Printf("[DEBUG] ELB Delete Listeners opts: %s", deleteListenersOpts) + _, err := elbconn.DeleteLoadBalancerListeners(deleteListenersOpts) + if err != nil { + return fmt.Errorf("Failure removing outdated ELB listeners: %s", err) + } + } + + if len(add) > 0 { + createListenersOpts := &elb.CreateLoadBalancerListenersInput{ + LoadBalancerName: aws.String(d.Id()), + Listeners: add, + } + + // Occasionally AWS will error with a 'duplicate listener', without any + // other listeners on the ELB. 
Retry here to eliminate that. + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + log.Printf("[DEBUG] ELB Create Listeners opts: %s", createListenersOpts) + if _, err := elbconn.CreateLoadBalancerListeners(createListenersOpts); err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "DuplicateListener" { + log.Printf("[DEBUG] Duplicate listener found for ELB (%s), retrying", d.Id()) + return resource.RetryableError(awsErr) + } + if awsErr.Code() == "CertificateNotFound" && strings.Contains(awsErr.Message(), "Server Certificate not found for the key: arn") { + log.Printf("[DEBUG] SSL Cert not found for given ARN, retrying") + return resource.RetryableError(awsErr) + } + } + + // Didn't recognize the error, so shouldn't retry. + return resource.NonRetryableError(err) + } + // Successful creation + return nil + }) + if err != nil { + return fmt.Errorf("Failure adding new or updated ELB listeners: %s", err) + } + } + + d.SetPartial("listener") + } + + // If we currently have instances, or did have instances, + // we want to figure out what to add and remove from the load + // balancer + if d.HasChange("instances") { + o, n := d.GetChange("instances") + os := o.(*schema.Set) + ns := n.(*schema.Set) + remove := expandInstanceString(os.Difference(ns).List()) + add := expandInstanceString(ns.Difference(os).List()) + + if len(add) > 0 { + registerInstancesOpts := elb.RegisterInstancesWithLoadBalancerInput{ + LoadBalancerName: aws.String(d.Id()), + Instances: add, + } + + _, err := elbconn.RegisterInstancesWithLoadBalancer(®isterInstancesOpts) + if err != nil { + return fmt.Errorf("Failure registering instances with ELB: %s", err) + } + } + if len(remove) > 0 { + deRegisterInstancesOpts := elb.DeregisterInstancesFromLoadBalancerInput{ + LoadBalancerName: aws.String(d.Id()), + Instances: remove, + } + + _, err := elbconn.DeregisterInstancesFromLoadBalancer(&deRegisterInstancesOpts) + if err != nil { + return fmt.Errorf("Failure deregistering instances from ELB: %s", err) + } + } + + d.SetPartial("instances") + } + + if d.HasChange("cross_zone_load_balancing") || d.HasChange("idle_timeout") || d.HasChange("access_logs") { + attrs := elb.ModifyLoadBalancerAttributesInput{ + LoadBalancerName: aws.String(d.Get("name").(string)), + LoadBalancerAttributes: &elb.LoadBalancerAttributes{ + CrossZoneLoadBalancing: &elb.CrossZoneLoadBalancing{ + Enabled: aws.Bool(d.Get("cross_zone_load_balancing").(bool)), + }, + ConnectionSettings: &elb.ConnectionSettings{ + IdleTimeout: aws.Int64(int64(d.Get("idle_timeout").(int))), + }, + }, + } + + logs := d.Get("access_logs").([]interface{}) + if len(logs) == 1 { + l := logs[0].(map[string]interface{}) + accessLog := &elb.AccessLog{ + Enabled: aws.Bool(l["enabled"].(bool)), + EmitInterval: aws.Int64(int64(l["interval"].(int))), + S3BucketName: aws.String(l["bucket"].(string)), + } + + if l["bucket_prefix"] != "" { + accessLog.S3BucketPrefix = aws.String(l["bucket_prefix"].(string)) + } + + attrs.LoadBalancerAttributes.AccessLog = accessLog + } else if len(logs) == 0 { + // disable access logs + attrs.LoadBalancerAttributes.AccessLog = &elb.AccessLog{ + Enabled: aws.Bool(false), + } + } + + log.Printf("[DEBUG] ELB Modify Load Balancer Attributes Request: %#v", attrs) + _, err := elbconn.ModifyLoadBalancerAttributes(&attrs) + if err != nil { + return fmt.Errorf("Failure configuring ELB attributes: %s", err) + } + + d.SetPartial("cross_zone_load_balancing") + d.SetPartial("idle_timeout") + d.SetPartial("connection_draining_timeout") + 
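+
+    // For reference, a minimal sketch of the attribute payload this block
+    // sends when one access_logs entry is enabled (hypothetical values,
+    // not taken from any real configuration):
+    //
+    //   attrs.LoadBalancerAttributes = &elb.LoadBalancerAttributes{
+    //       CrossZoneLoadBalancing: &elb.CrossZoneLoadBalancing{Enabled: aws.Bool(true)},
+    //       ConnectionSettings:     &elb.ConnectionSettings{IdleTimeout: aws.Int64(60)},
+    //       AccessLog: &elb.AccessLog{
+    //           Enabled:      aws.Bool(true),
+    //           EmitInterval: aws.Int64(60),
+    //           S3BucketName: aws.String("my-elb-logs"), // hypothetical bucket
+    //       },
+    //   }
+  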
} + + // We have to do these changes separately from everything else since + // they have some weird undocumented rules. You can't set the timeout + // without having connection draining to true, so we set that to true, + // set the timeout, then reset it to false if requested. + if d.HasChange("connection_draining") || d.HasChange("connection_draining_timeout") { + // We do timeout changes first since they require us to set draining + // to true for a hot second. + if d.HasChange("connection_draining_timeout") { + attrs := elb.ModifyLoadBalancerAttributesInput{ + LoadBalancerName: aws.String(d.Get("name").(string)), + LoadBalancerAttributes: &elb.LoadBalancerAttributes{ + ConnectionDraining: &elb.ConnectionDraining{ + Enabled: aws.Bool(true), + Timeout: aws.Int64(int64(d.Get("connection_draining_timeout").(int))), + }, + }, + } + + _, err := elbconn.ModifyLoadBalancerAttributes(&attrs) + if err != nil { + return fmt.Errorf("Failure configuring ELB attributes: %s", err) + } + + d.SetPartial("connection_draining_timeout") + } + + // Then we always set connection draining even if there is no change. + // This lets us reset to "false" if requested even with a timeout + // change. + attrs := elb.ModifyLoadBalancerAttributesInput{ + LoadBalancerName: aws.String(d.Get("name").(string)), + LoadBalancerAttributes: &elb.LoadBalancerAttributes{ + ConnectionDraining: &elb.ConnectionDraining{ + Enabled: aws.Bool(d.Get("connection_draining").(bool)), + }, + }, + } + + _, err := elbconn.ModifyLoadBalancerAttributes(&attrs) + if err != nil { + return fmt.Errorf("Failure configuring ELB attributes: %s", err) + } + + d.SetPartial("connection_draining") + } + + if d.HasChange("health_check") { + hc := d.Get("health_check").([]interface{}) + if len(hc) > 0 { + check := hc[0].(map[string]interface{}) + configureHealthCheckOpts := elb.ConfigureHealthCheckInput{ + LoadBalancerName: aws.String(d.Id()), + HealthCheck: &elb.HealthCheck{ + HealthyThreshold: aws.Int64(int64(check["healthy_threshold"].(int))), + UnhealthyThreshold: aws.Int64(int64(check["unhealthy_threshold"].(int))), + Interval: aws.Int64(int64(check["interval"].(int))), + Target: aws.String(check["target"].(string)), + Timeout: aws.Int64(int64(check["timeout"].(int))), + }, + } + _, err := elbconn.ConfigureHealthCheck(&configureHealthCheckOpts) + if err != nil { + return fmt.Errorf("Failure configuring health check for ELB: %s", err) + } + d.SetPartial("health_check") + } + } + + if d.HasChange("security_groups") { + groups := d.Get("security_groups").(*schema.Set).List() + + applySecurityGroupsOpts := elb.ApplySecurityGroupsToLoadBalancerInput{ + LoadBalancerName: aws.String(d.Id()), + SecurityGroups: expandStringList(groups), + } + + _, err := elbconn.ApplySecurityGroupsToLoadBalancer(&applySecurityGroupsOpts) + if err != nil { + return fmt.Errorf("Failure applying security groups to ELB: %s", err) + } + + d.SetPartial("security_groups") + } + + if d.HasChange("availability_zones") { + o, n := d.GetChange("availability_zones") + os := o.(*schema.Set) + ns := n.(*schema.Set) + + removed := expandStringList(os.Difference(ns).List()) + added := expandStringList(ns.Difference(os).List()) + + if len(added) > 0 { + enableOpts := &elb.EnableAvailabilityZonesForLoadBalancerInput{ + LoadBalancerName: aws.String(d.Id()), + AvailabilityZones: added, + } + + log.Printf("[DEBUG] ELB enable availability zones opts: %s", enableOpts) + _, err := elbconn.EnableAvailabilityZonesForLoadBalancer(enableOpts) + if err != nil { + return fmt.Errorf("Failure enabling ELB 
availability zones: %s", err) + } + } + + if len(removed) > 0 { + disableOpts := &elb.DisableAvailabilityZonesForLoadBalancerInput{ + LoadBalancerName: aws.String(d.Id()), + AvailabilityZones: removed, + } + + log.Printf("[DEBUG] ELB disable availability zones opts: %s", disableOpts) + _, err := elbconn.DisableAvailabilityZonesForLoadBalancer(disableOpts) + if err != nil { + return fmt.Errorf("Failure disabling ELB availability zones: %s", err) + } + } + + d.SetPartial("availability_zones") + } + + if d.HasChange("subnets") { + o, n := d.GetChange("subnets") + os := o.(*schema.Set) + ns := n.(*schema.Set) + + removed := expandStringList(os.Difference(ns).List()) + added := expandStringList(ns.Difference(os).List()) + + if len(removed) > 0 { + detachOpts := &elb.DetachLoadBalancerFromSubnetsInput{ + LoadBalancerName: aws.String(d.Id()), + Subnets: removed, + } + + log.Printf("[DEBUG] ELB detach subnets opts: %s", detachOpts) + _, err := elbconn.DetachLoadBalancerFromSubnets(detachOpts) + if err != nil { + return fmt.Errorf("Failure removing ELB subnets: %s", err) + } + } + + if len(added) > 0 { + attachOpts := &elb.AttachLoadBalancerToSubnetsInput{ + LoadBalancerName: aws.String(d.Id()), + Subnets: added, + } + + log.Printf("[DEBUG] ELB attach subnets opts: %s", attachOpts) + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := elbconn.AttachLoadBalancerToSubnets(attachOpts) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + // eventually consistent issue with removing a subnet in AZ1 and + // immediately adding a new one in the same AZ + if awsErr.Code() == "InvalidConfigurationRequest" && strings.Contains(awsErr.Message(), "cannot be attached to multiple subnets in the same AZ") { + log.Printf("[DEBUG] retrying az association") + return resource.RetryableError(awsErr) + } + } + return resource.NonRetryableError(err) + } + return nil + }) + if err != nil { + return fmt.Errorf("Failure adding ELB subnets: %s", err) + } + } + + d.SetPartial("subnets") + } + + if err := setTagsELB(elbconn, d); err != nil { + return err + } + + d.SetPartial("tags") + d.Partial(false) + + return resourceAwsElbRead(d, meta) +} + +func resourceAwsElbDelete(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + log.Printf("[INFO] Deleting ELB: %s", d.Id()) + + // Destroy the load balancer + deleteElbOpts := elb.DeleteLoadBalancerInput{ + LoadBalancerName: aws.String(d.Id()), + } + if _, err := elbconn.DeleteLoadBalancer(&deleteElbOpts); err != nil { + return fmt.Errorf("Error deleting ELB: %s", err) + } + + return nil +} + +func resourceAwsElbListenerHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%d-", m["instance_port"].(int))) + buf.WriteString(fmt.Sprintf("%s-", + strings.ToLower(m["instance_protocol"].(string)))) + buf.WriteString(fmt.Sprintf("%d-", m["lb_port"].(int))) + buf.WriteString(fmt.Sprintf("%s-", + strings.ToLower(m["lb_protocol"].(string)))) + + if v, ok := m["ssl_certificate_id"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + return hashcode.String(buf.String()) +} + +func isLoadBalancerNotFound(err error) bool { + elberr, ok := err.(awserr.Error) + return ok && elberr.Code() == "LoadBalancerNotFound" +} + +func sourceSGIdByName(meta interface{}, sg, vpcId string) (string, error) { + conn := meta.(*AWSClient).ec2conn + var filters []*ec2.Filter + var sgFilterName, sgFilterVPCID *ec2.Filter + sgFilterName = &ec2.Filter{ + Name: 
aws.String("group-name"), + Values: []*string{aws.String(sg)}, + } + + if vpcId != "" { + sgFilterVPCID = &ec2.Filter{ + Name: aws.String("vpc-id"), + Values: []*string{aws.String(vpcId)}, + } + } + + filters = append(filters, sgFilterName) + + if sgFilterVPCID != nil { + filters = append(filters, sgFilterVPCID) + } + + req := &ec2.DescribeSecurityGroupsInput{ + Filters: filters, + } + resp, err := conn.DescribeSecurityGroups(req) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok { + if ec2err.Code() == "InvalidSecurityGroupID.NotFound" || + ec2err.Code() == "InvalidGroup.NotFound" { + resp = nil + err = nil + } + } + + if err != nil { + log.Printf("Error on ELB SG look up: %s", err) + return "", err + } + } + + if resp == nil || len(resp.SecurityGroups) == 0 { + return "", fmt.Errorf("No security groups found for name %s and vpc id %s", sg, vpcId) + } + + group := resp.SecurityGroups[0] + return *group.GroupId, nil +} + +func validateAccessLogsInterval(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + + // Check if the value is either 5 or 60 (minutes). + if value != 5 && value != 60 { + errors = append(errors, fmt.Errorf( + "%q contains an invalid Access Logs interval \"%d\". "+ + "Valid intervals are either 5 or 60 (minutes).", + k, value)) + } + return +} + +func validateHeathCheckTarget(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + // Parse the Health Check target value. + matches := regexp.MustCompile(`\A(\w+):(\d+)(.+)?\z`).FindStringSubmatch(value) + + // Check if the value contains a valid target. + if matches == nil || len(matches) < 1 { + errors = append(errors, fmt.Errorf( + "%q contains an invalid Health Check: %s", + k, value)) + + // Invalid target? Return immediately, + // there is no need to collect other + // errors. + return + } + + // Check if the value contains a valid protocol. + if !isValidProtocol(matches[1]) { + errors = append(errors, fmt.Errorf( + "%q contains an invalid Health Check protocol %q. "+ + "Valid protocols are either %q, %q, %q, or %q.", + k, matches[1], "TCP", "SSL", "HTTP", "HTTPS")) + } + + // Check if the value contains a valid port range. + port, _ := strconv.Atoi(matches[2]) + if port < 1 || port > 65535 { + errors = append(errors, fmt.Errorf( + "%q contains an invalid Health Check target port \"%d\". "+ + "Valid port is in the range from 1 to 65535 inclusive.", + k, port)) + } + + switch strings.ToLower(matches[1]) { + case "tcp", "ssl": + // Check if value is in the form : for TCP and/or SSL. + if matches[3] != "" { + errors = append(errors, fmt.Errorf( + "%q cannot contain a path in the Health Check target: %s", + k, value)) + } + break + case "http", "https": + // Check if value is in the form :/ for HTTP and/or HTTPS. + if matches[3] == "" { + errors = append(errors, fmt.Errorf( + "%q must contain a path in the Health Check target: %s", + k, value)) + } + + // Cannot be longer than 1024 multibyte characters. + if len([]rune(matches[3])) > 1024 { + errors = append(errors, fmt.Errorf("%q cannot contain a path longer "+ + "than 1024 characters in the Health Check target: %s", + k, value)) + } + break + } + + return +} + +func validateListenerProtocol(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if !isValidProtocol(value) { + errors = append(errors, fmt.Errorf( + "%q contains an invalid Listener protocol %q. 
"+ + "Valid protocols are either %q, %q, %q, or %q.", + k, value, "TCP", "SSL", "HTTP", "HTTPS")) + } + return +} + +func isValidProtocol(s string) bool { + if s == "" { + return false + } + s = strings.ToLower(s) + + validProtocols := map[string]bool{ + "http": true, + "https": true, + "ssl": true, + "tcp": true, + } + + if _, ok := validProtocols[s]; !ok { + return false + } + + return true +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elb_attachment.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elb_attachment.go new file mode 100644 index 000000000..401544ad7 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_elb_attachment.go @@ -0,0 +1,121 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/elb" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsElbAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsElbAttachmentCreate, + Read: resourceAwsElbAttachmentRead, + Delete: resourceAwsElbAttachmentDelete, + + Schema: map[string]*schema.Schema{ + "elb": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + + "instance": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + }, + } +} + +func resourceAwsElbAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + elbName := d.Get("elb").(string) + + instance := d.Get("instance").(string) + + registerInstancesOpts := elb.RegisterInstancesWithLoadBalancerInput{ + LoadBalancerName: aws.String(elbName), + Instances: []*elb.Instance{{InstanceId: aws.String(instance)}}, + } + + log.Printf("[INFO] registering instance %s with ELB %s", instance, elbName) + + _, err := elbconn.RegisterInstancesWithLoadBalancer(®isterInstancesOpts) + if err != nil { + return fmt.Errorf("Failure registering instances with ELB: %s", err) + } + + d.SetId(resource.PrefixedUniqueId(fmt.Sprintf("%s-", elbName))) + + return nil +} + +func resourceAwsElbAttachmentRead(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + elbName := d.Get("elb").(string) + + // only add the instance that was previously defined for this resource + expected := d.Get("instance").(string) + + // Retrieve the ELB properties to get a list of attachments + describeElbOpts := &elb.DescribeLoadBalancersInput{ + LoadBalancerNames: []*string{aws.String(elbName)}, + } + + resp, err := elbconn.DescribeLoadBalancers(describeElbOpts) + if err != nil { + if isLoadBalancerNotFound(err) { + log.Printf("[ERROR] ELB %s not found", elbName) + d.SetId("") + return nil + } + return fmt.Errorf("Error retrieving ELB: %s", err) + } + if len(resp.LoadBalancerDescriptions) != 1 { + log.Printf("[ERROR] Unable to find ELB: %s", resp.LoadBalancerDescriptions) + d.SetId("") + return nil + } + + // only set the instance Id that this resource manages + found := false + for _, i := range resp.LoadBalancerDescriptions[0].Instances { + if expected == *i.InstanceId { + d.Set("instance", expected) + found = true + } + } + + if !found { + log.Printf("[WARN] instance %s not found in elb attachments", expected) + d.SetId("") + } + + return nil +} + +func resourceAwsElbAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + elbName := 
d.Get("elb").(string) + + instance := d.Get("instance").(string) + + log.Printf("[INFO] Deleting Attachment %s from: %s", instance, elbName) + + deRegisterInstancesOpts := elb.DeregisterInstancesFromLoadBalancerInput{ + LoadBalancerName: aws.String(elbName), + Instances: []*elb.Instance{{InstanceId: aws.String(instance)}}, + } + + _, err := elbconn.DeregisterInstancesFromLoadBalancer(&deRegisterInstancesOpts) + if err != nil { + return fmt.Errorf("Failure deregistering instances from ELB: %s", err) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_emr_cluster.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_emr_cluster.go new file mode 100644 index 000000000..f1a0d5512 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_emr_cluster.go @@ -0,0 +1,843 @@ +package aws + +import ( + "log" + + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/emr" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsEMRCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsEMRClusterCreate, + Read: resourceAwsEMRClusterRead, + Update: resourceAwsEMRClusterUpdate, + Delete: resourceAwsEMRClusterDelete, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "release_label": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "master_instance_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "core_instance_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "core_instance_count": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + }, + "cluster_state": { + Type: schema.TypeString, + Computed: true, + }, + "log_uri": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + "master_public_dns": { + Type: schema.TypeString, + Computed: true, + }, + "applications": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "termination_protection": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "keep_job_flow_alive_when_no_steps": { + Type: schema.TypeBool, + ForceNew: true, + Optional: true, + Computed: true, + }, + "ec2_attributes": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key_name": { + Type: schema.TypeString, + Optional: true, + }, + "subnet_id": { + Type: schema.TypeString, + Optional: true, + }, + "additional_master_security_groups": { + Type: schema.TypeString, + Optional: true, + }, + "additional_slave_security_groups": { + Type: schema.TypeString, + Optional: true, + }, + "emr_managed_master_security_group": { + Type: schema.TypeString, + Optional: true, + }, + "emr_managed_slave_security_group": { + Type: schema.TypeString, + Optional: true, + }, + "instance_profile": { + Type: schema.TypeString, + Required: true, + }, + "service_access_security_group": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "bootstrap_action": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{
+            "name": {
+              Type:     schema.TypeString,
+              Required: true,
+            },
+            "path": {
+              Type:     schema.TypeString,
+              Required: true,
+            },
+            "args": {
+              Type:     schema.TypeList,
+              Optional: true,
+              ForceNew: true,
+              Elem:     &schema.Schema{Type: schema.TypeString},
+            },
+          },
+        },
+      },
+      "tags": tagsSchema(),
+      "configurations": {
+        Type:     schema.TypeString,
+        ForceNew: true,
+        Optional: true,
+      },
+      "service_role": {
+        Type:     schema.TypeString,
+        ForceNew: true,
+        Required: true,
+      },
+      "security_configuration": {
+        Type:     schema.TypeString,
+        ForceNew: true,
+        Optional: true,
+      },
+      "autoscaling_role": &schema.Schema{
+        Type:     schema.TypeString,
+        ForceNew: true,
+        Optional: true,
+      },
+      "visible_to_all_users": {
+        Type:     schema.TypeBool,
+        Optional: true,
+        Default:  true,
+      },
+    },
+  }
+}
+
+func resourceAwsEMRClusterCreate(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).emrconn
+
+  log.Printf("[DEBUG] Creating EMR cluster")
+  masterInstanceType := d.Get("master_instance_type").(string)
+  coreInstanceType := masterInstanceType
+  if v, ok := d.GetOk("core_instance_type"); ok {
+    coreInstanceType = v.(string)
+  }
+  coreInstanceCount := d.Get("core_instance_count").(int)
+
+  applications := d.Get("applications").(*schema.Set).List()
+
+  keepJobFlowAliveWhenNoSteps := true
+  if v, ok := d.GetOk("keep_job_flow_alive_when_no_steps"); ok {
+    keepJobFlowAliveWhenNoSteps = v.(bool)
+  }
+
+  terminationProtection := false
+  if v, ok := d.GetOk("termination_protection"); ok {
+    terminationProtection = v.(bool)
+  }
+  instanceConfig := &emr.JobFlowInstancesConfig{
+    MasterInstanceType: aws.String(masterInstanceType),
+    SlaveInstanceType:  aws.String(coreInstanceType),
+    InstanceCount:      aws.Int64(int64(coreInstanceCount)),
+
+    KeepJobFlowAliveWhenNoSteps: aws.Bool(keepJobFlowAliveWhenNoSteps),
+    TerminationProtected:        aws.Bool(terminationProtection),
+  }
+
+  var instanceProfile string
+  if a, ok := d.GetOk("ec2_attributes"); ok {
+    ec2Attributes := a.([]interface{})
+    attributes := ec2Attributes[0].(map[string]interface{})
+
+    if v, ok := attributes["key_name"]; ok {
+      instanceConfig.Ec2KeyName = aws.String(v.(string))
+    }
+    if v, ok := attributes["subnet_id"]; ok {
+      instanceConfig.Ec2SubnetId = aws.String(v.(string))
+    }
+
+    if v, ok := attributes["additional_master_security_groups"]; ok {
+      strSlice := strings.Split(v.(string), ",")
+      for i, s := range strSlice {
+        strSlice[i] = strings.TrimSpace(s)
+      }
+      instanceConfig.AdditionalMasterSecurityGroups = aws.StringSlice(strSlice)
+    }
+
+    if v, ok := attributes["additional_slave_security_groups"]; ok {
+      strSlice := strings.Split(v.(string), ",")
+      for i, s := range strSlice {
+        strSlice[i] = strings.TrimSpace(s)
+      }
+      instanceConfig.AdditionalSlaveSecurityGroups = aws.StringSlice(strSlice)
+    }
+
+    if v, ok := attributes["emr_managed_master_security_group"]; ok {
+      instanceConfig.EmrManagedMasterSecurityGroup = aws.String(v.(string))
+    }
+    if v, ok := attributes["emr_managed_slave_security_group"]; ok {
+      instanceConfig.EmrManagedSlaveSecurityGroup = aws.String(v.(string))
+    }
+
+    if len(strings.TrimSpace(attributes["instance_profile"].(string))) != 0 {
+      instanceProfile = strings.TrimSpace(attributes["instance_profile"].(string))
+    }
+
+    if v, ok := attributes["service_access_security_group"]; ok {
+      instanceConfig.ServiceAccessSecurityGroup = aws.String(v.(string))
+    }
+  }
+
+  emrApps := expandApplications(applications)
+
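+  // For illustration, a hypothetical input of applications = ["Spark", "Hadoop"]
+  // expands (via the expandApplications helper defined below) to:
+  //
+  //   []*emr.Application{
+  //       {Name: aws.String("Spark")},
+  //       {Name: aws.String("Hadoop")},
+  //   }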
+ params := &emr.RunJobFlowInput{ + Instances: instanceConfig, + Name: aws.String(d.Get("name").(string)), + Applications: emrApps, + + ReleaseLabel: aws.String(d.Get("release_label").(string)), + ServiceRole: aws.String(d.Get("service_role").(string)), + VisibleToAllUsers: aws.Bool(d.Get("visible_to_all_users").(bool)), + } + + if v, ok := d.GetOk("log_uri"); ok { + params.LogUri = aws.String(v.(string)) + } + if v, ok := d.GetOk("autoscaling_role"); ok { + params.AutoScalingRole = aws.String(v.(string)) + } + + if v, ok := d.GetOk("security_configuration"); ok { + params.SecurityConfiguration = aws.String(v.(string)) + } + + if instanceProfile != "" { + params.JobFlowRole = aws.String(instanceProfile) + } + + if v, ok := d.GetOk("bootstrap_action"); ok { + bootstrapActions := v.(*schema.Set).List() + params.BootstrapActions = expandBootstrapActions(bootstrapActions) + } + if v, ok := d.GetOk("tags"); ok { + tagsIn := v.(map[string]interface{}) + params.Tags = expandTags(tagsIn) + } + if v, ok := d.GetOk("configurations"); ok { + confUrl := v.(string) + params.Configurations = expandConfigures(confUrl) + } + + log.Printf("[DEBUG] EMR Cluster create options: %s", params) + resp, err := conn.RunJobFlow(params) + + if err != nil { + log.Printf("[ERROR] %s", err) + return err + } + + d.SetId(*resp.JobFlowId) + + log.Println( + "[INFO] Waiting for EMR Cluster to be available") + + stateConf := &resource.StateChangeConf{ + Pending: []string{"STARTING", "BOOTSTRAPPING"}, + Target: []string{"WAITING", "RUNNING"}, + Refresh: resourceAwsEMRClusterStateRefreshFunc(d, meta), + Timeout: 75 * time.Minute, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, // Wait 30 secs before starting + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("[WARN] Error waiting for EMR Cluster state to be \"WAITING\" or \"RUNNING\": %s", err) + } + + return resourceAwsEMRClusterRead(d, meta) +} + +func resourceAwsEMRClusterRead(d *schema.ResourceData, meta interface{}) error { + emrconn := meta.(*AWSClient).emrconn + + req := &emr.DescribeClusterInput{ + ClusterId: aws.String(d.Id()), + } + + resp, err := emrconn.DescribeCluster(req) + if err != nil { + return fmt.Errorf("Error reading EMR cluster: %s", err) + } + + if resp.Cluster == nil { + log.Printf("[DEBUG] EMR Cluster (%s) not found", d.Id()) + d.SetId("") + return nil + } + + cluster := resp.Cluster + + if cluster.Status != nil { + if *cluster.Status.State == "TERMINATED" { + log.Printf("[DEBUG] EMR Cluster (%s) was TERMINATED already", d.Id()) + d.SetId("") + return nil + } + + if *cluster.Status.State == "TERMINATED_WITH_ERRORS" { + log.Printf("[DEBUG] EMR Cluster (%s) was TERMINATED_WITH_ERRORS already", d.Id()) + d.SetId("") + return nil + } + + d.Set("cluster_state", cluster.Status.State) + } + + instanceGroups, err := fetchAllEMRInstanceGroups(meta, d.Id()) + if err == nil { + coreGroup := findGroup(instanceGroups, "CORE") + if coreGroup != nil { + d.Set("core_instance_type", coreGroup.InstanceType) + } + } + + d.Set("name", cluster.Name) + d.Set("service_role", cluster.ServiceRole) + d.Set("security_configuration", cluster.SecurityConfiguration) + d.Set("autoscaling_role", cluster.AutoScalingRole) + d.Set("release_label", cluster.ReleaseLabel) + d.Set("log_uri", cluster.LogUri) + d.Set("master_public_dns", cluster.MasterPublicDnsName) + d.Set("visible_to_all_users", cluster.VisibleToAllUsers) + d.Set("tags", tagsToMapEMR(cluster.Tags)) + + if err := d.Set("applications", flattenApplications(cluster.Applications)); err != 
nil { + log.Printf("[ERR] Error setting EMR Applications for cluster (%s): %s", d.Id(), err) + } + + // Configurations is a JSON document. It's built with an expand method but a + // simple string should be returned as JSON + if err := d.Set("configurations", cluster.Configurations); err != nil { + log.Printf("[ERR] Error setting EMR configurations for cluster (%s): %s", d.Id(), err) + } + + if err := d.Set("ec2_attributes", flattenEc2Attributes(cluster.Ec2InstanceAttributes)); err != nil { + log.Printf("[ERR] Error setting EMR Ec2 Attributes: %s", err) + } + + respBootstraps, err := emrconn.ListBootstrapActions(&emr.ListBootstrapActionsInput{ + ClusterId: cluster.Id, + }) + if err != nil { + log.Printf("[WARN] Error listing bootstrap actions: %s", err) + } + + if err := d.Set("bootstrap_action", flattenBootstrapArguments(respBootstraps.BootstrapActions)); err != nil { + log.Printf("[WARN] Error setting Bootstrap Actions: %s", err) + } + + return nil +} + +func resourceAwsEMRClusterUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).emrconn + + d.Partial(true) + + if d.HasChange("core_instance_count") { + d.SetPartial("core_instance_count") + log.Printf("[DEBUG] Modify EMR cluster") + groups, err := fetchAllEMRInstanceGroups(meta, d.Id()) + if err != nil { + log.Printf("[DEBUG] Error finding all instance groups: %s", err) + return err + } + + coreInstanceCount := d.Get("core_instance_count").(int) + coreGroup := findGroup(groups, "CORE") + if coreGroup == nil { + return fmt.Errorf("[ERR] Error finding core group") + } + + params := &emr.ModifyInstanceGroupsInput{ + InstanceGroups: []*emr.InstanceGroupModifyConfig{ + { + InstanceGroupId: coreGroup.Id, + InstanceCount: aws.Int64(int64(coreInstanceCount) - 1), + }, + }, + } + _, errModify := conn.ModifyInstanceGroups(params) + if errModify != nil { + log.Printf("[ERROR] %s", errModify) + return errModify + } + + log.Printf("[DEBUG] Modify EMR Cluster done...") + + log.Println("[INFO] Waiting for EMR Cluster to be available") + + stateConf := &resource.StateChangeConf{ + Pending: []string{"STARTING", "BOOTSTRAPPING"}, + Target: []string{"WAITING", "RUNNING"}, + Refresh: resourceAwsEMRClusterStateRefreshFunc(d, meta), + Timeout: 40 * time.Minute, + MinTimeout: 10 * time.Second, + Delay: 5 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("[WARN] Error waiting for EMR Cluster state to be \"WAITING\" or \"RUNNING\" after modification: %s", err) + } + } + + if d.HasChange("visible_to_all_users") { + d.SetPartial("visible_to_all_users") + _, errModify := conn.SetVisibleToAllUsers(&emr.SetVisibleToAllUsersInput{ + JobFlowIds: []*string{aws.String(d.Id())}, + VisibleToAllUsers: aws.Bool(d.Get("visible_to_all_users").(bool)), + }) + if errModify != nil { + log.Printf("[ERROR] %s", errModify) + return errModify + } + } + + if d.HasChange("termination_protection") { + d.SetPartial("termination_protection") + _, errModify := conn.SetTerminationProtection(&emr.SetTerminationProtectionInput{ + JobFlowIds: []*string{aws.String(d.Id())}, + TerminationProtected: aws.Bool(d.Get("termination_protection").(bool)), + }) + if errModify != nil { + log.Printf("[ERROR] %s", errModify) + return errModify + } + } + + if err := setTagsEMR(conn, d); err != nil { + return err + } else { + d.SetPartial("tags") + } + + d.Partial(false) + + return resourceAwsEMRClusterRead(d, meta) +} + +func resourceAwsEMRClusterDelete(d *schema.ResourceData, meta interface{}) error { + conn := 
meta.(*AWSClient).emrconn
+
+  req := &emr.TerminateJobFlowsInput{
+    JobFlowIds: []*string{
+      aws.String(d.Id()),
+    },
+  }
+
+  _, err := conn.TerminateJobFlows(req)
+  if err != nil {
+    log.Printf("[ERROR] %s", err)
+    return err
+  }
+
+  err = resource.Retry(10*time.Minute, func() *resource.RetryError {
+    resp, err := conn.ListInstances(&emr.ListInstancesInput{
+      ClusterId: aws.String(d.Id()),
+    })
+
+    if err != nil {
+      return resource.NonRetryableError(err)
+    }
+
+    // Check resp for nil before dereferencing it to count instances.
+    if resp == nil || len(resp.Instances) == 0 {
+      log.Printf("[DEBUG] No instances found for EMR Cluster (%s)", d.Id())
+      return nil
+    }
+
+    instanceCount := len(resp.Instances)
+
+    // Collect instance status states, wait for all instances to be terminated
+    // before moving on
+    var terminated []string
+    for j, i := range resp.Instances {
+      if i.Status != nil {
+        if *i.Status.State == "TERMINATED" {
+          terminated = append(terminated, *i.Ec2InstanceId)
+        }
+      } else {
+        log.Printf("[DEBUG] Cluster instance (%d : %s) has no status", j, *i.Ec2InstanceId)
+      }
+    }
+    if len(terminated) == instanceCount {
+      log.Printf("[DEBUG] All (%d) EMR Cluster (%s) Instances terminated", instanceCount, d.Id())
+      return nil
+    }
+    return resource.RetryableError(fmt.Errorf("[DEBUG] EMR Cluster (%s) has (%d) Instances remaining, retrying", d.Id(), len(resp.Instances)))
+  })
+
+  if err != nil {
+    log.Printf("[ERR] Error waiting for EMR Cluster (%s) Instances to drain", d.Id())
+  }
+
+  d.SetId("")
+  return nil
+}
+
+func expandApplications(apps []interface{}) []*emr.Application {
+  appOut := make([]*emr.Application, 0, len(apps))
+
+  for _, appName := range expandStringList(apps) {
+    app := &emr.Application{
+      Name: appName,
+    }
+    appOut = append(appOut, app)
+  }
+  return appOut
+}
+
+func flattenApplications(apps []*emr.Application) []interface{} {
+  appOut := make([]interface{}, 0, len(apps))
+
+  for _, app := range apps {
+    appOut = append(appOut, *app.Name)
+  }
+  return appOut
+}
+
+func flattenEc2Attributes(ia *emr.Ec2InstanceAttributes) []map[string]interface{} {
+  attrs := map[string]interface{}{}
+  result := make([]map[string]interface{}, 0)
+
+  if ia.Ec2KeyName != nil {
+    attrs["key_name"] = *ia.Ec2KeyName
+  }
+  if ia.Ec2SubnetId != nil {
+    attrs["subnet_id"] = *ia.Ec2SubnetId
+  }
+  if ia.IamInstanceProfile != nil {
+    attrs["instance_profile"] = *ia.IamInstanceProfile
+  }
+  if ia.EmrManagedMasterSecurityGroup != nil {
+    attrs["emr_managed_master_security_group"] = *ia.EmrManagedMasterSecurityGroup
+  }
+  if ia.EmrManagedSlaveSecurityGroup != nil {
+    attrs["emr_managed_slave_security_group"] = *ia.EmrManagedSlaveSecurityGroup
+  }
+
+  if len(ia.AdditionalMasterSecurityGroups) > 0 {
+    strs := aws.StringValueSlice(ia.AdditionalMasterSecurityGroups)
+    attrs["additional_master_security_groups"] = strings.Join(strs, ",")
+  }
+  if len(ia.AdditionalSlaveSecurityGroups) > 0 {
+    strs := aws.StringValueSlice(ia.AdditionalSlaveSecurityGroups)
+    attrs["additional_slave_security_groups"] = strings.Join(strs, ",")
+  }
+
+  if ia.ServiceAccessSecurityGroup != nil {
+    attrs["service_access_security_group"] = *ia.ServiceAccessSecurityGroup
+  }
+
+  result = append(result, attrs)
+
+  return result
+}
+
+func flattenBootstrapArguments(actions []*emr.Command) []map[string]interface{} {
+  result := make([]map[string]interface{}, 0)
+
+  for _, b := range actions {
+    attrs := make(map[string]interface{})
+    attrs["name"] = *b.Name
+    attrs["path"] = *b.ScriptPath
+    attrs["args"] = flattenStringList(b.Args)
+    result = append(result, attrs)
+  }
+
+  return result
+}
+
+func loadGroups(d 
*schema.ResourceData, meta interface{}) ([]*emr.InstanceGroup, error) { + emrconn := meta.(*AWSClient).emrconn + reqGrps := &emr.ListInstanceGroupsInput{ + ClusterId: aws.String(d.Id()), + } + + respGrps, errGrps := emrconn.ListInstanceGroups(reqGrps) + if errGrps != nil { + return nil, fmt.Errorf("Error reading EMR cluster: %s", errGrps) + } + return respGrps.InstanceGroups, nil +} + +func findGroup(grps []*emr.InstanceGroup, typ string) *emr.InstanceGroup { + for _, grp := range grps { + if grp.InstanceGroupType != nil { + if *grp.InstanceGroupType == typ { + return grp + } + } + } + return nil +} + +func expandTags(m map[string]interface{}) []*emr.Tag { + var result []*emr.Tag + for k, v := range m { + result = append(result, &emr.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + }) + } + + return result +} + +func tagsToMapEMR(ts []*emr.Tag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + result[*t.Key] = *t.Value + } + + return result +} + +func diffTagsEMR(oldTags, newTags []*emr.Tag) ([]*emr.Tag, []*emr.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[*t.Key] = *t.Value + } + + // Build the list of what to remove + var remove []*emr.Tag + for _, t := range oldTags { + old, ok := create[*t.Key] + if !ok || old != *t.Value { + // Delete it! + remove = append(remove, t) + } + } + + return expandTags(create), remove +} + +func setTagsEMR(conn *emr.EMR, d *schema.ResourceData) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsEMR(expandTags(o), expandTags(n)) + + // Set tags + if len(remove) > 0 { + log.Printf("[DEBUG] Removing tags: %s", remove) + k := make([]*string, len(remove), len(remove)) + for i, t := range remove { + k[i] = t.Key + } + + _, err := conn.RemoveTags(&emr.RemoveTagsInput{ + ResourceId: aws.String(d.Id()), + TagKeys: k, + }) + if err != nil { + return err + } + } + if len(create) > 0 { + log.Printf("[DEBUG] Creating tags: %s", create) + _, err := conn.AddTags(&emr.AddTagsInput{ + ResourceId: aws.String(d.Id()), + Tags: create, + }) + if err != nil { + return err + } + } + } + + return nil +} + +func expandBootstrapActions(bootstrapActions []interface{}) []*emr.BootstrapActionConfig { + actionsOut := []*emr.BootstrapActionConfig{} + + for _, raw := range bootstrapActions { + actionAttributes := raw.(map[string]interface{}) + actionName := actionAttributes["name"].(string) + actionPath := actionAttributes["path"].(string) + actionArgs := actionAttributes["args"].([]interface{}) + + action := &emr.BootstrapActionConfig{ + Name: aws.String(actionName), + ScriptBootstrapAction: &emr.ScriptBootstrapActionConfig{ + Path: aws.String(actionPath), + Args: expandStringList(actionArgs), + }, + } + actionsOut = append(actionsOut, action) + } + + return actionsOut +} + +func expandConfigures(input string) []*emr.Configuration { + configsOut := []*emr.Configuration{} + if strings.HasPrefix(input, "http") { + if err := readHttpJson(input, &configsOut); err != nil { + log.Printf("[ERR] Error reading HTTP JSON: %s", err) + } + } else if strings.HasSuffix(input, ".json") { + if err := readLocalJson(input, &configsOut); err != nil { + log.Printf("[ERR] Error reading local JSON: %s", err) + } + } else { + if err := readBodyJson(input, &configsOut); err != nil { + log.Printf("[ERR] Error reading body JSON: %s", err) + } + } + 
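+  // To illustrate the dispatch above with hypothetical inputs:
+  //   "http://example.com/emr-config.json" is fetched with readHttpJson,
+  //   "./emr-config.json" is read from disk with readLocalJson, and
+  //   `[{"Classification": "spark-defaults"}]` is parsed as-is with readBodyJson.
+  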
log.Printf("[DEBUG] Expanded EMR Configurations %s", configsOut) + + return configsOut +} + +func readHttpJson(url string, target interface{}) error { + r, err := http.Get(url) + if err != nil { + return err + } + defer r.Body.Close() + + return json.NewDecoder(r.Body).Decode(target) +} + +func readLocalJson(localFile string, target interface{}) error { + file, e := ioutil.ReadFile(localFile) + if e != nil { + log.Printf("[ERROR] %s", e) + return e + } + + return json.Unmarshal(file, target) +} + +func readBodyJson(body string, target interface{}) error { + log.Printf("[DEBUG] Raw Body %s\n", body) + err := json.Unmarshal([]byte(body), target) + if err != nil { + log.Printf("[ERROR] parsing JSON %s", err) + return err + } + return nil +} + +func resourceAwsEMRClusterStateRefreshFunc(d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + conn := meta.(*AWSClient).emrconn + + log.Printf("[INFO] Reading EMR Cluster Information: %s", d.Id()) + params := &emr.DescribeClusterInput{ + ClusterId: aws.String(d.Id()), + } + + resp, err := conn.DescribeCluster(params) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if "ClusterNotFound" == awsErr.Code() { + return 42, "destroyed", nil + } + } + log.Printf("[WARN] Error on retrieving EMR Cluster (%s) when waiting: %s", d.Id(), err) + return nil, "", err + } + + emrc := resp.Cluster + + if emrc == nil { + return 42, "destroyed", nil + } + + if resp.Cluster.Status != nil { + log.Printf("[DEBUG] EMR Cluster status (%s): %s", d.Id(), *resp.Cluster.Status) + } + + status := emrc.Status + if *status.State == "TERMINATING" { + reason := *status.StateChangeReason + return emrc, *status.State, fmt.Errorf("EMR Cluster is terminating. %s: %s", + *reason.Code, *reason.Message) + } + + return emrc, *status.State, nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_emr_instance_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_emr_instance_group.go new file mode 100644 index 000000000..66750b48a --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_emr_instance_group.go @@ -0,0 +1,316 @@ +package aws + +import ( + "errors" + "log" + "time" + + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/emr" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +var emrInstanceGroupNotFound = errors.New("No matching EMR Instance Group") + +func resourceAwsEMRInstanceGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsEMRInstanceGroupCreate, + Read: resourceAwsEMRInstanceGroupRead, + Update: resourceAwsEMRInstanceGroupUpdate, + Delete: resourceAwsEMRInstanceGroupDelete, + Schema: map[string]*schema.Schema{ + "cluster_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "instance_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "instance_count": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + }, + "running_instance_count": { + Type: schema.TypeInt, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "ebs_optimized": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + "ebs_config": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + 
Schema: map[string]*schema.Schema{ + "iops": { + Type: schema.TypeInt, + Optional: true, + }, + "size": { + Type: schema.TypeInt, + Required: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAwsEmrEbsVolumeType, + }, + "volumes_per_instance": { + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, + }, + } +} + +// Populates an emr.EbsConfiguration struct +func readEmrEBSConfig(d *schema.ResourceData) *emr.EbsConfiguration { + result := &emr.EbsConfiguration{} + if v, ok := d.GetOk("ebs_optimized"); ok { + result.EbsOptimized = aws.Bool(v.(bool)) + } + + ebsConfigs := make([]*emr.EbsBlockDeviceConfig, 0) + if rawConfig, ok := d.GetOk("ebs_config"); ok { + configList := rawConfig.(*schema.Set).List() + for _, config := range configList { + conf := config.(map[string]interface{}) + ebs := &emr.EbsBlockDeviceConfig{} + volumeSpec := &emr.VolumeSpecification{ + SizeInGB: aws.Int64(int64(conf["size"].(int))), + VolumeType: aws.String(conf["type"].(string)), + } + if v, ok := conf["iops"].(int); ok && v != 0 { + volumeSpec.Iops = aws.Int64(int64(v)) + } + if v, ok := conf["volumes_per_instance"].(int); ok && v != 0 { + ebs.VolumesPerInstance = aws.Int64(int64(v)) + } + ebs.VolumeSpecification = volumeSpec + ebsConfigs = append(ebsConfigs, ebs) + } + } + result.EbsBlockDeviceConfigs = ebsConfigs + return result +} + +func resourceAwsEMRInstanceGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).emrconn + + clusterId := d.Get("cluster_id").(string) + instanceType := d.Get("instance_type").(string) + instanceCount := d.Get("instance_count").(int) + groupName := d.Get("name").(string) + + ebsConfig := readEmrEBSConfig(d) + + params := &emr.AddInstanceGroupsInput{ + InstanceGroups: []*emr.InstanceGroupConfig{ + { + InstanceRole: aws.String("TASK"), + InstanceCount: aws.Int64(int64(instanceCount)), + InstanceType: aws.String(instanceType), + Name: aws.String(groupName), + EbsConfiguration: ebsConfig, + }, + }, + JobFlowId: aws.String(clusterId), + } + + log.Printf("[DEBUG] Creating EMR task group params: %s", params) + resp, err := conn.AddInstanceGroups(params) + if err != nil { + return err + } + + log.Printf("[DEBUG] Created EMR task group finished: %#v", resp) + if resp == nil || len(resp.InstanceGroupIds) == 0 { + return fmt.Errorf("Error creating instance groups: no instance group returned") + } + d.SetId(*resp.InstanceGroupIds[0]) + + return nil +} + +func resourceAwsEMRInstanceGroupRead(d *schema.ResourceData, meta interface{}) error { + group, err := fetchEMRInstanceGroup(meta, d.Get("cluster_id").(string), d.Id()) + if err != nil { + switch err { + case emrInstanceGroupNotFound: + log.Printf("[DEBUG] EMR Instance Group (%s) not found, removing", d.Id()) + d.SetId("") + return nil + default: + return err + } + } + + // Guard against the chance of fetchEMRInstanceGroup returning nil group but + // not a emrInstanceGroupNotFound error + if group == nil { + log.Printf("[DEBUG] EMR Instance Group (%s) not found, removing", d.Id()) + d.SetId("") + return nil + } + + d.Set("name", group.Name) + d.Set("instance_count", group.RequestedInstanceCount) + d.Set("running_instance_count", group.RunningInstanceCount) + d.Set("instance_type", group.InstanceType) + if group.Status != nil && group.Status.State != nil { + d.Set("status", group.Status.State) + } + + return nil +} + +func fetchAllEMRInstanceGroups(meta interface{}, clusterId string) ([]*emr.InstanceGroup, error) { + conn := meta.(*AWSClient).emrconn + req := 
&emr.ListInstanceGroupsInput{ + ClusterId: aws.String(clusterId), + } + + var groups []*emr.InstanceGroup + marker := aws.String("intitial") + for marker != nil { + log.Printf("[DEBUG] EMR Cluster Instance Marker: %s", *marker) + respGrps, errGrps := conn.ListInstanceGroups(req) + if errGrps != nil { + return nil, fmt.Errorf("[ERR] Error reading EMR cluster (%s): %s", clusterId, errGrps) + } + if respGrps == nil { + return nil, fmt.Errorf("[ERR] Error reading EMR Instance Group for cluster (%s)", clusterId) + } + + if respGrps.InstanceGroups != nil { + for _, g := range respGrps.InstanceGroups { + groups = append(groups, g) + } + } else { + log.Printf("[DEBUG] EMR Instance Group list was empty") + } + marker = respGrps.Marker + } + + if len(groups) == 0 { + return nil, fmt.Errorf("[WARN] No instance groups found for EMR Cluster (%s)", clusterId) + } + + return groups, nil +} + +func fetchEMRInstanceGroup(meta interface{}, clusterId, groupId string) (*emr.InstanceGroup, error) { + groups, err := fetchAllEMRInstanceGroups(meta, clusterId) + if err != nil { + return nil, err + } + + var group *emr.InstanceGroup + for _, ig := range groups { + if groupId == *ig.Id { + group = ig + break + } + } + + if group != nil { + return group, nil + } + + return nil, emrInstanceGroupNotFound +} + +func resourceAwsEMRInstanceGroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).emrconn + + log.Printf("[DEBUG] Modify EMR task group") + instanceCount := d.Get("instance_count").(int) + + params := &emr.ModifyInstanceGroupsInput{ + InstanceGroups: []*emr.InstanceGroupModifyConfig{ + { + InstanceGroupId: aws.String(d.Id()), + InstanceCount: aws.Int64(int64(instanceCount)), + }, + }, + } + + _, err := conn.ModifyInstanceGroups(params) + if err != nil { + return err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"PROVISIONING", "BOOTSTRAPPING", "RESIZING"}, + Target: []string{"RUNNING"}, + Refresh: instanceGroupStateRefresh(conn, d.Get("cluster_id").(string), d.Id()), + Timeout: 10 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for instance (%s) to terminate: %s", d.Id(), err) + } + + return resourceAwsEMRInstanceGroupRead(d, meta) +} + +func instanceGroupStateRefresh(meta interface{}, clusterID, igID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + group, err := fetchEMRInstanceGroup(meta, clusterID, igID) + if err != nil { + return nil, "Not Found", err + } + + if group.Status == nil || group.Status.State == nil { + log.Printf("[WARN] ERM Instance Group found, but without state") + return nil, "Undefined", fmt.Errorf("Undefined EMR Cluster Instance Group state") + } + + return group, *group.Status.State, nil + } +} + +func resourceAwsEMRInstanceGroupDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[WARN] AWS EMR Instance Group does not support DELETE; resizing cluster to zero before removing from state") + conn := meta.(*AWSClient).emrconn + params := &emr.ModifyInstanceGroupsInput{ + InstanceGroups: []*emr.InstanceGroupModifyConfig{ + { + InstanceGroupId: aws.String(d.Id()), + InstanceCount: aws.Int64(0), + }, + }, + } + + _, err := conn.ModifyInstanceGroups(params) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_emr_security_configuration.go 
b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_emr_security_configuration.go new file mode 100644 index 000000000..0002d5d29 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_emr_security_configuration.go @@ -0,0 +1,132 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/emr" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsEMRSecurityConfiguration() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsEmrSecurityConfigurationCreate, + Read: resourceAwsEmrSecurityConfigurationRead, + Delete: resourceAwsEmrSecurityConfigurationDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 10280 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 10280 characters", k)) + } + return + }, + }, + "name_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 10000 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 10000 characters, name is limited to 10280", k)) + } + return + }, + }, + + "configuration": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateJsonString, + }, + + "creation_date": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsEmrSecurityConfigurationCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).emrconn + + var emrSCName string + if v, ok := d.GetOk("name"); ok { + emrSCName = v.(string) + } else { + if v, ok := d.GetOk("name_prefix"); ok { + emrSCName = resource.PrefixedUniqueId(v.(string)) + } else { + emrSCName = resource.PrefixedUniqueId("tf-emr-sc-") + } + } + + resp, err := conn.CreateSecurityConfiguration(&emr.CreateSecurityConfigurationInput{ + Name: aws.String(emrSCName), + SecurityConfiguration: aws.String(d.Get("configuration").(string)), + }) + + if err != nil { + return err + } + + d.SetId(*resp.Name) + return resourceAwsEmrSecurityConfigurationRead(d, meta) +} + +func resourceAwsEmrSecurityConfigurationRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).emrconn + + resp, err := conn.DescribeSecurityConfiguration(&emr.DescribeSecurityConfigurationInput{ + Name: aws.String(d.Id()), + }) + if err != nil { + if isAWSErr(err, "InvalidRequestException", "does not exist") { + log.Printf("[WARN] EMR Security Configuraiton (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + return err + } + + d.Set("creation_date", resp.CreationDateTime) + d.Set("name", resp.Name) + d.Set("configuration", resp.SecurityConfiguration) + + return nil +} + +func resourceAwsEmrSecurityConfigurationDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).emrconn + + _, err := conn.DeleteSecurityConfiguration(&emr.DeleteSecurityConfigurationInput{ + Name: aws.String(d.Id()), + }) + if err != nil { + if isAWSErr(err, "InvalidRequestException", "does not exist") { + 
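+      // The configuration is already gone, so treat the delete as complete
+      // and let Terraform drop the resource from state.
+      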
d.SetId("") + return nil + } + return err + } + d.SetId("") + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_flow_log.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_flow_log.go new file mode 100644 index 000000000..a95a016a8 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_flow_log.go @@ -0,0 +1,169 @@ +package aws + +import ( + "fmt" + "log" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsFlowLog() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsLogFlowCreate, + Read: resourceAwsLogFlowRead, + Delete: resourceAwsLogFlowDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "iam_role_arn": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "log_group_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "vpc_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"subnet_id", "eni_id"}, + }, + + "subnet_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"eni_id", "vpc_id"}, + }, + + "eni_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"subnet_id", "vpc_id"}, + }, + + "traffic_type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsLogFlowCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + types := []struct { + ID string + Type string + }{ + {ID: d.Get("vpc_id").(string), Type: "VPC"}, + {ID: d.Get("subnet_id").(string), Type: "Subnet"}, + {ID: d.Get("eni_id").(string), Type: "NetworkInterface"}, + } + + var resourceId string + var resourceType string + for _, t := range types { + if t.ID != "" { + resourceId = t.ID + resourceType = t.Type + break + } + } + + if resourceId == "" || resourceType == "" { + return fmt.Errorf("Error: Flow Logs require either a VPC, Subnet, or ENI ID") + } + + opts := &ec2.CreateFlowLogsInput{ + DeliverLogsPermissionArn: aws.String(d.Get("iam_role_arn").(string)), + LogGroupName: aws.String(d.Get("log_group_name").(string)), + ResourceIds: []*string{aws.String(resourceId)}, + ResourceType: aws.String(resourceType), + TrafficType: aws.String(d.Get("traffic_type").(string)), + } + + log.Printf( + "[DEBUG] Flow Log Create configuration: %s", opts) + resp, err := conn.CreateFlowLogs(opts) + if err != nil { + return fmt.Errorf("Error creating Flow Log for (%s), error: %s", resourceId, err) + } + + if len(resp.FlowLogIds) > 1 { + return fmt.Errorf("Error: multiple Flow Logs created for (%s)", resourceId) + } + + d.SetId(*resp.FlowLogIds[0]) + + return resourceAwsLogFlowRead(d, meta) +} + +func resourceAwsLogFlowRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + opts := &ec2.DescribeFlowLogsInput{ + FlowLogIds: []*string{aws.String(d.Id())}, + } + + resp, err := conn.DescribeFlowLogs(opts) + if err != nil { + log.Printf("[WARN] Error describing Flow Logs for id (%s)", d.Id()) + d.SetId("") + return nil + } + + if len(resp.FlowLogs) == 0 { + log.Printf("[WARN] No Flow Logs found for id (%s)", d.Id()) + d.SetId("") + return 
nil + } + + fl := resp.FlowLogs[0] + d.Set("traffic_type", fl.TrafficType) + d.Set("log_group_name", fl.LogGroupName) + d.Set("iam_role_arn", fl.DeliverLogsPermissionArn) + + var resourceKey string + if strings.HasPrefix(*fl.ResourceId, "vpc-") { + resourceKey = "vpc_id" + } else if strings.HasPrefix(*fl.ResourceId, "subnet-") { + resourceKey = "subnet_id" + } else if strings.HasPrefix(*fl.ResourceId, "eni-") { + resourceKey = "eni_id" + } + if resourceKey != "" { + d.Set(resourceKey, fl.ResourceId) + } + + return nil +} + +func resourceAwsLogFlowDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + log.Printf( + "[DEBUG] Flow Log Destroy: %s", d.Id()) + _, err := conn.DeleteFlowLogs(&ec2.DeleteFlowLogsInput{ + FlowLogIds: []*string{aws.String(d.Id())}, + }) + + if err != nil { + return fmt.Errorf("[WARN] Error deleting Flow Log with ID (%s), error: %s", d.Id(), err) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glacier_vault.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glacier_vault.go new file mode 100644 index 000000000..64ac267e8 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_glacier_vault.go @@ -0,0 +1,416 @@ +package aws + +import ( + "errors" + "fmt" + "log" + "regexp" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/glacier" +) + +func resourceAwsGlacierVault() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsGlacierVaultCreate, + Read: resourceAwsGlacierVaultRead, + Update: resourceAwsGlacierVaultUpdate, + Delete: resourceAwsGlacierVaultDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[.0-9A-Za-z-_]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only alphanumeric characters, hyphens, underscores, and periods are allowed in %q", k)) + } + if len(value) > 255 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 255 characters", k)) + } + return + }, + }, + + "location": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "access_policy": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateJsonString, + StateFunc: func(v interface{}) string { + json, _ := normalizeJsonString(v) + return json + }, + }, + + "notification": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "events": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "sns_topic": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsGlacierVaultCreate(d *schema.ResourceData, meta interface{}) error { + glacierconn := meta.(*AWSClient).glacierconn + + input := &glacier.CreateVaultInput{ + VaultName: aws.String(d.Get("name").(string)), + } + + out, err := 
glacierconn.CreateVault(input)
+	if err != nil {
+		return fmt.Errorf("Error creating Glacier Vault: %s", err)
+	}
+
+	d.SetId(d.Get("name").(string))
+	d.Set("location", *out.Location)
+
+	return resourceAwsGlacierVaultUpdate(d, meta)
+}
+
+func resourceAwsGlacierVaultUpdate(d *schema.ResourceData, meta interface{}) error {
+	glacierconn := meta.(*AWSClient).glacierconn
+
+	if err := setGlacierVaultTags(glacierconn, d); err != nil {
+		return err
+	}
+
+	if d.HasChange("access_policy") {
+		if err := resourceAwsGlacierVaultPolicyUpdate(glacierconn, d); err != nil {
+			return err
+		}
+	}
+
+	if d.HasChange("notification") {
+		if err := resourceAwsGlacierVaultNotificationUpdate(glacierconn, d); err != nil {
+			return err
+		}
+	}
+
+	return resourceAwsGlacierVaultRead(d, meta)
+}
+
+func resourceAwsGlacierVaultRead(d *schema.ResourceData, meta interface{}) error {
+	glacierconn := meta.(*AWSClient).glacierconn
+
+	input := &glacier.DescribeVaultInput{
+		VaultName: aws.String(d.Id()),
+	}
+
+	out, err := glacierconn.DescribeVault(input)
+	if err != nil {
+		return fmt.Errorf("Error reading Glacier Vault: %s", err.Error())
+	}
+
+	awsClient := meta.(*AWSClient)
+	d.Set("name", out.VaultName)
+	d.Set("arn", out.VaultARN)
+
+	location, err := buildGlacierVaultLocation(awsClient.accountid, d.Id())
+	if err != nil {
+		return err
+	}
+	d.Set("location", location)
+
+	tags, err := getGlacierVaultTags(glacierconn, d.Id())
+	if err != nil {
+		return err
+	}
+	d.Set("tags", tags)
+
+	log.Printf("[DEBUG] Getting the access_policy for Vault %s", d.Id())
+	pol, err := glacierconn.GetVaultAccessPolicy(&glacier.GetVaultAccessPolicyInput{
+		VaultName: aws.String(d.Id()),
+	})
+
+	if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "ResourceNotFoundException" {
+		d.Set("access_policy", "")
+	} else if pol != nil {
+		policy, err := normalizeJsonString(*pol.Policy.Policy)
+		if err != nil {
+			return errwrap.Wrapf("access policy contains an invalid JSON: {{err}}", err)
+		}
+		d.Set("access_policy", policy)
+	} else {
+		return err
+	}
+
+	notifications, err := getGlacierVaultNotification(glacierconn, d.Id())
+	if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "ResourceNotFoundException" {
+		d.Set("notification", "")
+	} else if err == nil {
+		d.Set("notification", notifications)
+	} else {
+		return err
+	}
+
+	return nil
+}
+
+func resourceAwsGlacierVaultDelete(d *schema.ResourceData, meta interface{}) error {
+	glacierconn := meta.(*AWSClient).glacierconn
+
+	log.Printf("[DEBUG] Glacier Delete Vault: %s", d.Id())
+	_, err := glacierconn.DeleteVault(&glacier.DeleteVaultInput{
+		VaultName: aws.String(d.Id()),
+	})
+	if err != nil {
+		return fmt.Errorf("Error deleting Glacier Vault: %s", err.Error())
+	}
+	return nil
+}
+
+func resourceAwsGlacierVaultNotificationUpdate(glacierconn *glacier.Glacier, d *schema.ResourceData) error {
+
+	if v, ok := d.GetOk("notification"); ok {
+		settings := v.([]interface{})
+
+		if len(settings) > 1 {
+			return fmt.Errorf("Only a single Notification Block is allowed for Glacier Vault")
+		} else if len(settings) == 1 {
+			s := settings[0].(map[string]interface{})
+			var events []*string
+			for _, id := range s["events"].(*schema.Set).List() {
+				events = append(events, aws.String(id.(string)))
+			}
+
+			_, err := glacierconn.SetVaultNotifications(&glacier.SetVaultNotificationsInput{
+				VaultName: aws.String(d.Id()),
+				VaultNotificationConfig: &glacier.VaultNotificationConfig{
+					SNSTopic: aws.String(s["sns_topic"].(string)),
+					Events: events,
+				},
+			})
+
+			if err != nil {
+				return 
fmt.Errorf("Error Updating Glacier Vault Notifications: %s", err.Error()) + } + } + } else { + _, err := glacierconn.DeleteVaultNotifications(&glacier.DeleteVaultNotificationsInput{ + VaultName: aws.String(d.Id()), + }) + + if err != nil { + return fmt.Errorf("Error Removing Glacier Vault Notifications: %s", err.Error()) + } + + } + + return nil +} + +func resourceAwsGlacierVaultPolicyUpdate(glacierconn *glacier.Glacier, d *schema.ResourceData) error { + vaultName := d.Id() + policyContents := d.Get("access_policy").(string) + + policy := &glacier.VaultAccessPolicy{ + Policy: aws.String(policyContents), + } + + if policyContents != "" { + log.Printf("[DEBUG] Glacier Vault: %s, put policy", vaultName) + + _, err := glacierconn.SetVaultAccessPolicy(&glacier.SetVaultAccessPolicyInput{ + VaultName: aws.String(d.Id()), + Policy: policy, + }) + + if err != nil { + return fmt.Errorf("Error putting Glacier Vault policy: %s", err.Error()) + } + } else { + log.Printf("[DEBUG] Glacier Vault: %s, delete policy: %s", vaultName, policy) + _, err := glacierconn.DeleteVaultAccessPolicy(&glacier.DeleteVaultAccessPolicyInput{ + VaultName: aws.String(d.Id()), + }) + + if err != nil { + return fmt.Errorf("Error deleting Glacier Vault policy: %s", err.Error()) + } + } + + return nil +} + +func setGlacierVaultTags(conn *glacier.Glacier, d *schema.ResourceData) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffGlacierVaultTags(mapGlacierVaultTags(o), mapGlacierVaultTags(n)) + + // Set tags + if len(remove) > 0 { + tagsToRemove := &glacier.RemoveTagsFromVaultInput{ + VaultName: aws.String(d.Id()), + TagKeys: glacierStringsToPointyString(remove), + } + + log.Printf("[DEBUG] Removing tags: from %s", d.Id()) + _, err := conn.RemoveTagsFromVault(tagsToRemove) + if err != nil { + return err + } + } + if len(create) > 0 { + tagsToAdd := &glacier.AddTagsToVaultInput{ + VaultName: aws.String(d.Id()), + Tags: glacierVaultTagsFromMap(create), + } + + log.Printf("[DEBUG] Creating tags: for %s", d.Id()) + _, err := conn.AddTagsToVault(tagsToAdd) + if err != nil { + return err + } + } + } + + return nil +} + +func mapGlacierVaultTags(m map[string]interface{}) map[string]string { + results := make(map[string]string) + for k, v := range m { + results[k] = v.(string) + } + + return results +} + +func diffGlacierVaultTags(oldTags, newTags map[string]string) (map[string]string, []string) { + + create := make(map[string]string) + for k, v := range newTags { + create[k] = v + } + + // Build the list of what to remove + var remove []string + for k, v := range oldTags { + old, ok := create[k] + if !ok || old != v { + // Delete it! 
+ remove = append(remove, k) + } + } + + return create, remove +} + +func getGlacierVaultTags(glacierconn *glacier.Glacier, vaultName string) (map[string]string, error) { + request := &glacier.ListTagsForVaultInput{ + VaultName: aws.String(vaultName), + } + + log.Printf("[DEBUG] Getting the tags: for %s", vaultName) + response, err := glacierconn.ListTagsForVault(request) + if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "NoSuchTagSet" { + return map[string]string{}, nil + } else if err != nil { + return nil, err + } + + return glacierVaultTagsToMap(response.Tags), nil +} + +func glacierVaultTagsToMap(responseTags map[string]*string) map[string]string { + results := make(map[string]string, len(responseTags)) + for k, v := range responseTags { + results[k] = *v + } + + return results +} + +func glacierVaultTagsFromMap(responseTags map[string]string) map[string]*string { + results := make(map[string]*string, len(responseTags)) + for k, v := range responseTags { + results[k] = aws.String(v) + } + + return results +} + +func glacierStringsToPointyString(s []string) []*string { + results := make([]*string, len(s)) + for i, x := range s { + results[i] = aws.String(x) + } + + return results +} + +func glacierPointersToStringList(pointers []*string) []interface{} { + list := make([]interface{}, len(pointers)) + for i, v := range pointers { + list[i] = *v + } + return list +} + +func buildGlacierVaultLocation(accountId, vaultName string) (string, error) { + if accountId == "" { + return "", errors.New("AWS account ID unavailable - failed to construct Vault location") + } + return fmt.Sprintf("/" + accountId + "/vaults/" + vaultName), nil +} + +func getGlacierVaultNotification(glacierconn *glacier.Glacier, vaultName string) ([]map[string]interface{}, error) { + request := &glacier.GetVaultNotificationsInput{ + VaultName: aws.String(vaultName), + } + + response, err := glacierconn.GetVaultNotifications(request) + if err != nil { + return nil, fmt.Errorf("Error reading Glacier Vault Notifications: %s", err.Error()) + } + + notifications := make(map[string]interface{}, 0) + + log.Print("[DEBUG] Flattening Glacier Vault Notifications") + + notifications["events"] = schema.NewSet(schema.HashString, glacierPointersToStringList(response.VaultNotificationConfig.Events)) + notifications["sns_topic"] = *response.VaultNotificationConfig.SNSTopic + + return []map[string]interface{}{notifications}, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_access_key.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_access_key.go new file mode 100644 index 000000000..515069c03 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_access_key.go @@ -0,0 +1,178 @@ +package aws + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/iam" + + "github.com/hashicorp/terraform/helper/encryption" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsIamAccessKey() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsIamAccessKeyCreate, + Read: resourceAwsIamAccessKeyRead, + Delete: resourceAwsIamAccessKeyDelete, + + Schema: map[string]*schema.Schema{ + "user": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "status": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + 
"secret": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Deprecated: "Please use a PGP key to encrypt", + }, + "ses_smtp_password": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "pgp_key": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + "key_fingerprint": { + Type: schema.TypeString, + Computed: true, + }, + "encrypted_secret": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsIamAccessKeyCreate(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + request := &iam.CreateAccessKeyInput{ + UserName: aws.String(d.Get("user").(string)), + } + + createResp, err := iamconn.CreateAccessKey(request) + if err != nil { + return fmt.Errorf( + "Error creating access key for user %s: %s", + *request.UserName, + err, + ) + } + + d.SetId(*createResp.AccessKey.AccessKeyId) + + if createResp.AccessKey == nil || createResp.AccessKey.SecretAccessKey == nil { + return fmt.Errorf("[ERR] CreateAccessKey response did not contain a Secret Access Key as expected") + } + + if v, ok := d.GetOk("pgp_key"); ok { + pgpKey := v.(string) + encryptionKey, err := encryption.RetrieveGPGKey(pgpKey) + if err != nil { + return err + } + fingerprint, encrypted, err := encryption.EncryptValue(encryptionKey, *createResp.AccessKey.SecretAccessKey, "IAM Access Key Secret") + if err != nil { + return err + } + + d.Set("key_fingerprint", fingerprint) + d.Set("encrypted_secret", encrypted) + } else { + if err := d.Set("secret", createResp.AccessKey.SecretAccessKey); err != nil { + return err + } + } + + d.Set("ses_smtp_password", + sesSmtpPasswordFromSecretKey(createResp.AccessKey.SecretAccessKey)) + + return resourceAwsIamAccessKeyReadResult(d, &iam.AccessKeyMetadata{ + AccessKeyId: createResp.AccessKey.AccessKeyId, + CreateDate: createResp.AccessKey.CreateDate, + Status: createResp.AccessKey.Status, + UserName: createResp.AccessKey.UserName, + }) +} + +func resourceAwsIamAccessKeyRead(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + request := &iam.ListAccessKeysInput{ + UserName: aws.String(d.Get("user").(string)), + } + + getResp, err := iamconn.ListAccessKeys(request) + if err != nil { + if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { // XXX TEST ME + // the user does not exist, so the key can't exist. + d.SetId("") + return nil + } + return fmt.Errorf("Error reading IAM acces key: %s", err) + } + + for _, key := range getResp.AccessKeyMetadata { + if key.AccessKeyId != nil && *key.AccessKeyId == d.Id() { + return resourceAwsIamAccessKeyReadResult(d, key) + } + } + + // Guess the key isn't around anymore. 
+ d.SetId("") + return nil +} + +func resourceAwsIamAccessKeyReadResult(d *schema.ResourceData, key *iam.AccessKeyMetadata) error { + d.SetId(*key.AccessKeyId) + if err := d.Set("user", key.UserName); err != nil { + return err + } + if err := d.Set("status", key.Status); err != nil { + return err + } + return nil +} + +func resourceAwsIamAccessKeyDelete(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + request := &iam.DeleteAccessKeyInput{ + AccessKeyId: aws.String(d.Id()), + UserName: aws.String(d.Get("user").(string)), + } + + if _, err := iamconn.DeleteAccessKey(request); err != nil { + return fmt.Errorf("Error deleting access key %s: %s", d.Id(), err) + } + return nil +} + +func sesSmtpPasswordFromSecretKey(key *string) string { + if key == nil { + return "" + } + version := byte(0x02) + message := []byte("SendRawEmail") + hmacKey := []byte(*key) + h := hmac.New(sha256.New, hmacKey) + h.Write(message) + rawSig := h.Sum(nil) + versionedSig := make([]byte, 0, len(rawSig)+1) + versionedSig = append(versionedSig, version) + versionedSig = append(versionedSig, rawSig...) + return base64.StdEncoding.EncodeToString(versionedSig) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_account_alias.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_account_alias.go new file mode 100644 index 000000000..3b1b86f1e --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_account_alias.go @@ -0,0 +1,94 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsIamAccountAlias() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsIamAccountAliasCreate, + Read: resourceAwsIamAccountAliasRead, + Delete: resourceAwsIamAccountAliasDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "account_alias": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateAccountAlias, + }, + }, + } +} + +func resourceAwsIamAccountAliasCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iamconn + + account_alias := d.Get("account_alias").(string) + + params := &iam.CreateAccountAliasInput{ + AccountAlias: aws.String(account_alias), + } + + _, err := conn.CreateAccountAlias(params) + + if err != nil { + return fmt.Errorf("Error creating account alias with name %s", account_alias) + } + + d.SetId(account_alias) + + return nil +} + +func resourceAwsIamAccountAliasRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iamconn + + params := &iam.ListAccountAliasesInput{} + + resp, err := conn.ListAccountAliases(params) + + if err != nil { + return err + } + + if resp == nil || len(resp.AccountAliases) == 0 { + d.SetId("") + return nil + } + + account_alias := aws.StringValue(resp.AccountAliases[0]) + + d.SetId(account_alias) + d.Set("account_alias", account_alias) + + return nil +} + +func resourceAwsIamAccountAliasDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iamconn + + account_alias := d.Get("account_alias").(string) + + params := &iam.DeleteAccountAliasInput{ + AccountAlias: aws.String(account_alias), + } + + _, err := conn.DeleteAccountAlias(params) + + if err != nil { + return fmt.Errorf("Error deleting 
account alias with name %s", account_alias) + } + + d.SetId("") + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_account_password_policy.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_account_password_policy.go new file mode 100644 index 000000000..71dfbf0c8 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_account_password_policy.go @@ -0,0 +1,168 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/iam" + + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsIamAccountPasswordPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsIamAccountPasswordPolicyUpdate, + Read: resourceAwsIamAccountPasswordPolicyRead, + Update: resourceAwsIamAccountPasswordPolicyUpdate, + Delete: resourceAwsIamAccountPasswordPolicyDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "allow_users_to_change_password": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "expire_passwords": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + }, + "hard_expiry": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "max_password_age": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "minimum_password_length": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 6, + }, + "password_reuse_prevention": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "require_lowercase_characters": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "require_numbers": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "require_symbols": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "require_uppercase_characters": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + }, + } +} + +func resourceAwsIamAccountPasswordPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + input := &iam.UpdateAccountPasswordPolicyInput{} + + if v, ok := d.GetOk("allow_users_to_change_password"); ok { + input.AllowUsersToChangePassword = aws.Bool(v.(bool)) + } + if v, ok := d.GetOk("hard_expiry"); ok { + input.HardExpiry = aws.Bool(v.(bool)) + } + if v, ok := d.GetOk("max_password_age"); ok { + input.MaxPasswordAge = aws.Int64(int64(v.(int))) + } + if v, ok := d.GetOk("minimum_password_length"); ok { + input.MinimumPasswordLength = aws.Int64(int64(v.(int))) + } + if v, ok := d.GetOk("password_reuse_prevention"); ok { + input.PasswordReusePrevention = aws.Int64(int64(v.(int))) + } + if v, ok := d.GetOk("require_lowercase_characters"); ok { + input.RequireLowercaseCharacters = aws.Bool(v.(bool)) + } + if v, ok := d.GetOk("require_numbers"); ok { + input.RequireNumbers = aws.Bool(v.(bool)) + } + if v, ok := d.GetOk("require_symbols"); ok { + input.RequireSymbols = aws.Bool(v.(bool)) + } + if v, ok := d.GetOk("require_uppercase_characters"); ok { + input.RequireUppercaseCharacters = aws.Bool(v.(bool)) + } + + log.Printf("[DEBUG] Updating IAM account password policy: %s", input) + _, err := iamconn.UpdateAccountPasswordPolicy(input) + if err != nil { + return 
fmt.Errorf("Error updating IAM Password Policy: %s", err) + } + log.Println("[DEBUG] IAM account password policy updated") + + d.SetId("iam-account-password-policy") + + return resourceAwsIamAccountPasswordPolicyRead(d, meta) +} + +func resourceAwsIamAccountPasswordPolicyRead(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + input := &iam.GetAccountPasswordPolicyInput{} + resp, err := iamconn.GetAccountPasswordPolicy(input) + if err != nil { + awsErr, ok := err.(awserr.Error) + if ok && awsErr.Code() == "NoSuchEntity" { + log.Printf("[WARN] IAM account password policy is gone (i.e. default)") + d.SetId("") + return nil + } + return fmt.Errorf("Error reading IAM account password policy: %s", err) + } + + log.Printf("[DEBUG] Received IAM account password policy: %s", resp) + + policy := resp.PasswordPolicy + + d.Set("allow_users_to_change_password", policy.AllowUsersToChangePassword) + d.Set("expire_passwords", policy.ExpirePasswords) + d.Set("hard_expiry", policy.HardExpiry) + d.Set("max_password_age", policy.MaxPasswordAge) + d.Set("minimum_password_length", policy.MinimumPasswordLength) + d.Set("password_reuse_prevention", policy.PasswordReusePrevention) + d.Set("require_lowercase_characters", policy.RequireLowercaseCharacters) + d.Set("require_numbers", policy.RequireNumbers) + d.Set("require_symbols", policy.RequireSymbols) + d.Set("require_uppercase_characters", policy.RequireUppercaseCharacters) + + return nil +} + +func resourceAwsIamAccountPasswordPolicyDelete(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + log.Println("[DEBUG] Deleting IAM account password policy") + input := &iam.DeleteAccountPasswordPolicyInput{} + if _, err := iamconn.DeleteAccountPasswordPolicy(input); err != nil { + return fmt.Errorf("Error deleting IAM Password Policy: %s", err) + } + d.SetId("") + log.Println("[DEBUG] Deleted IAM account password policy") + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group.go new file mode 100644 index 000000000..967f055cd --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group.go @@ -0,0 +1,141 @@ +package aws + +import ( + "fmt" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/iam" + + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsIamGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsIamGroupCreate, + Read: resourceAwsIamGroupRead, + Update: resourceAwsIamGroupUpdate, + Delete: resourceAwsIamGroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "unique_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAwsIamGroupName, + }, + "path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "/", + }, + }, + } +} + +func resourceAwsIamGroupCreate(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + name := d.Get("name").(string) + path := d.Get("path").(string) + + request := &iam.CreateGroupInput{ + Path: aws.String(path), + GroupName: 
aws.String(name), + } + + createResp, err := iamconn.CreateGroup(request) + if err != nil { + return fmt.Errorf("Error creating IAM Group %s: %s", name, err) + } + d.SetId(*createResp.Group.GroupName) + + return resourceAwsIamGroupReadResult(d, createResp.Group) +} + +func resourceAwsIamGroupRead(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + request := &iam.GetGroupInput{ + GroupName: aws.String(d.Id()), + } + + getResp, err := iamconn.GetGroup(request) + if err != nil { + if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { + d.SetId("") + return nil + } + return fmt.Errorf("Error reading IAM Group %s: %s", d.Id(), err) + } + return resourceAwsIamGroupReadResult(d, getResp.Group) +} + +func resourceAwsIamGroupReadResult(d *schema.ResourceData, group *iam.Group) error { + if err := d.Set("name", group.GroupName); err != nil { + return err + } + if err := d.Set("arn", group.Arn); err != nil { + return err + } + if err := d.Set("path", group.Path); err != nil { + return err + } + if err := d.Set("unique_id", group.GroupId); err != nil { + return err + } + return nil +} + +func resourceAwsIamGroupUpdate(d *schema.ResourceData, meta interface{}) error { + if d.HasChange("name") || d.HasChange("path") { + iamconn := meta.(*AWSClient).iamconn + on, nn := d.GetChange("name") + _, np := d.GetChange("path") + + request := &iam.UpdateGroupInput{ + GroupName: aws.String(on.(string)), + NewGroupName: aws.String(nn.(string)), + NewPath: aws.String(np.(string)), + } + _, err := iamconn.UpdateGroup(request) + if err != nil { + return fmt.Errorf("Error updating IAM Group %s: %s", d.Id(), err) + } + return resourceAwsIamGroupRead(d, meta) + } + return nil +} + +func resourceAwsIamGroupDelete(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + request := &iam.DeleteGroupInput{ + GroupName: aws.String(d.Id()), + } + + if _, err := iamconn.DeleteGroup(request); err != nil { + return fmt.Errorf("Error deleting IAM Group %s: %s", d.Id(), err) + } + return nil +} + +func validateAwsIamGroupName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[0-9A-Za-z=,.@\-_+]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only alphanumeric characters, hyphens, underscores, commas, periods, @ symbols, plus and equals signs allowed in %q: %q", + k, value)) + } + return +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group_membership.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group_membership.go new file mode 100644 index 000000000..7977bbfb7 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group_membership.go @@ -0,0 +1,169 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsIamGroupMembership() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsIamGroupMembershipCreate, + Read: resourceAwsIamGroupMembershipRead, + Update: resourceAwsIamGroupMembershipUpdate, + Delete: resourceAwsIamGroupMembershipDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "users": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: 
&schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "group": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsIamGroupMembershipCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iamconn + + group := d.Get("group").(string) + userList := expandStringList(d.Get("users").(*schema.Set).List()) + + if err := addUsersToGroup(conn, userList, group); err != nil { + return err + } + + d.SetId(d.Get("name").(string)) + return resourceAwsIamGroupMembershipRead(d, meta) +} + +func resourceAwsIamGroupMembershipRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iamconn + group := d.Get("group").(string) + + var ul []string + var marker *string + for { + resp, err := conn.GetGroup(&iam.GetGroupInput{ + GroupName: aws.String(group), + Marker: marker, + }) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + // aws specific error + if awsErr.Code() == "NoSuchEntity" { + // group not found + d.SetId("") + return nil + } + } + return err + } + + for _, u := range resp.Users { + ul = append(ul, *u.UserName) + } + + if !*resp.IsTruncated { + break + } + marker = resp.Marker + } + + if err := d.Set("users", ul); err != nil { + return fmt.Errorf("[WARN] Error setting user list from IAM Group Membership (%s), error: %s", group, err) + } + + return nil +} + +func resourceAwsIamGroupMembershipUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iamconn + + if d.HasChange("users") { + group := d.Get("group").(string) + + o, n := d.GetChange("users") + if o == nil { + o = new(schema.Set) + } + if n == nil { + n = new(schema.Set) + } + + os := o.(*schema.Set) + ns := n.(*schema.Set) + remove := expandStringList(os.Difference(ns).List()) + add := expandStringList(ns.Difference(os).List()) + + if err := removeUsersFromGroup(conn, remove, group); err != nil { + return err + } + + if err := addUsersToGroup(conn, add, group); err != nil { + return err + } + } + + return resourceAwsIamGroupMembershipRead(d, meta) +} + +func resourceAwsIamGroupMembershipDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iamconn + userList := expandStringList(d.Get("users").(*schema.Set).List()) + group := d.Get("group").(string) + + if err := removeUsersFromGroup(conn, userList, group); err != nil { + return err + } + + return nil +} + +func removeUsersFromGroup(conn *iam.IAM, users []*string, group string) error { + for _, u := range users { + _, err := conn.RemoveUserFromGroup(&iam.RemoveUserFromGroupInput{ + UserName: u, + GroupName: aws.String(group), + }) + + if err != nil { + if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { + return nil + } + return err + } + } + return nil +} + +func addUsersToGroup(conn *iam.IAM, users []*string, group string) error { + for _, u := range users { + _, err := conn.AddUserToGroup(&iam.AddUserToGroupInput{ + UserName: u, + GroupName: aws.String(group), + }) + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group_policy.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group_policy.go new file mode 100644 index 000000000..1bdf72545 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group_policy.go @@ -0,0 +1,129 @@ +package aws + +import ( + "fmt" + "net/url" 
+ "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/iam" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsIamGroupPolicy() *schema.Resource { + return &schema.Resource{ + // PutGroupPolicy API is idempotent, so these can be the same. + Create: resourceAwsIamGroupPolicyPut, + Update: resourceAwsIamGroupPolicyPut, + + Read: resourceAwsIamGroupPolicyRead, + Delete: resourceAwsIamGroupPolicyDelete, + + Schema: map[string]*schema.Schema{ + "policy": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + }, + "name_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "group": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsIamGroupPolicyPut(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + request := &iam.PutGroupPolicyInput{ + GroupName: aws.String(d.Get("group").(string)), + PolicyDocument: aws.String(d.Get("policy").(string)), + } + + var policyName string + if v, ok := d.GetOk("name"); ok { + policyName = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + policyName = resource.PrefixedUniqueId(v.(string)) + } else { + policyName = resource.UniqueId() + } + request.PolicyName = aws.String(policyName) + + if _, err := iamconn.PutGroupPolicy(request); err != nil { + return fmt.Errorf("Error putting IAM group policy %s: %s", *request.PolicyName, err) + } + + d.SetId(fmt.Sprintf("%s:%s", *request.GroupName, *request.PolicyName)) + return nil +} + +func resourceAwsIamGroupPolicyRead(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + group, name := resourceAwsIamGroupPolicyParseId(d.Id()) + + request := &iam.GetGroupPolicyInput{ + PolicyName: aws.String(name), + GroupName: aws.String(group), + } + + var err error + getResp, err := iamconn.GetGroupPolicy(request) + if err != nil { + if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { // XXX test me + d.SetId("") + return nil + } + return fmt.Errorf("Error reading IAM policy %s from group %s: %s", name, group, err) + } + + if getResp.PolicyDocument == nil { + return fmt.Errorf("GetGroupPolicy returned a nil policy document") + } + + policy, err := url.QueryUnescape(*getResp.PolicyDocument) + if err != nil { + return err + } + return d.Set("policy", policy) +} + +func resourceAwsIamGroupPolicyDelete(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + group, name := resourceAwsIamGroupPolicyParseId(d.Id()) + + request := &iam.DeleteGroupPolicyInput{ + PolicyName: aws.String(name), + GroupName: aws.String(group), + } + + if _, err := iamconn.DeleteGroupPolicy(request); err != nil { + return fmt.Errorf("Error deleting IAM group policy %s: %s", d.Id(), err) + } + return nil +} + +func resourceAwsIamGroupPolicyParseId(id string) (groupName, policyName string) { + parts := strings.SplitN(id, ":", 2) + groupName = parts[0] + policyName = parts[1] + return +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group_policy_attachment.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group_policy_attachment.go 
new file mode 100644 index 000000000..cf9595232 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_group_policy_attachment.go @@ -0,0 +1,124 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsIamGroupPolicyAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsIamGroupPolicyAttachmentCreate, + Read: resourceAwsIamGroupPolicyAttachmentRead, + Delete: resourceAwsIamGroupPolicyAttachmentDelete, + + Schema: map[string]*schema.Schema{ + "group": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "policy_arn": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsIamGroupPolicyAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iamconn + + group := d.Get("group").(string) + arn := d.Get("policy_arn").(string) + + err := attachPolicyToGroup(conn, group, arn) + if err != nil { + return fmt.Errorf("[WARN] Error attaching policy %s to IAM group %s: %v", arn, group, err) + } + + d.SetId(resource.PrefixedUniqueId(fmt.Sprintf("%s-", group))) + return resourceAwsIamGroupPolicyAttachmentRead(d, meta) +} + +func resourceAwsIamGroupPolicyAttachmentRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iamconn + group := d.Get("group").(string) + arn := d.Get("policy_arn").(string) + + _, err := conn.GetGroup(&iam.GetGroupInput{ + GroupName: aws.String(group), + }) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "NoSuchEntity" { + log.Printf("[WARN] No such entity found for Policy Attachment (%s)", group) + d.SetId("") + return nil + } + } + return err + } + + attachedPolicies, err := conn.ListAttachedGroupPolicies(&iam.ListAttachedGroupPoliciesInput{ + GroupName: aws.String(group), + }) + if err != nil { + return err + } + + var policy string + for _, p := range attachedPolicies.AttachedPolicies { + if *p.PolicyArn == arn { + policy = *p.PolicyArn + } + } + + if policy == "" { + log.Printf("[WARN] No such policy found for Group Policy Attachment (%s)", group) + d.SetId("") + } + + return nil +} + +func resourceAwsIamGroupPolicyAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iamconn + group := d.Get("group").(string) + arn := d.Get("policy_arn").(string) + + err := detachPolicyFromGroup(conn, group, arn) + if err != nil { + return fmt.Errorf("[WARN] Error removing policy %s from IAM Group %s: %v", arn, group, err) + } + return nil +} + +func attachPolicyToGroup(conn *iam.IAM, group string, arn string) error { + _, err := conn.AttachGroupPolicy(&iam.AttachGroupPolicyInput{ + GroupName: aws.String(group), + PolicyArn: aws.String(arn), + }) + if err != nil { + return err + } + return nil +} + +func detachPolicyFromGroup(conn *iam.IAM, group string, arn string) error { + _, err := conn.DetachGroupPolicy(&iam.DetachGroupPolicyInput{ + GroupName: aws.String(group), + PolicyArn: aws.String(arn), + }) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_instance_profile.go 
b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_instance_profile.go new file mode 100644 index 000000000..930ab3b39 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_instance_profile.go @@ -0,0 +1,319 @@ +package aws + +import ( + "fmt" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/iam" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsIamInstanceProfile() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsIamInstanceProfileCreate, + Read: resourceAwsIamInstanceProfileRead, + Update: resourceAwsIamInstanceProfileUpdate, + Delete: resourceAwsIamInstanceProfileDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "create_date": { + Type: schema.TypeString, + Computed: true, + }, + + "unique_id": { + Type: schema.TypeString, + Computed: true, + }, + + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + // https://github.com/boto/botocore/blob/2485f5c/botocore/data/iam/2010-05-08/service-2.json#L8196-L8201 + value := v.(string) + if len(value) > 128 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 128 characters", k)) + } + if !regexp.MustCompile("^[\\w+=,.@-]+$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must match [\\w+=,.@-]", k)) + } + return + }, + }, + + "name_prefix": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + // https://github.com/boto/botocore/blob/2485f5c/botocore/data/iam/2010-05-08/service-2.json#L8196-L8201 + value := v.(string) + if len(value) > 64 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 64 characters, name is limited to 128", k)) + } + if !regexp.MustCompile("^[\\w+=,.@-]+$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must match [\\w+=,.@-]", k)) + } + return + }, + }, + + "path": { + Type: schema.TypeString, + Optional: true, + Default: "/", + ForceNew: true, + }, + + "roles": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + ConflictsWith: []string{"role"}, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Deprecated: "Use `role` instead. 
Only a single role can be passed to an IAM Instance Profile", + }, + + "role": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"roles"}, + }, + }, + } +} + +func resourceAwsIamInstanceProfileCreate(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + var name string + if v, ok := d.GetOk("name"); ok { + name = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + name = resource.PrefixedUniqueId(v.(string)) + } else { + name = resource.UniqueId() + } + + _, hasRoles := d.GetOk("roles") + _, hasRole := d.GetOk("role") + + if hasRole == false && hasRoles == false { + return fmt.Errorf("Either `role` or `roles` (deprecated) must be specified when creating an IAM Instance Profile") + } + + request := &iam.CreateInstanceProfileInput{ + InstanceProfileName: aws.String(name), + Path: aws.String(d.Get("path").(string)), + } + + var err error + response, err := iamconn.CreateInstanceProfile(request) + if err == nil { + err = instanceProfileReadResult(d, response.InstanceProfile) + } + if err != nil { + return fmt.Errorf("Error creating IAM instance profile %s: %s", name, err) + } + + waiterRequest := &iam.GetInstanceProfileInput{ + InstanceProfileName: aws.String(name), + } + // don't return until the IAM service reports that the instance profile is ready. + // this ensures that terraform resources which rely on the instance profile will 'see' + // that the instance profile exists. + err = iamconn.WaitUntilInstanceProfileExists(waiterRequest) + if err != nil { + return fmt.Errorf("Timed out while waiting for instance profile %s: %s", name, err) + } + + return resourceAwsIamInstanceProfileUpdate(d, meta) +} + +func instanceProfileAddRole(iamconn *iam.IAM, profileName, roleName string) error { + request := &iam.AddRoleToInstanceProfileInput{ + InstanceProfileName: aws.String(profileName), + RoleName: aws.String(roleName), + } + + _, err := iamconn.AddRoleToInstanceProfile(request) + return err +} + +func instanceProfileRemoveRole(iamconn *iam.IAM, profileName, roleName string) error { + request := &iam.RemoveRoleFromInstanceProfileInput{ + InstanceProfileName: aws.String(profileName), + RoleName: aws.String(roleName), + } + + _, err := iamconn.RemoveRoleFromInstanceProfile(request) + if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { + return nil + } + return err +} + +func instanceProfileSetRoles(d *schema.ResourceData, iamconn *iam.IAM) error { + oldInterface, newInterface := d.GetChange("roles") + oldRoles := oldInterface.(*schema.Set) + newRoles := newInterface.(*schema.Set) + + currentRoles := schema.CopySet(oldRoles) + + d.Partial(true) + + for _, role := range oldRoles.Difference(newRoles).List() { + err := instanceProfileRemoveRole(iamconn, d.Id(), role.(string)) + if err != nil { + return fmt.Errorf("Error removing role %s from IAM instance profile %s: %s", role, d.Id(), err) + } + currentRoles.Remove(role) + d.Set("roles", currentRoles) + d.SetPartial("roles") + } + + for _, role := range newRoles.Difference(oldRoles).List() { + err := instanceProfileAddRole(iamconn, d.Id(), role.(string)) + if err != nil { + return fmt.Errorf("Error adding role %s to IAM instance profile %s: %s", role, d.Id(), err) + } + currentRoles.Add(role) + d.Set("roles", currentRoles) + d.SetPartial("roles") + } + + d.Partial(false) + + return nil +} + +func instanceProfileRemoveAllRoles(d *schema.ResourceData, iamconn *iam.IAM) error { + for _, role := range d.Get("roles").(*schema.Set).List() { + 
err := instanceProfileRemoveRole(iamconn, d.Id(), role.(string))
+		if err != nil {
+			return fmt.Errorf("Error removing role %s from IAM instance profile %s: %s", role, d.Id(), err)
+		}
+	}
+	return nil
+}
+
+func resourceAwsIamInstanceProfileUpdate(d *schema.ResourceData, meta interface{}) error {
+	iamconn := meta.(*AWSClient).iamconn
+
+	d.Partial(true)
+
+	if d.HasChange("role") {
+		oldRole, newRole := d.GetChange("role")
+
+		if oldRole.(string) != "" {
+			err := instanceProfileRemoveRole(iamconn, d.Id(), oldRole.(string))
+			if err != nil {
+				return fmt.Errorf("Error removing role %s from IAM instance profile %s: %s", oldRole.(string), d.Id(), err)
+			}
+		}
+
+		if newRole.(string) != "" {
+			err := instanceProfileAddRole(iamconn, d.Id(), newRole.(string))
+			if err != nil {
+				return fmt.Errorf("Error adding role %s to IAM instance profile %s: %s", newRole.(string), d.Id(), err)
+			}
+		}
+
+		d.SetPartial("role")
+	}
+
+	if d.HasChange("roles") {
+		return instanceProfileSetRoles(d, iamconn)
+	}
+
+	d.Partial(false)
+
+	return nil
+}
+
+func resourceAwsIamInstanceProfileRead(d *schema.ResourceData, meta interface{}) error {
+	iamconn := meta.(*AWSClient).iamconn
+
+	request := &iam.GetInstanceProfileInput{
+		InstanceProfileName: aws.String(d.Id()),
+	}
+
+	result, err := iamconn.GetInstanceProfile(request)
+	if err != nil {
+		if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" {
+			d.SetId("")
+			return nil
+		}
+		return fmt.Errorf("Error reading IAM instance profile %s: %s", d.Id(), err)
+	}
+
+	return instanceProfileReadResult(d, result.InstanceProfile)
+}
+
+func resourceAwsIamInstanceProfileDelete(d *schema.ResourceData, meta interface{}) error {
+	iamconn := meta.(*AWSClient).iamconn
+
+	if err := instanceProfileRemoveAllRoles(d, iamconn); err != nil {
+		return err
+	}
+
+	request := &iam.DeleteInstanceProfileInput{
+		InstanceProfileName: aws.String(d.Id()),
+	}
+	_, err := iamconn.DeleteInstanceProfile(request)
+	if err != nil {
+		return fmt.Errorf("Error deleting IAM instance profile %s: %s", d.Id(), err)
+	}
+	d.SetId("")
+	return nil
+}
+
+func instanceProfileReadResult(d *schema.ResourceData, result *iam.InstanceProfile) error {
+	d.SetId(*result.InstanceProfileName)
+	if err := d.Set("name", result.InstanceProfileName); err != nil {
+		return err
+	}
+	if err := d.Set("arn", result.Arn); err != nil {
+		return err
+	}
+	if err := d.Set("path", result.Path); err != nil {
+		return err
+	}
+	d.Set("unique_id", result.InstanceProfileId)
+
+	if result.Roles != nil && len(result.Roles) > 0 {
+		d.Set("role", result.Roles[0].RoleName) //there will only be 1 role returned
+	}
+
+	roles := &schema.Set{F: schema.HashString}
+	for _, role := range result.Roles {
+		roles.Add(*role.RoleName)
+	}
+	if err := d.Set("roles", roles); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_openid_connect_provider.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_openid_connect_provider.go
new file mode 100644
index 000000000..1791da4ec
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_openid_connect_provider.go
@@ -0,0 +1,141 @@
+package aws
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/iam"
+
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsIamOpenIDConnectProvider() *schema.Resource {
+	return 
&schema.Resource{ + Create: resourceAwsIamOpenIDConnectProviderCreate, + Read: resourceAwsIamOpenIDConnectProviderRead, + Update: resourceAwsIamOpenIDConnectProviderUpdate, + Delete: resourceAwsIamOpenIDConnectProviderDelete, + Exists: resourceAwsIamOpenIDConnectProviderExists, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "url": &schema.Schema{ + Type: schema.TypeString, + Computed: false, + Required: true, + ForceNew: true, + ValidateFunc: validateOpenIdURL, + DiffSuppressFunc: suppressOpenIdURL, + }, + "client_id_list": &schema.Schema{ + Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeList, + Required: true, + ForceNew: true, + }, + "thumbprint_list": &schema.Schema{ + Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeList, + Required: true, + }, + }, + } +} + +func resourceAwsIamOpenIDConnectProviderCreate(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + input := &iam.CreateOpenIDConnectProviderInput{ + Url: aws.String(d.Get("url").(string)), + ClientIDList: expandStringList(d.Get("client_id_list").([]interface{})), + ThumbprintList: expandStringList(d.Get("thumbprint_list").([]interface{})), + } + + out, err := iamconn.CreateOpenIDConnectProvider(input) + if err != nil { + return err + } + + d.SetId(*out.OpenIDConnectProviderArn) + + return resourceAwsIamOpenIDConnectProviderRead(d, meta) +} + +func resourceAwsIamOpenIDConnectProviderRead(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + input := &iam.GetOpenIDConnectProviderInput{ + OpenIDConnectProviderArn: aws.String(d.Id()), + } + out, err := iamconn.GetOpenIDConnectProvider(input) + if err != nil { + return err + } + + d.Set("arn", d.Id()) + d.Set("url", out.Url) + d.Set("client_id_list", flattenStringList(out.ClientIDList)) + d.Set("thumbprint_list", flattenStringList(out.ThumbprintList)) + + return nil +} + +func resourceAwsIamOpenIDConnectProviderUpdate(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + if d.HasChange("thumbprint_list") { + input := &iam.UpdateOpenIDConnectProviderThumbprintInput{ + OpenIDConnectProviderArn: aws.String(d.Id()), + ThumbprintList: expandStringList(d.Get("thumbprint_list").([]interface{})), + } + + _, err := iamconn.UpdateOpenIDConnectProviderThumbprint(input) + if err != nil { + return err + } + } + + return resourceAwsIamOpenIDConnectProviderRead(d, meta) +} + +func resourceAwsIamOpenIDConnectProviderDelete(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + input := &iam.DeleteOpenIDConnectProviderInput{ + OpenIDConnectProviderArn: aws.String(d.Id()), + } + _, err := iamconn.DeleteOpenIDConnectProvider(input) + + if err != nil { + if err, ok := err.(awserr.Error); ok && err.Code() == "NoSuchEntity" { + return nil + } + return fmt.Errorf("Error deleting platform application %s", err) + } + + return nil +} + +func resourceAwsIamOpenIDConnectProviderExists(d *schema.ResourceData, meta interface{}) (bool, error) { + iamconn := meta.(*AWSClient).iamconn + + input := &iam.GetOpenIDConnectProviderInput{ + OpenIDConnectProviderArn: aws.String(d.Id()), + } + _, err := iamconn.GetOpenIDConnectProvider(input) + if err != nil { + if err, ok := err.(awserr.Error); ok && err.Code() == "NoSuchEntity" { + return false, nil + } + return true, err + } + + 
return true, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_policy.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_policy.go new file mode 100644 index 000000000..b3fdf1c5f --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_policy.go @@ -0,0 +1,296 @@ +package aws + +import ( + "fmt" + "net/url" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/iam" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsIamPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsIamPolicyCreate, + Read: resourceAwsIamPolicyRead, + Update: resourceAwsIamPolicyUpdate, + Delete: resourceAwsIamPolicyDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "description": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + "path": { + Type: schema.TypeString, + Optional: true, + Default: "/", + ForceNew: true, + }, + "policy": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateIAMPolicyJson, + DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, + }, + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + // https://github.com/boto/botocore/blob/2485f5c/botocore/data/iam/2010-05-08/service-2.json#L8329-L8334 + value := v.(string) + if len(value) > 128 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 128 characters", k)) + } + if !regexp.MustCompile("^[\\w+=,.@-]*$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must match [\\w+=,.@-]", k)) + } + return + }, + }, + "name_prefix": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + // https://github.com/boto/botocore/blob/2485f5c/botocore/data/iam/2010-05-08/service-2.json#L8329-L8334 + value := v.(string) + if len(value) > 96 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 96 characters, name is limited to 128", k)) + } + if !regexp.MustCompile("^[\\w+=,.@-]*$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must match [\\w+=,.@-]", k)) + } + return + }, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsIamPolicyCreate(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + var name string + if v, ok := d.GetOk("name"); ok { + name = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + name = resource.PrefixedUniqueId(v.(string)) + } else { + name = resource.UniqueId() + } + + request := &iam.CreatePolicyInput{ + Description: aws.String(d.Get("description").(string)), + Path: aws.String(d.Get("path").(string)), + PolicyDocument: aws.String(d.Get("policy").(string)), + PolicyName: aws.String(name), + } + + response, err := iamconn.CreatePolicy(request) + if err != nil { + return fmt.Errorf("Error creating IAM policy %s: %s", name, err) + } + + return readIamPolicy(d, response.Policy) +} + +func resourceAwsIamPolicyRead(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + getPolicyRequest := 
&iam.GetPolicyInput{
+        PolicyArn: aws.String(d.Id()),
+    }
+
+    getPolicyResponse, err := iamconn.GetPolicy(getPolicyRequest)
+    if err != nil {
+        if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" {
+            d.SetId("")
+            return nil
+        }
+        return fmt.Errorf("Error reading IAM policy %s: %s", d.Id(), err)
+    }
+
+    getPolicyVersionRequest := &iam.GetPolicyVersionInput{
+        PolicyArn: aws.String(d.Id()),
+        VersionId: getPolicyResponse.Policy.DefaultVersionId,
+    }
+
+    getPolicyVersionResponse, err := iamconn.GetPolicyVersion(getPolicyVersionRequest)
+    if err != nil {
+        if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" {
+            d.SetId("")
+            return nil
+        }
+        return fmt.Errorf("Error reading IAM policy version %s: %s", d.Id(), err)
+    }
+
+    policy, err := url.QueryUnescape(*getPolicyVersionResponse.PolicyVersion.Document)
+    if err != nil {
+        return err
+    }
+    if err := d.Set("policy", policy); err != nil {
+        return err
+    }
+
+    return readIamPolicy(d, getPolicyResponse.Policy)
+}
+
+func resourceAwsIamPolicyUpdate(d *schema.ResourceData, meta interface{}) error {
+    iamconn := meta.(*AWSClient).iamconn
+
+    if err := iamPolicyPruneVersions(d.Id(), iamconn); err != nil {
+        return err
+    }
+
+    if !d.HasChange("policy") {
+        return nil
+    }
+    request := &iam.CreatePolicyVersionInput{
+        PolicyArn:      aws.String(d.Id()),
+        PolicyDocument: aws.String(d.Get("policy").(string)),
+        SetAsDefault:   aws.Bool(true),
+    }
+
+    if _, err := iamconn.CreatePolicyVersion(request); err != nil {
+        return fmt.Errorf("Error updating IAM policy %s: %s", d.Id(), err)
+    }
+    return nil
+}
+
+func resourceAwsIamPolicyDelete(d *schema.ResourceData, meta interface{}) error {
+    iamconn := meta.(*AWSClient).iamconn
+
+    if err := iamPolicyDeleteNondefaultVersions(d.Id(), iamconn); err != nil {
+        return err
+    }
+
+    request := &iam.DeletePolicyInput{
+        PolicyArn: aws.String(d.Id()),
+    }
+
+    _, err := iamconn.DeletePolicy(request)
+    if err != nil {
+        if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" {
+            return nil
+        }
+        return fmt.Errorf("Error deleting IAM policy %s: %#v", d.Id(), err)
+    }
+    return nil
+}
+
+// iamPolicyPruneVersions deletes the oldest versions.
+//
+// Old versions are deleted until there are 4 or fewer remaining, which means
+// at least one more can be created before hitting the maximum of 5.
+//
+// The default version is never deleted.
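+//
+// For example, with five versions v1..v5 where v3 is the default, the loop
+// below skips v3 and deletes v1 (the oldest by CreateDate), leaving four
+// versions and room to create one more.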
+ +func iamPolicyPruneVersions(arn string, iamconn *iam.IAM) error { + versions, err := iamPolicyListVersions(arn, iamconn) + if err != nil { + return err + } + if len(versions) < 5 { + return nil + } + + var oldestVersion *iam.PolicyVersion + + for _, version := range versions { + if *version.IsDefaultVersion { + continue + } + if oldestVersion == nil || + version.CreateDate.Before(*oldestVersion.CreateDate) { + oldestVersion = version + } + } + + if err := iamPolicyDeleteVersion(arn, *oldestVersion.VersionId, iamconn); err != nil { + return err + } + return nil +} + +func iamPolicyDeleteNondefaultVersions(arn string, iamconn *iam.IAM) error { + versions, err := iamPolicyListVersions(arn, iamconn) + if err != nil { + return err + } + + for _, version := range versions { + if *version.IsDefaultVersion { + continue + } + if err := iamPolicyDeleteVersion(arn, *version.VersionId, iamconn); err != nil { + return err + } + } + + return nil +} + +func iamPolicyDeleteVersion(arn, versionID string, iamconn *iam.IAM) error { + request := &iam.DeletePolicyVersionInput{ + PolicyArn: aws.String(arn), + VersionId: aws.String(versionID), + } + + _, err := iamconn.DeletePolicyVersion(request) + if err != nil { + return fmt.Errorf("Error deleting version %s from IAM policy %s: %s", versionID, arn, err) + } + return nil +} + +func iamPolicyListVersions(arn string, iamconn *iam.IAM) ([]*iam.PolicyVersion, error) { + request := &iam.ListPolicyVersionsInput{ + PolicyArn: aws.String(arn), + } + + response, err := iamconn.ListPolicyVersions(request) + if err != nil { + return nil, fmt.Errorf("Error listing versions for IAM policy %s: %s", arn, err) + } + return response.Versions, nil +} + +func readIamPolicy(d *schema.ResourceData, policy *iam.Policy) error { + d.SetId(*policy.Arn) + if policy.Description != nil { + // the description isn't present in the response to CreatePolicy. 
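+        // GetPolicy responses do carry the description, so guarding on nil
+        // keeps a create-then-read cycle from overwriting a configured
+        // description with an empty value.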
+ if err := d.Set("description", policy.Description); err != nil { + return err + } + } + if err := d.Set("path", policy.Path); err != nil { + return err + } + if err := d.Set("name", policy.PolicyName); err != nil { + return err + } + if err := d.Set("arn", policy.Arn); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_policy_attachment.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_policy_attachment.go new file mode 100644 index 000000000..adbd81b20 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_policy_attachment.go @@ -0,0 +1,374 @@ +package aws + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsIamPolicyAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsIamPolicyAttachmentCreate, + Read: resourceAwsIamPolicyAttachmentRead, + Update: resourceAwsIamPolicyAttachmentUpdate, + Delete: resourceAwsIamPolicyAttachmentDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + if v.(string) == "" { + errors = append(errors, fmt.Errorf( + "%q cannot be an empty string", k)) + } + return + }, + }, + "users": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "roles": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "groups": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "policy_arn": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsIamPolicyAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iamconn + + name := d.Get("name").(string) + arn := d.Get("policy_arn").(string) + users := expandStringList(d.Get("users").(*schema.Set).List()) + roles := expandStringList(d.Get("roles").(*schema.Set).List()) + groups := expandStringList(d.Get("groups").(*schema.Set).List()) + + if len(users) == 0 && len(roles) == 0 && len(groups) == 0 { + return fmt.Errorf("[WARN] No Users, Roles, or Groups specified for IAM Policy Attachment %s", name) + } else { + var userErr, roleErr, groupErr error + if users != nil { + userErr = attachPolicyToUsers(conn, users, arn) + } + if roles != nil { + roleErr = attachPolicyToRoles(conn, roles, arn) + } + if groups != nil { + groupErr = attachPolicyToGroups(conn, groups, arn) + } + if userErr != nil || roleErr != nil || groupErr != nil { + return composeErrors(fmt.Sprint("[WARN] Error attaching policy with IAM Policy Attachment ", name, ":"), userErr, roleErr, groupErr) + } + } + d.SetId(d.Get("name").(string)) + return resourceAwsIamPolicyAttachmentRead(d, meta) +} + +func resourceAwsIamPolicyAttachmentRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iamconn + arn := d.Get("policy_arn").(string) + name := d.Get("name").(string) + + _, err := 
conn.GetPolicy(&iam.GetPolicyInput{
+        PolicyArn: aws.String(arn),
+    })
+
+    if err != nil {
+        if awsErr, ok := err.(awserr.Error); ok {
+            if awsErr.Code() == "NoSuchEntity" {
+                log.Printf("[WARN] No such entity found for Policy Attachment (%s)", d.Id())
+                d.SetId("")
+                return nil
+            }
+        }
+        return err
+    }
+
+    ul := make([]string, 0)
+    rl := make([]string, 0)
+    gl := make([]string, 0)
+
+    args := iam.ListEntitiesForPolicyInput{
+        PolicyArn: aws.String(arn),
+    }
+    err = conn.ListEntitiesForPolicyPages(&args, func(page *iam.ListEntitiesForPolicyOutput, lastPage bool) bool {
+        for _, u := range page.PolicyUsers {
+            ul = append(ul, *u.UserName)
+        }
+
+        for _, r := range page.PolicyRoles {
+            rl = append(rl, *r.RoleName)
+        }
+
+        for _, g := range page.PolicyGroups {
+            gl = append(gl, *g.GroupName)
+        }
+        return true
+    })
+    if err != nil {
+        return err
+    }
+
+    userErr := d.Set("users", ul)
+    roleErr := d.Set("roles", rl)
+    groupErr := d.Set("groups", gl)
+
+    if userErr != nil || roleErr != nil || groupErr != nil {
+        return composeErrors(fmt.Sprint("[WARN] Error setting user, role, or group list from IAM Policy Attachment ", name, ":"), userErr, roleErr, groupErr)
+    }
+
+    return nil
+}
+
+func resourceAwsIamPolicyAttachmentUpdate(d *schema.ResourceData, meta interface{}) error {
+    conn := meta.(*AWSClient).iamconn
+    name := d.Get("name").(string)
+    var userErr, roleErr, groupErr error
+
+    if d.HasChange("users") {
+        userErr = updateUsers(conn, d, meta)
+    }
+    if d.HasChange("roles") {
+        roleErr = updateRoles(conn, d, meta)
+    }
+    if d.HasChange("groups") {
+        groupErr = updateGroups(conn, d, meta)
+    }
+    if userErr != nil || roleErr != nil || groupErr != nil {
+        return composeErrors(fmt.Sprint("[WARN] Error updating user, role, or group list from IAM Policy Attachment ", name, ":"), userErr, roleErr, groupErr)
+    }
+    return resourceAwsIamPolicyAttachmentRead(d, meta)
+}
+
+func resourceAwsIamPolicyAttachmentDelete(d *schema.ResourceData, meta interface{}) error {
+    conn := meta.(*AWSClient).iamconn
+    name := d.Get("name").(string)
+    arn := d.Get("policy_arn").(string)
+    users := expandStringList(d.Get("users").(*schema.Set).List())
+    roles := expandStringList(d.Get("roles").(*schema.Set).List())
+    groups := expandStringList(d.Get("groups").(*schema.Set).List())
+
+    var userErr, roleErr, groupErr error
+    if len(users) != 0 {
+        userErr = detachPolicyFromUsers(conn, users, arn)
+    }
+    if len(roles) != 0 {
+        roleErr = detachPolicyFromRoles(conn, roles, arn)
+    }
+    if len(groups) != 0 {
+        groupErr = detachPolicyFromGroups(conn, groups, arn)
+    }
+    if userErr != nil || roleErr != nil || groupErr != nil {
+        return composeErrors(fmt.Sprint("[WARN] Error removing user, role, or group list from IAM Policy Detach ", name, ":"), userErr, roleErr, groupErr)
+    }
+    return nil
+}
+
+func composeErrors(desc string, uErr error, rErr error, gErr error) error {
+    errMsg := desc
+    errs := []error{uErr, rErr, gErr}
+    for _, e := range errs {
+        if e != nil {
+            errMsg = errMsg + "\n– " + e.Error()
+        }
+    }
+    return fmt.Errorf("%s", errMsg)
+}
+
+func attachPolicyToUsers(conn *iam.IAM, users []*string, arn string) error {
+    for _, u := range users {
+        _, err := conn.AttachUserPolicy(&iam.AttachUserPolicyInput{
+            UserName:  u,
+            PolicyArn: aws.String(arn),
+        })
+        if err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func attachPolicyToRoles(conn *iam.IAM, roles []*string, arn string) error {
+    for _, r := range roles {
+        _, err := conn.AttachRolePolicy(&iam.AttachRolePolicyInput{
+            RoleName:  r,
+            PolicyArn: aws.String(arn),
+        })
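+        // AttachRolePolicy is subject to IAM's eventual consistency: a new
+        // attachment may not appear in list calls immediately. After a
+        // successful attach, the retry below polls the role's attached-policy
+        // list until the policy shows up or the wait times out.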
+        if err != nil {
+            return err
+        }
+
+        var attachmentErr error
+        attachmentErr = resource.Retry(2*time.Minute, func() *resource.RetryError {
+
+            input := iam.ListAttachedRolePoliciesInput{
+                RoleName: r,
+            }
+
+            attachedPolicies, err := conn.ListAttachedRolePolicies(&input)
+            if err != nil {
+                return resource.NonRetryableError(err)
+            }
+
+            var foundPolicy bool
+            for _, policy := range attachedPolicies.AttachedPolicies {
+                if strings.HasSuffix(arn, *policy.PolicyName) {
+                    foundPolicy = true
+                    break
+                }
+            }
+
+            if !foundPolicy {
+                return resource.RetryableError(fmt.Errorf("policy %q not yet attached to role %q", arn, *r))
+            }
+
+            return nil
+        })
+
+        if attachmentErr != nil {
+            return attachmentErr
+        }
+    }
+    return nil
+}
+
+func attachPolicyToGroups(conn *iam.IAM, groups []*string, arn string) error {
+    for _, g := range groups {
+        _, err := conn.AttachGroupPolicy(&iam.AttachGroupPolicyInput{
+            GroupName: g,
+            PolicyArn: aws.String(arn),
+        })
+        if err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func updateUsers(conn *iam.IAM, d *schema.ResourceData, meta interface{}) error {
+    arn := d.Get("policy_arn").(string)
+    o, n := d.GetChange("users")
+    if o == nil {
+        o = new(schema.Set)
+    }
+    if n == nil {
+        n = new(schema.Set)
+    }
+    os := o.(*schema.Set)
+    ns := n.(*schema.Set)
+    remove := expandStringList(os.Difference(ns).List())
+    add := expandStringList(ns.Difference(os).List())
+
+    if rErr := detachPolicyFromUsers(conn, remove, arn); rErr != nil {
+        return rErr
+    }
+    if aErr := attachPolicyToUsers(conn, add, arn); aErr != nil {
+        return aErr
+    }
+    return nil
+}
+
+func updateRoles(conn *iam.IAM, d *schema.ResourceData, meta interface{}) error {
+    arn := d.Get("policy_arn").(string)
+    o, n := d.GetChange("roles")
+    if o == nil {
+        o = new(schema.Set)
+    }
+    if n == nil {
+        n = new(schema.Set)
+    }
+    os := o.(*schema.Set)
+    ns := n.(*schema.Set)
+    remove := expandStringList(os.Difference(ns).List())
+    add := expandStringList(ns.Difference(os).List())
+
+    if rErr := detachPolicyFromRoles(conn, remove, arn); rErr != nil {
+        return rErr
+    }
+    if aErr := attachPolicyToRoles(conn, add, arn); aErr != nil {
+        return aErr
+    }
+    return nil
+}
+
+func updateGroups(conn *iam.IAM, d *schema.ResourceData, meta interface{}) error {
+    arn := d.Get("policy_arn").(string)
+    o, n := d.GetChange("groups")
+    if o == nil {
+        o = new(schema.Set)
+    }
+    if n == nil {
+        n = new(schema.Set)
+    }
+    os := o.(*schema.Set)
+    ns := n.(*schema.Set)
+    remove := expandStringList(os.Difference(ns).List())
+    add := expandStringList(ns.Difference(os).List())
+
+    if rErr := detachPolicyFromGroups(conn, remove, arn); rErr != nil {
+        return rErr
+    }
+    if aErr := attachPolicyToGroups(conn, add, arn); aErr != nil {
+        return aErr
+    }
+    return nil
+}
+
+func detachPolicyFromUsers(conn *iam.IAM, users []*string, arn string) error {
+    for _, u := range users {
+        _, err := conn.DetachUserPolicy(&iam.DetachUserPolicyInput{
+            UserName:  u,
+            PolicyArn: aws.String(arn),
+        })
+        if err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func detachPolicyFromRoles(conn *iam.IAM, roles []*string, arn string) error {
+    for _, r := range roles {
+        _, err := conn.DetachRolePolicy(&iam.DetachRolePolicyInput{
+            RoleName:  r,
+            PolicyArn: aws.String(arn),
+        })
+        if err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func detachPolicyFromGroups(conn *iam.IAM, groups []*string, arn string) error {
+    for _, g := range groups {
+        _, err := conn.DetachGroupPolicy(&iam.DetachGroupPolicyInput{
+            GroupName: g,
+            PolicyArn: aws.String(arn),
+        })
+        if err != nil {
+            return err
+        }
+    }
+    return nil
+}
diff
--git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_role.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_role.go new file mode 100644 index 000000000..7480ed3d0 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_role.go @@ -0,0 +1,274 @@ +package aws + +import ( + "fmt" + "net/url" + "regexp" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/iam" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsIamRole() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsIamRoleCreate, + Read: resourceAwsIamRoleRead, + Update: resourceAwsIamRoleUpdate, + Delete: resourceAwsIamRoleDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "unique_id": { + Type: schema.TypeString, + Computed: true, + }, + + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + // https://github.com/boto/botocore/blob/2485f5c/botocore/data/iam/2010-05-08/service-2.json#L8329-L8334 + value := v.(string) + if len(value) > 64 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 64 characters", k)) + } + if !regexp.MustCompile("^[\\w+=,.@-]*$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must match [\\w+=,.@-]", k)) + } + return + }, + }, + + "name_prefix": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + // https://github.com/boto/botocore/blob/2485f5c/botocore/data/iam/2010-05-08/service-2.json#L8329-L8334 + value := v.(string) + if len(value) > 32 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 32 characters, name is limited to 64", k)) + } + if !regexp.MustCompile("^[\\w+=,.@-]*$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must match [\\w+=,.@-]", k)) + } + return + }, + }, + + "path": { + Type: schema.TypeString, + Optional: true, + Default: "/", + ForceNew: true, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateIamRoleDescription, + }, + + "assume_role_policy": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, + ValidateFunc: validateJsonString, + }, + + "create_date": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsIamRoleCreate(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + var name string + if v, ok := d.GetOk("name"); ok { + name = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + name = resource.PrefixedUniqueId(v.(string)) + } else { + name = resource.UniqueId() + } + + request := &iam.CreateRoleInput{ + Path: aws.String(d.Get("path").(string)), + RoleName: aws.String(name), + AssumeRolePolicyDocument: aws.String(d.Get("assume_role_policy").(string)), + } + + if v, ok := d.GetOk("description"); ok { + request.Description = aws.String(v.(string)) + } + + var createResp *iam.CreateRoleOutput + err := resource.Retry(30*time.Second, func() *resource.RetryError { + var err error + 
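+        // resource.Retry re-runs this closure until it returns nil (success),
+        // returns a NonRetryableError (fail immediately), or the 30-second
+        // window lapses while the closure keeps returning RetryableError.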
+        createResp, err = iamconn.CreateRole(request)
+        // IAM users (referenced in Principal field of assume policy)
+        // can take ~30 seconds to propagate in AWS
+        if isAWSErr(err, "MalformedPolicyDocument", "Invalid principal in policy") {
+            return resource.RetryableError(err)
+        }
+        return resource.NonRetryableError(err)
+    })
+    if err != nil {
+        return fmt.Errorf("Error creating IAM Role %s: %s", name, err)
+    }
+    d.SetId(*createResp.Role.RoleName)
+    return resourceAwsIamRoleRead(d, meta)
+}
+
+func resourceAwsIamRoleRead(d *schema.ResourceData, meta interface{}) error {
+    iamconn := meta.(*AWSClient).iamconn
+
+    request := &iam.GetRoleInput{
+        RoleName: aws.String(d.Id()),
+    }
+
+    getResp, err := iamconn.GetRole(request)
+    if err != nil {
+        if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { // XXX test me
+            d.SetId("")
+            return nil
+        }
+        return fmt.Errorf("Error reading IAM Role %s: %s", d.Id(), err)
+    }
+
+    role := getResp.Role
+
+    if err := d.Set("name", role.RoleName); err != nil {
+        return err
+    }
+    if err := d.Set("arn", role.Arn); err != nil {
+        return err
+    }
+    if err := d.Set("path", role.Path); err != nil {
+        return err
+    }
+    if err := d.Set("unique_id", role.RoleId); err != nil {
+        return err
+    }
+    if err := d.Set("create_date", role.CreateDate.Format(time.RFC3339)); err != nil {
+        return err
+    }
+
+    if role.Description != nil {
+        // the description isn't present in the response to CreateRole.
+        if err := d.Set("description", role.Description); err != nil {
+            return err
+        }
+    }
+
+    assumeRolePolicy, err := url.QueryUnescape(*role.AssumeRolePolicyDocument)
+    if err != nil {
+        return err
+    }
+    if err := d.Set("assume_role_policy", assumeRolePolicy); err != nil {
+        return err
+    }
+    return nil
+}
+
+func resourceAwsIamRoleUpdate(d *schema.ResourceData, meta interface{}) error {
+    iamconn := meta.(*AWSClient).iamconn
+
+    if d.HasChange("assume_role_policy") {
+        assumeRolePolicyInput := &iam.UpdateAssumeRolePolicyInput{
+            RoleName:       aws.String(d.Id()),
+            PolicyDocument: aws.String(d.Get("assume_role_policy").(string)),
+        }
+        _, err := iamconn.UpdateAssumeRolePolicy(assumeRolePolicyInput)
+        if err != nil {
+            if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" {
+                d.SetId("")
+                return nil
+            }
+            return fmt.Errorf("Error Updating IAM Role (%s) Assume Role Policy: %s", d.Id(), err)
+        }
+    }
+
+    if d.HasChange("description") {
+        roleDescriptionInput := &iam.UpdateRoleDescriptionInput{
+            RoleName:    aws.String(d.Id()),
+            Description: aws.String(d.Get("description").(string)),
+        }
+        _, err := iamconn.UpdateRoleDescription(roleDescriptionInput)
+        if err != nil {
+            if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" {
+                d.SetId("")
+                return nil
+            }
+            return fmt.Errorf("Error Updating IAM Role (%s) Description: %s", d.Id(), err)
+        }
+    }
+
+    return nil
+}
+
+func resourceAwsIamRoleDelete(d *schema.ResourceData, meta interface{}) error {
+    iamconn := meta.(*AWSClient).iamconn
+
+    // Roles cannot be destroyed when attached to an existing Instance Profile
+    resp, err := iamconn.ListInstanceProfilesForRole(&iam.ListInstanceProfilesForRoleInput{
+        RoleName: aws.String(d.Id()),
+    })
+    if err != nil {
+        return fmt.Errorf("Error listing Profiles for IAM Role (%s) when trying to delete: %s", d.Id(), err)
+    }
+
+    // Loop and remove this Role from any Profiles
+    if len(resp.InstanceProfiles) > 0 {
+        for _, i := range resp.InstanceProfiles {
+            _, err := iamconn.RemoveRoleFromInstanceProfile(&iam.RemoveRoleFromInstanceProfileInput{
InstanceProfileName: i.InstanceProfileName, + RoleName: aws.String(d.Id()), + }) + if err != nil { + return fmt.Errorf("Error deleting IAM Role %s: %s", d.Id(), err) + } + } + } + + request := &iam.DeleteRoleInput{ + RoleName: aws.String(d.Id()), + } + + // IAM is eventually consistent and deletion of attached policies may take time + return resource.Retry(30*time.Second, func() *resource.RetryError { + _, err := iamconn.DeleteRole(request) + if err != nil { + awsErr, ok := err.(awserr.Error) + if ok && awsErr.Code() == "DeleteConflict" { + return resource.RetryableError(err) + } + + return resource.NonRetryableError(fmt.Errorf("Error deleting IAM Role %s: %s", d.Id(), err)) + } + return nil + }) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_role_policy.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_role_policy.go new file mode 100644 index 000000000..ec05a2259 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_role_policy.go @@ -0,0 +1,152 @@ +package aws + +import ( + "fmt" + "net/url" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/iam" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsIamRolePolicy() *schema.Resource { + return &schema.Resource{ + // PutRolePolicy API is idempotent, so these can be the same. + Create: resourceAwsIamRolePolicyPut, + Update: resourceAwsIamRolePolicyPut, + + Read: resourceAwsIamRolePolicyRead, + Delete: resourceAwsIamRolePolicyDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "policy": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateIAMPolicyJson, + DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, + }, + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: validateIamRolePolicyName, + }, + "name_prefix": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateIamRolePolicyNamePrefix, + }, + "role": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsIamRolePolicyPut(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + request := &iam.PutRolePolicyInput{ + RoleName: aws.String(d.Get("role").(string)), + PolicyDocument: aws.String(d.Get("policy").(string)), + } + + var policyName string + if v, ok := d.GetOk("name"); ok { + policyName = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + policyName = resource.PrefixedUniqueId(v.(string)) + } else { + policyName = resource.UniqueId() + } + request.PolicyName = aws.String(policyName) + + if _, err := iamconn.PutRolePolicy(request); err != nil { + return fmt.Errorf("Error putting IAM role policy %s: %s", *request.PolicyName, err) + } + + d.SetId(fmt.Sprintf("%s:%s", *request.RoleName, *request.PolicyName)) + return nil +} + +func resourceAwsIamRolePolicyRead(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + role, name, err := resourceAwsIamRolePolicyParseId(d.Id()) + if err != nil { + return err + } + + request := &iam.GetRolePolicyInput{ + PolicyName: aws.String(name), + RoleName: aws.String(role), + } + + getResp, err := 
iamconn.GetRolePolicy(request)
+    if err != nil {
+        if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { // XXX test me
+            d.SetId("")
+            return nil
+        }
+        return fmt.Errorf("Error reading IAM policy %s from role %s: %s", name, role, err)
+    }
+
+    if getResp.PolicyDocument == nil {
+        return fmt.Errorf("GetRolePolicy returned a nil policy document")
+    }
+
+    policy, err := url.QueryUnescape(*getResp.PolicyDocument)
+    if err != nil {
+        return err
+    }
+    if err := d.Set("policy", policy); err != nil {
+        return err
+    }
+    if err := d.Set("name", name); err != nil {
+        return err
+    }
+    return d.Set("role", role)
+}
+
+func resourceAwsIamRolePolicyDelete(d *schema.ResourceData, meta interface{}) error {
+    iamconn := meta.(*AWSClient).iamconn
+
+    role, name, err := resourceAwsIamRolePolicyParseId(d.Id())
+    if err != nil {
+        return err
+    }
+
+    request := &iam.DeleteRolePolicyInput{
+        PolicyName: aws.String(name),
+        RoleName:   aws.String(role),
+    }
+
+    if _, err := iamconn.DeleteRolePolicy(request); err != nil {
+        return fmt.Errorf("Error deleting IAM role policy %s: %s", d.Id(), err)
+    }
+    return nil
+}
+
+func resourceAwsIamRolePolicyParseId(id string) (roleName, policyName string, err error) {
+    parts := strings.SplitN(id, ":", 2)
+    if len(parts) != 2 {
+        err = fmt.Errorf("role_policy id must be of the form <role name>:<policy name>")
+        return
+    }
+
+    roleName = parts[0]
+    policyName = parts[1]
+    return
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_role_policy_attachment.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_role_policy_attachment.go
new file mode 100644
index 000000000..bb72f879a
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_role_policy_attachment.go
@@ -0,0 +1,126 @@
+package aws
+
+import (
+    "fmt"
+    "log"
+
+    "github.com/aws/aws-sdk-go/aws"
+    "github.com/aws/aws-sdk-go/aws/awserr"
+    "github.com/aws/aws-sdk-go/service/iam"
+    "github.com/hashicorp/terraform/helper/resource"
+    "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsIamRolePolicyAttachment() *schema.Resource {
+    return &schema.Resource{
+        Create: resourceAwsIamRolePolicyAttachmentCreate,
+        Read:   resourceAwsIamRolePolicyAttachmentRead,
+        Delete: resourceAwsIamRolePolicyAttachmentDelete,
+
+        Schema: map[string]*schema.Schema{
+            "role": &schema.Schema{
+                Type:     schema.TypeString,
+                Required: true,
+                ForceNew: true,
+            },
+            "policy_arn": &schema.Schema{
+                Type:     schema.TypeString,
+                Required: true,
+                ForceNew: true,
+            },
+        },
+    }
+}
+
+func resourceAwsIamRolePolicyAttachmentCreate(d *schema.ResourceData, meta interface{}) error {
+    conn := meta.(*AWSClient).iamconn
+
+    role := d.Get("role").(string)
+    arn := d.Get("policy_arn").(string)
+
+    err := attachPolicyToRole(conn, role, arn)
+    if err != nil {
+        return fmt.Errorf("[WARN] Error attaching policy %s to IAM Role %s: %v", arn, role, err)
+    }
+
+    d.SetId(resource.PrefixedUniqueId(fmt.Sprintf("%s-", role)))
+    return resourceAwsIamRolePolicyAttachmentRead(d, meta)
+}
+
+func resourceAwsIamRolePolicyAttachmentRead(d *schema.ResourceData, meta interface{}) error {
+    conn := meta.(*AWSClient).iamconn
+    role := d.Get("role").(string)
+    arn := d.Get("policy_arn").(string)
+
+    _, err := conn.GetRole(&iam.GetRoleInput{
+        RoleName: aws.String(role),
+    })
+
+    if err != nil {
+        if awsErr, ok := err.(awserr.Error); ok {
+            if awsErr.Code() == "NoSuchEntity" {
+                log.Printf("[WARN] No such entity found for Policy Attachment (%s)", role)
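+                // Clearing the ID marks the attachment as deleted in state,
+                // so the next plan proposes recreating it instead of failing.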
d.SetId("") + return nil + } + } + return err + } + + args := iam.ListAttachedRolePoliciesInput{ + RoleName: aws.String(role), + } + var policy string + err = conn.ListAttachedRolePoliciesPages(&args, func(page *iam.ListAttachedRolePoliciesOutput, lastPage bool) bool { + for _, p := range page.AttachedPolicies { + if *p.PolicyArn == arn { + policy = *p.PolicyArn + } + } + + return policy == "" + }) + if err != nil { + return err + } + if policy == "" { + log.Printf("[WARN] No such policy found for Role Policy Attachment (%s)", role) + d.SetId("") + } + + return nil +} + +func resourceAwsIamRolePolicyAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iamconn + role := d.Get("role").(string) + arn := d.Get("policy_arn").(string) + + err := detachPolicyFromRole(conn, role, arn) + if err != nil { + return fmt.Errorf("[WARN] Error removing policy %s from IAM Role %s: %v", arn, role, err) + } + return nil +} + +func attachPolicyToRole(conn *iam.IAM, role string, arn string) error { + _, err := conn.AttachRolePolicy(&iam.AttachRolePolicyInput{ + RoleName: aws.String(role), + PolicyArn: aws.String(arn), + }) + if err != nil { + return err + } + return nil +} + +func detachPolicyFromRole(conn *iam.IAM, role string, arn string) error { + _, err := conn.DetachRolePolicy(&iam.DetachRolePolicyInput{ + RoleName: aws.String(role), + PolicyArn: aws.String(arn), + }) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_saml_provider.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_saml_provider.go new file mode 100644 index 000000000..55496b415 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_saml_provider.go @@ -0,0 +1,130 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/iam" + + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsIamSamlProvider() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsIamSamlProviderCreate, + Read: resourceAwsIamSamlProviderRead, + Update: resourceAwsIamSamlProviderUpdate, + Delete: resourceAwsIamSamlProviderDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "valid_until": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "saml_metadata_document": { + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceAwsIamSamlProviderCreate(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + input := &iam.CreateSAMLProviderInput{ + Name: aws.String(d.Get("name").(string)), + SAMLMetadataDocument: aws.String(d.Get("saml_metadata_document").(string)), + } + + out, err := iamconn.CreateSAMLProvider(input) + if err != nil { + return err + } + + d.SetId(*out.SAMLProviderArn) + + return resourceAwsIamSamlProviderRead(d, meta) +} + +func resourceAwsIamSamlProviderRead(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + input := &iam.GetSAMLProviderInput{ + SAMLProviderArn: aws.String(d.Id()), + } + out, err := iamconn.GetSAMLProvider(input) + if err != nil { + if iamerr, ok := 
err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { + log.Printf("[WARN] IAM SAML Provider %q not found.", d.Id()) + d.SetId("") + return nil + } + return err + } + + validUntil := out.ValidUntil.Format(time.RFC1123) + d.Set("arn", d.Id()) + name, err := extractNameFromIAMSamlProviderArn(d.Id(), meta.(*AWSClient).partition) + if err != nil { + return err + } + d.Set("name", name) + d.Set("valid_until", validUntil) + d.Set("saml_metadata_document", *out.SAMLMetadataDocument) + + return nil +} + +func resourceAwsIamSamlProviderUpdate(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + input := &iam.UpdateSAMLProviderInput{ + SAMLProviderArn: aws.String(d.Id()), + SAMLMetadataDocument: aws.String(d.Get("saml_metadata_document").(string)), + } + _, err := iamconn.UpdateSAMLProvider(input) + if err != nil { + return err + } + + return resourceAwsIamSamlProviderRead(d, meta) +} + +func resourceAwsIamSamlProviderDelete(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + input := &iam.DeleteSAMLProviderInput{ + SAMLProviderArn: aws.String(d.Id()), + } + _, err := iamconn.DeleteSAMLProvider(input) + + return err +} + +func extractNameFromIAMSamlProviderArn(arn, partition string) (string, error) { + // arn:aws:iam::123456789012:saml-provider/tf-salesforce-test + r := regexp.MustCompile(fmt.Sprintf("^arn:%s:iam::[0-9]{12}:saml-provider/(.+)$", partition)) + submatches := r.FindStringSubmatch(arn) + if len(submatches) != 2 { + return "", fmt.Errorf("Unable to extract name from a given ARN: %q", arn) + } + return submatches[1], nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_server_certificate.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_server_certificate.go new file mode 100644 index 000000000..1395d3a0a --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_server_certificate.go @@ -0,0 +1,243 @@ +package aws + +import ( + "crypto/sha1" + "encoding/hex" + "fmt" + "log" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsIAMServerCertificate() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsIAMServerCertificateCreate, + Read: resourceAwsIAMServerCertificateRead, + Delete: resourceAwsIAMServerCertificateDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsIAMServerCertificateImport, + }, + + Schema: map[string]*schema.Schema{ + "certificate_body": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + StateFunc: normalizeCert, + }, + + "certificate_chain": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + StateFunc: normalizeCert, + }, + + "path": { + Type: schema.TypeString, + Optional: true, + Default: "/", + ForceNew: true, + }, + + "private_key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + StateFunc: normalizeCert, + Sensitive: true, + }, + + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 128 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 128 characters", k)) 
+ } + return + }, + }, + + "name_prefix": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 30 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 30 characters, name is limited to 128", k)) + } + return + }, + }, + + "arn": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + } +} + +func resourceAwsIAMServerCertificateCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iamconn + + var sslCertName string + if v, ok := d.GetOk("name"); ok { + sslCertName = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + sslCertName = resource.PrefixedUniqueId(v.(string)) + } else { + sslCertName = resource.UniqueId() + } + + createOpts := &iam.UploadServerCertificateInput{ + CertificateBody: aws.String(d.Get("certificate_body").(string)), + PrivateKey: aws.String(d.Get("private_key").(string)), + ServerCertificateName: aws.String(sslCertName), + } + + if v, ok := d.GetOk("certificate_chain"); ok { + createOpts.CertificateChain = aws.String(v.(string)) + } + + if v, ok := d.GetOk("path"); ok { + createOpts.Path = aws.String(v.(string)) + } + + log.Printf("[DEBUG] Creating IAM Server Certificate with opts: %s", createOpts) + resp, err := conn.UploadServerCertificate(createOpts) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + return fmt.Errorf("[WARN] Error uploading server certificate, error: %s: %s", awsErr.Code(), awsErr.Message()) + } + return fmt.Errorf("[WARN] Error uploading server certificate, error: %s", err) + } + + d.SetId(*resp.ServerCertificateMetadata.ServerCertificateId) + d.Set("name", sslCertName) + + return resourceAwsIAMServerCertificateRead(d, meta) +} + +func resourceAwsIAMServerCertificateRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iamconn + resp, err := conn.GetServerCertificate(&iam.GetServerCertificateInput{ + ServerCertificateName: aws.String(d.Get("name").(string)), + }) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "NoSuchEntity" { + log.Printf("[WARN] IAM Server Cert (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("[WARN] Error reading IAM Server Certificate: %s: %s", awsErr.Code(), awsErr.Message()) + } + return fmt.Errorf("[WARN] Error reading IAM Server Certificate: %s", err) + } + + d.SetId(*resp.ServerCertificate.ServerCertificateMetadata.ServerCertificateId) + + // these values should always be present, and have a default if not set in + // configuration, and so safe to reference with nil checks + d.Set("certificate_body", normalizeCert(resp.ServerCertificate.CertificateBody)) + + c := normalizeCert(resp.ServerCertificate.CertificateChain) + if c != "" { + d.Set("certificate_chain", c) + } + + d.Set("path", resp.ServerCertificate.ServerCertificateMetadata.Path) + d.Set("arn", resp.ServerCertificate.ServerCertificateMetadata.Arn) + + return nil +} + +func resourceAwsIAMServerCertificateDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iamconn + log.Printf("[INFO] Deleting IAM Server Certificate: %s", d.Id()) + err := resource.Retry(10*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteServerCertificate(&iam.DeleteServerCertificateInput{ + ServerCertificateName: aws.String(d.Get("name").(string)), + }) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if 
awsErr.Code() == "DeleteConflict" && strings.Contains(awsErr.Message(), "currently in use by arn") { + log.Printf("[WARN] Conflict deleting server certificate: %s, retrying", awsErr.Message()) + return resource.RetryableError(err) + } + if awsErr.Code() == "NoSuchEntity" { + log.Printf("[WARN] IAM Server Certificate (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + } + return resource.NonRetryableError(err) + } + return nil + }) + + if err != nil { + return err + } + + d.SetId("") + return nil +} + +func resourceAwsIAMServerCertificateImport( + d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("name", d.Id()) + // private_key can't be fetched from any API call + return []*schema.ResourceData{d}, nil +} + +func normalizeCert(cert interface{}) string { + if cert == nil || cert == (*string)(nil) { + return "" + } + + var rawCert string + switch cert.(type) { + case string: + rawCert = cert.(string) + case *string: + rawCert = *cert.(*string) + default: + return "" + } + + cleanVal := sha1.Sum(stripCR([]byte(strings.TrimSpace(rawCert)))) + return hex.EncodeToString(cleanVal[:]) +} + +// strip CRs from raw literals. Lifted from go/scanner/scanner.go +// See https://github.com/golang/go/blob/release-branch.go1.6/src/go/scanner/scanner.go#L479 +func stripCR(b []byte) []byte { + c := make([]byte, len(b)) + i := 0 + for _, ch := range b { + if ch != '\r' { + c[i] = ch + i++ + } + } + return c[:i] +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user.go new file mode 100644 index 000000000..e2ebdd736 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user.go @@ -0,0 +1,249 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/iam" + + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsIamUser() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsIamUserCreate, + Read: resourceAwsIamUserRead, + Update: resourceAwsIamUserUpdate, + Delete: resourceAwsIamUserDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + /* + The UniqueID could be used as the Id(), but none of the API + calls allow specifying a user by the UniqueID: they require the + name. The only way to locate a user by UniqueID is to list them + all and that would make this provider unnecessarily complex + and inefficient. Still, there are other reasons one might want + the UniqueID, so we can make it available. 
+ */ + "unique_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAwsIamUserName, + }, + "path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "/", + ForceNew: true, + }, + "force_destroy": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Delete user even if it has non-Terraform-managed IAM access keys, login profile or MFA devices", + }, + }, + } +} + +func resourceAwsIamUserCreate(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + name := d.Get("name").(string) + path := d.Get("path").(string) + + request := &iam.CreateUserInput{ + Path: aws.String(path), + UserName: aws.String(name), + } + + log.Println("[DEBUG] Create IAM User request:", request) + createResp, err := iamconn.CreateUser(request) + if err != nil { + return fmt.Errorf("Error creating IAM User %s: %s", name, err) + } + d.SetId(*createResp.User.UserName) + return resourceAwsIamUserReadResult(d, createResp.User) +} + +func resourceAwsIamUserRead(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + request := &iam.GetUserInput{ + UserName: aws.String(d.Id()), + } + + getResp, err := iamconn.GetUser(request) + if err != nil { + if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { // XXX test me + log.Printf("[WARN] No IAM user by name (%s) found", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("Error reading IAM User %s: %s", d.Id(), err) + } + return resourceAwsIamUserReadResult(d, getResp.User) +} + +func resourceAwsIamUserReadResult(d *schema.ResourceData, user *iam.User) error { + if err := d.Set("name", user.UserName); err != nil { + return err + } + if err := d.Set("arn", user.Arn); err != nil { + return err + } + if err := d.Set("path", user.Path); err != nil { + return err + } + if err := d.Set("unique_id", user.UserId); err != nil { + return err + } + return nil +} + +func resourceAwsIamUserUpdate(d *schema.ResourceData, meta interface{}) error { + if d.HasChange("name") || d.HasChange("path") { + iamconn := meta.(*AWSClient).iamconn + on, nn := d.GetChange("name") + _, np := d.GetChange("path") + + request := &iam.UpdateUserInput{ + UserName: aws.String(on.(string)), + NewUserName: aws.String(nn.(string)), + NewPath: aws.String(np.(string)), + } + + log.Println("[DEBUG] Update IAM User request:", request) + _, err := iamconn.UpdateUser(request) + if err != nil { + if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { + log.Printf("[WARN] No IAM user by name (%s) found", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("Error updating IAM User %s: %s", d.Id(), err) + } + return resourceAwsIamUserRead(d, meta) + } + return nil +} + +func resourceAwsIamUserDelete(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + // IAM Users must be removed from all groups before they can be deleted + var groups []string + listGroups := &iam.ListGroupsForUserInput{ + UserName: aws.String(d.Id()), + } + pageOfGroups := func(page *iam.ListGroupsForUserOutput, lastPage bool) (shouldContinue bool) { + for _, g := range page.Groups { + groups = append(groups, *g.GroupName) + } + return !lastPage + } + err := iamconn.ListGroupsForUserPages(listGroups, pageOfGroups) + if err != nil { + return fmt.Errorf("Error removing user %q from all groups: %s", d.Id(), err) + } + for _, 
g := range groups { + // use iam group membership func to remove user from all groups + log.Printf("[DEBUG] Removing IAM User %s from IAM Group %s", d.Id(), g) + if err := removeUsersFromGroup(iamconn, []*string{aws.String(d.Id())}, g); err != nil { + return err + } + } + + // All access keys, MFA devices and login profile for the user must be removed + if d.Get("force_destroy").(bool) { + var accessKeys []string + listAccessKeys := &iam.ListAccessKeysInput{ + UserName: aws.String(d.Id()), + } + pageOfAccessKeys := func(page *iam.ListAccessKeysOutput, lastPage bool) (shouldContinue bool) { + for _, k := range page.AccessKeyMetadata { + accessKeys = append(accessKeys, *k.AccessKeyId) + } + return !lastPage + } + err = iamconn.ListAccessKeysPages(listAccessKeys, pageOfAccessKeys) + if err != nil { + return fmt.Errorf("Error removing access keys of user %s: %s", d.Id(), err) + } + for _, k := range accessKeys { + _, err := iamconn.DeleteAccessKey(&iam.DeleteAccessKeyInput{ + UserName: aws.String(d.Id()), + AccessKeyId: aws.String(k), + }) + if err != nil { + return fmt.Errorf("Error deleting access key %s: %s", k, err) + } + } + + var MFADevices []string + listMFADevices := &iam.ListMFADevicesInput{ + UserName: aws.String(d.Id()), + } + pageOfMFADevices := func(page *iam.ListMFADevicesOutput, lastPage bool) (shouldContinue bool) { + for _, m := range page.MFADevices { + MFADevices = append(MFADevices, *m.SerialNumber) + } + return !lastPage + } + err = iamconn.ListMFADevicesPages(listMFADevices, pageOfMFADevices) + if err != nil { + return fmt.Errorf("Error removing MFA devices of user %s: %s", d.Id(), err) + } + for _, m := range MFADevices { + _, err := iamconn.DeactivateMFADevice(&iam.DeactivateMFADeviceInput{ + UserName: aws.String(d.Id()), + SerialNumber: aws.String(m), + }) + if err != nil { + return fmt.Errorf("Error deactivating MFA device %s: %s", m, err) + } + } + + _, err = iamconn.DeleteLoginProfile(&iam.DeleteLoginProfileInput{ + UserName: aws.String(d.Id()), + }) + if err != nil { + if iamerr, ok := err.(awserr.Error); !ok || iamerr.Code() != "NoSuchEntity" { + return fmt.Errorf("Error deleting Account Login Profile: %s", err) + } + } + } + + request := &iam.DeleteUserInput{ + UserName: aws.String(d.Id()), + } + + log.Println("[DEBUG] Delete IAM User request:", request) + if _, err := iamconn.DeleteUser(request); err != nil { + return fmt.Errorf("Error deleting IAM User %s: %s", d.Id(), err) + } + return nil +} + +func validateAwsIamUserName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[0-9A-Za-z=,.@\-_+]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only alphanumeric characters, hyphens, underscores, commas, periods, @ symbols, plus and equals signs allowed in %q: %q", + k, value)) + } + return +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_login_profile.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_login_profile.go new file mode 100644 index 000000000..c60b67fee --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_login_profile.go @@ -0,0 +1,158 @@ +package aws + +import ( + "errors" + "fmt" + "log" + "math/rand" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/encryption" + 
"github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsIamUserLoginProfile() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsIamUserLoginProfileCreate, + Read: schema.Noop, + Update: schema.Noop, + Delete: schema.RemoveFromState, + + Schema: map[string]*schema.Schema{ + "user": { + Type: schema.TypeString, + Required: true, + }, + "pgp_key": { + Type: schema.TypeString, + Required: true, + }, + "password_reset_required": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "password_length": { + Type: schema.TypeInt, + Optional: true, + Default: 20, + ValidateFunc: validateAwsIamLoginProfilePasswordLength, + }, + + "key_fingerprint": { + Type: schema.TypeString, + Computed: true, + }, + "encrypted_password": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func validateAwsIamLoginProfilePasswordLength(v interface{}, _ string) (_ []string, es []error) { + length := v.(int) + if length < 4 { + es = append(es, errors.New("minimum password_length is 4 characters")) + } + if length > 128 { + es = append(es, errors.New("maximum password_length is 128 characters")) + } + return +} + +// generatePassword generates a random password of a given length using +// characters that are likely to satisfy any possible AWS password policy +// (given sufficient length). +func generatePassword(length int) string { + charsets := []string{ + "abcdefghijklmnopqrstuvwxyz", + "ABCDEFGHIJKLMNOPQRSTUVWXYZ", + "012346789", + "!@#$%^&*()_+-=[]{}|'", + } + + // Use all character sets + random := rand.New(rand.NewSource(time.Now().UTC().UnixNano())) + components := make(map[int]byte, length) + for i := 0; i < length; i++ { + charset := charsets[i%len(charsets)] + components[i] = charset[random.Intn(len(charset))] + } + + // Randomise the ordering so we don't end up with a predictable + // lower case, upper case, numeric, symbol pattern + result := make([]byte, length) + i := 0 + for _, b := range components { + result[i] = b + i = i + 1 + } + + return string(result) +} + +func resourceAwsIamUserLoginProfileCreate(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + encryptionKey, err := encryption.RetrieveGPGKey(d.Get("pgp_key").(string)) + if err != nil { + return err + } + + username := d.Get("user").(string) + passwordResetRequired := d.Get("password_reset_required").(bool) + passwordLength := d.Get("password_length").(int) + + _, err = iamconn.GetLoginProfile(&iam.GetLoginProfileInput{ + UserName: aws.String(username), + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "NoSuchEntity" { + // If there is already a login profile, bring it under management (to prevent + // resource creation diffs) - we will never modify it, but obviously cannot + // set the password. 
+ d.SetId(username) + d.Set("key_fingerprint", "") + d.Set("encrypted_password", "") + return nil + } + } + + initialPassword := generatePassword(passwordLength) + fingerprint, encrypted, err := encryption.EncryptValue(encryptionKey, initialPassword, "Password") + if err != nil { + return err + } + + request := &iam.CreateLoginProfileInput{ + UserName: aws.String(username), + Password: aws.String(initialPassword), + PasswordResetRequired: aws.Bool(passwordResetRequired), + } + + log.Println("[DEBUG] Create IAM User Login Profile request:", request) + createResp, err := iamconn.CreateLoginProfile(request) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "EntityAlreadyExists" { + // If there is already a login profile, bring it under management (to prevent + // resource creation diffs) - we will never modify it, but obviously cannot + // set the password. + d.SetId(username) + d.Set("key_fingerprint", "") + d.Set("encrypted_password", "") + return nil + } + return errwrap.Wrapf(fmt.Sprintf("Error creating IAM User Login Profile for %q: {{err}}", username), err) + } + + d.SetId(*createResp.LoginProfile.UserName) + d.Set("key_fingerprint", fingerprint) + d.Set("encrypted_password", encrypted) + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_policy.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_policy.go new file mode 100644 index 000000000..8c835519f --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_policy.go @@ -0,0 +1,129 @@ +package aws + +import ( + "fmt" + "net/url" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/iam" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsIamUserPolicy() *schema.Resource { + return &schema.Resource{ + // PutUserPolicy API is idempotent, so these can be the same. 
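+        // A single Put handler covers both because PutUserPolicy creates the
+        // inline policy on first call and overwrites it afterwards; the
+        // composite "user:policyname" ID set there (e.g. "alice:mypolicy")
+        // lets Read and Delete recover both halves via
+        // resourceAwsIamUserPolicyParseId.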
+ Create: resourceAwsIamUserPolicyPut, + Update: resourceAwsIamUserPolicyPut, + + Read: resourceAwsIamUserPolicyRead, + Delete: resourceAwsIamUserPolicyDelete, + + Schema: map[string]*schema.Schema{ + "policy": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + }, + "name_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "user": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsIamUserPolicyPut(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + request := &iam.PutUserPolicyInput{ + UserName: aws.String(d.Get("user").(string)), + PolicyDocument: aws.String(d.Get("policy").(string)), + } + + var policyName string + if v, ok := d.GetOk("name"); ok { + policyName = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + policyName = resource.PrefixedUniqueId(v.(string)) + } else { + policyName = resource.UniqueId() + } + request.PolicyName = aws.String(policyName) + + if _, err := iamconn.PutUserPolicy(request); err != nil { + return fmt.Errorf("Error putting IAM user policy %s: %s", *request.PolicyName, err) + } + + d.SetId(fmt.Sprintf("%s:%s", *request.UserName, *request.PolicyName)) + return nil +} + +func resourceAwsIamUserPolicyRead(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + user, name := resourceAwsIamUserPolicyParseId(d.Id()) + + request := &iam.GetUserPolicyInput{ + PolicyName: aws.String(name), + UserName: aws.String(user), + } + + var err error + getResp, err := iamconn.GetUserPolicy(request) + if err != nil { + if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { // XXX test me + d.SetId("") + return nil + } + return fmt.Errorf("Error reading IAM policy %s from user %s: %s", name, user, err) + } + + if getResp.PolicyDocument == nil { + return fmt.Errorf("GetUserPolicy returned a nil policy document") + } + + policy, err := url.QueryUnescape(*getResp.PolicyDocument) + if err != nil { + return err + } + return d.Set("policy", policy) +} + +func resourceAwsIamUserPolicyDelete(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + user, name := resourceAwsIamUserPolicyParseId(d.Id()) + + request := &iam.DeleteUserPolicyInput{ + PolicyName: aws.String(name), + UserName: aws.String(user), + } + + if _, err := iamconn.DeleteUserPolicy(request); err != nil { + return fmt.Errorf("Error deleting IAM user policy %s: %s", d.Id(), err) + } + return nil +} + +func resourceAwsIamUserPolicyParseId(id string) (userName, policyName string) { + parts := strings.SplitN(id, ":", 2) + userName = parts[0] + policyName = parts[1] + return +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_policy_attachment.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_policy_attachment.go new file mode 100644 index 000000000..15f7e8779 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_policy_attachment.go @@ -0,0 +1,123 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/hashicorp/terraform/helper/resource" + 
"github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsIamUserPolicyAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsIamUserPolicyAttachmentCreate, + Read: resourceAwsIamUserPolicyAttachmentRead, + Delete: resourceAwsIamUserPolicyAttachmentDelete, + + Schema: map[string]*schema.Schema{ + "user": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "policy_arn": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsIamUserPolicyAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iamconn + + user := d.Get("user").(string) + arn := d.Get("policy_arn").(string) + + err := attachPolicyToUser(conn, user, arn) + if err != nil { + return fmt.Errorf("[WARN] Error attaching policy %s to IAM User %s: %v", arn, user, err) + } + + d.SetId(resource.PrefixedUniqueId(fmt.Sprintf("%s-", user))) + return resourceAwsIamUserPolicyAttachmentRead(d, meta) +} + +func resourceAwsIamUserPolicyAttachmentRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iamconn + user := d.Get("user").(string) + arn := d.Get("policy_arn").(string) + + _, err := conn.GetUser(&iam.GetUserInput{ + UserName: aws.String(user), + }) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "NoSuchEntity" { + log.Printf("[WARN] No such entity found for Policy Attachment (%s)", user) + d.SetId("") + return nil + } + } + return err + } + + attachedPolicies, err := conn.ListAttachedUserPolicies(&iam.ListAttachedUserPoliciesInput{ + UserName: aws.String(user), + }) + if err != nil { + return err + } + + var policy string + for _, p := range attachedPolicies.AttachedPolicies { + if *p.PolicyArn == arn { + policy = *p.PolicyArn + } + } + + if policy == "" { + log.Printf("[WARN] No such User found for Policy Attachment (%s)", user) + d.SetId("") + } + return nil +} + +func resourceAwsIamUserPolicyAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).iamconn + user := d.Get("user").(string) + arn := d.Get("policy_arn").(string) + + err := detachPolicyFromUser(conn, user, arn) + if err != nil { + return fmt.Errorf("[WARN] Error removing policy %s from IAM User %s: %v", arn, user, err) + } + return nil +} + +func attachPolicyToUser(conn *iam.IAM, user string, arn string) error { + _, err := conn.AttachUserPolicy(&iam.AttachUserPolicyInput{ + UserName: aws.String(user), + PolicyArn: aws.String(arn), + }) + if err != nil { + return err + } + return nil +} + +func detachPolicyFromUser(conn *iam.IAM, user string, arn string) error { + _, err := conn.DetachUserPolicy(&iam.DetachUserPolicyInput{ + UserName: aws.String(user), + PolicyArn: aws.String(arn), + }) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_ssh_key.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_ssh_key.go new file mode 100644 index 000000000..646eea6aa --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_iam_user_ssh_key.go @@ -0,0 +1,153 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/iam" + + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsIamUserSshKey() *schema.Resource { + return 
+	return &schema.Resource{
+		Create: resourceAwsIamUserSshKeyCreate,
+		Read:   resourceAwsIamUserSshKeyRead,
+		Update: resourceAwsIamUserSshKeyUpdate,
+		Delete: resourceAwsIamUserSshKeyDelete,
+
+		Schema: map[string]*schema.Schema{
+			"ssh_public_key_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"fingerprint": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"username": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"public_key": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+			},
+
+			"encoding": &schema.Schema{
+				Type:         schema.TypeString,
+				Required:     true,
+				ValidateFunc: validateIamUserSSHKeyEncoding,
+			},
+
+			"status": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceAwsIamUserSshKeyCreate(d *schema.ResourceData, meta interface{}) error {
+	iamconn := meta.(*AWSClient).iamconn
+	username := d.Get("username").(string)
+	publicKey := d.Get("public_key").(string)
+
+	request := &iam.UploadSSHPublicKeyInput{
+		UserName:         aws.String(username),
+		SSHPublicKeyBody: aws.String(publicKey),
+	}
+
+	log.Println("[DEBUG] Create IAM User SSH Key Request:", request)
+	createResp, err := iamconn.UploadSSHPublicKey(request)
+	if err != nil {
+		return fmt.Errorf("Error creating IAM User SSH Key %s: %s", username, err)
+	}
+
+	d.Set("ssh_public_key_id", createResp.SSHPublicKey.SSHPublicKeyId)
+	d.SetId(*createResp.SSHPublicKey.SSHPublicKeyId)
+
+	return resourceAwsIamUserSshKeyRead(d, meta)
+}
+
+func resourceAwsIamUserSshKeyRead(d *schema.ResourceData, meta interface{}) error {
+	iamconn := meta.(*AWSClient).iamconn
+	username := d.Get("username").(string)
+	request := &iam.GetSSHPublicKeyInput{
+		UserName:       aws.String(username),
+		SSHPublicKeyId: aws.String(d.Id()),
+		Encoding:       aws.String(d.Get("encoding").(string)),
+	}
+
+	getResp, err := iamconn.GetSSHPublicKey(request)
+	if err != nil {
+		if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { // XXX test me
+			log.Printf("[WARN] No IAM user ssh key (%s) found", d.Id())
+			d.SetId("")
+			return nil
+		}
+		return fmt.Errorf("Error reading IAM User SSH Key %s: %s", d.Id(), err)
+	}
+
+	d.Set("fingerprint", getResp.SSHPublicKey.Fingerprint)
+	d.Set("status", getResp.SSHPublicKey.Status)
+
+	return nil
+}
+
+func resourceAwsIamUserSshKeyUpdate(d *schema.ResourceData, meta interface{}) error {
+	if d.HasChange("status") {
+		iamconn := meta.(*AWSClient).iamconn
+
+		request := &iam.UpdateSSHPublicKeyInput{
+			UserName:       aws.String(d.Get("username").(string)),
+			SSHPublicKeyId: aws.String(d.Id()),
+			Status:         aws.String(d.Get("status").(string)),
+		}
+
+		log.Println("[DEBUG] Update IAM User SSH Key request:", request)
+		_, err := iamconn.UpdateSSHPublicKey(request)
+		if err != nil {
+			if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" {
+				log.Printf("[WARN] No IAM user ssh key by ID (%s) found", d.Id())
+				d.SetId("")
+				return nil
+			}
+			return fmt.Errorf("Error updating IAM User SSH Key %s: %s", d.Id(), err)
+		}
+		return resourceAwsIamUserSshKeyRead(d, meta)
+	}
+	return nil
+}
+
+func resourceAwsIamUserSshKeyDelete(d *schema.ResourceData, meta interface{}) error {
+	iamconn := meta.(*AWSClient).iamconn
+
+	request := &iam.DeleteSSHPublicKeyInput{
+		UserName:       aws.String(d.Get("username").(string)),
+		SSHPublicKeyId: aws.String(d.Id()),
+	}
+
+	log.Println("[DEBUG] Delete IAM User SSH Key request:", request)
+	if _, err := iamconn.DeleteSSHPublicKey(request); err != nil {
+		return
fmt.Errorf("Error deleting IAM User SSH Key %s: %s", d.Id(), err) + } + return nil +} + +func validateIamUserSSHKeyEncoding(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + encodingTypes := map[string]bool{ + "PEM": true, + "SSH": true, + } + + if !encodingTypes[value] { + errors = append(errors, fmt.Errorf("IAM User SSH Key Encoding can only be PEM or SSH")) + } + return +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_inspector_assessment_target.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_inspector_assessment_target.go new file mode 100644 index 000000000..b6c2c80c0 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_inspector_assessment_target.go @@ -0,0 +1,123 @@ +package aws + +import ( + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/inspector" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAWSInspectorAssessmentTarget() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsInspectorAssessmentTargetCreate, + Read: resourceAwsInspectorAssessmentTargetRead, + Update: resourceAwsInspectorAssessmentTargetUpdate, + Delete: resourceAwsInspectorAssessmentTargetDelete, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "resource_group_arn": { + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceAwsInspectorAssessmentTargetCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).inspectorconn + + targetName := d.Get("name").(string) + resourceGroupArn := d.Get("resource_group_arn").(string) + + resp, err := conn.CreateAssessmentTarget(&inspector.CreateAssessmentTargetInput{ + AssessmentTargetName: aws.String(targetName), + ResourceGroupArn: aws.String(resourceGroupArn), + }) + if err != nil { + return err + } + log.Printf("[DEBUG] Inspector Assessment %s created", *resp.AssessmentTargetArn) + + d.Set("arn", resp.AssessmentTargetArn) + d.SetId(*resp.AssessmentTargetArn) + + return resourceAwsInspectorAssessmentTargetRead(d, meta) +} + +func resourceAwsInspectorAssessmentTargetRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).inspectorconn + + resp, err := conn.DescribeAssessmentTargets(&inspector.DescribeAssessmentTargetsInput{ + AssessmentTargetArns: []*string{ + aws.String(d.Id()), + }, + }) + + if err != nil { + if inspectorerr, ok := err.(awserr.Error); ok && inspectorerr.Code() == "InvalidInputException" { + return nil + } else { + log.Printf("[ERROR] Error finding Inspector Assessment Target: %s", err) + return err + } + } + + if resp.AssessmentTargets != nil && len(resp.AssessmentTargets) > 0 { + d.Set("name", resp.AssessmentTargets[0].Name) + } + + return nil +} + +func resourceAwsInspectorAssessmentTargetUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).inspectorconn + + input := inspector.UpdateAssessmentTargetInput{ + AssessmentTargetArn: aws.String(d.Id()), + AssessmentTargetName: aws.String(d.Get("name").(string)), + ResourceGroupArn: aws.String(d.Get("resource_group_arn").(string)), + } + + _, err := conn.UpdateAssessmentTarget(&input) + if err != nil { + return err + } + + log.Println("[DEBUG] Inspector 
Assessment Target updated") + + return resourceAwsInspectorAssessmentTargetRead(d, meta) +} + +func resourceAwsInspectorAssessmentTargetDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).inspectorconn + + return resource.Retry(60*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteAssessmentTarget(&inspector.DeleteAssessmentTargetInput{ + AssessmentTargetArn: aws.String(d.Id()), + }) + if err != nil { + if inspectorerr, ok := err.(awserr.Error); ok && inspectorerr.Code() == "AssessmentRunInProgressException" { + log.Printf("[ERROR] Assement Run in progress: %s", err) + return resource.RetryableError(err) + } else { + log.Printf("[ERROR] Error deleting Assement Target: %s", err) + return resource.NonRetryableError(err) + } + } + return nil + }) + +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_inspector_assessment_template.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_inspector_assessment_template.go new file mode 100644 index 000000000..4856a000c --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_inspector_assessment_template.go @@ -0,0 +1,121 @@ +package aws + +import ( + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/inspector" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAWSInspectorAssessmentTemplate() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsInspectorAssessmentTemplateCreate, + Read: resourceAwsInspectorAssessmentTemplateRead, + Delete: resourceAwsInspectorAssessmentTemplateDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "target_arn": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + ForceNew: true, + }, + "duration": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "rules_package_arns": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsInspectorAssessmentTemplateCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).inspectorconn + + rules := []*string{} + if attr := d.Get("rules_package_arns").(*schema.Set); attr.Len() > 0 { + rules = expandStringList(attr.List()) + } + + targetArn := d.Get("target_arn").(string) + templateName := d.Get("name").(string) + duration := int64(d.Get("duration").(int)) + + resp, err := conn.CreateAssessmentTemplate(&inspector.CreateAssessmentTemplateInput{ + AssessmentTargetArn: aws.String(targetArn), + AssessmentTemplateName: aws.String(templateName), + DurationInSeconds: aws.Int64(duration), + RulesPackageArns: rules, + }) + if err != nil { + return err + } + log.Printf("[DEBUG] Inspector Assessment Template %s created", *resp.AssessmentTemplateArn) + + d.Set("arn", resp.AssessmentTemplateArn) + + d.SetId(*resp.AssessmentTemplateArn) + + return resourceAwsInspectorAssessmentTemplateRead(d, meta) +} + +func resourceAwsInspectorAssessmentTemplateRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).inspectorconn + + resp, err := conn.DescribeAssessmentTemplates(&inspector.DescribeAssessmentTemplatesInput{ + AssessmentTemplateArns: 
+		AssessmentTemplateArns: []*string{
+			aws.String(d.Id()),
+		},
+	},
+	)
+	if err != nil {
+		if inspectorerr, ok := err.(awserr.Error); ok && inspectorerr.Code() == "InvalidInputException" {
+			return nil
+		} else {
+			log.Printf("[ERROR] Error finding Inspector Assessment Template: %s", err)
+			return err
+		}
+	}
+
+	if resp.AssessmentTemplates != nil && len(resp.AssessmentTemplates) > 0 {
+		d.Set("name", resp.AssessmentTemplates[0].Name)
+	}
+	return nil
+}
+
+func resourceAwsInspectorAssessmentTemplateDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).inspectorconn
+
+	_, err := conn.DeleteAssessmentTemplate(&inspector.DeleteAssessmentTemplateInput{
+		AssessmentTemplateArn: aws.String(d.Id()),
+	})
+	if err != nil {
+		if inspectorerr, ok := err.(awserr.Error); ok && inspectorerr.Code() == "AssessmentRunInProgressException" {
+			log.Printf("[ERROR] Assessment Run in progress: %s", err)
+			return err
+		} else {
+			log.Printf("[ERROR] Error deleting Assessment Template: %s", err)
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_inspector_resource_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_inspector_resource_group.go
new file mode 100644
index 000000000..55f56696c
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_inspector_resource_group.go
@@ -0,0 +1,76 @@
+package aws
+
+import (
+	"log"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/inspector"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAWSInspectorResourceGroup() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsInspectorResourceGroupCreate,
+		Read:   resourceAwsInspectorResourceGroupRead,
+		Delete: resourceAwsInspectorResourceGroupDelete,
+
+		Schema: map[string]*schema.Schema{
+			"tags": &schema.Schema{
+				ForceNew: true,
+				Type:     schema.TypeMap,
+				Required: true,
+			},
+			"arn": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceAwsInspectorResourceGroupCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).inspectorconn
+
+	resp, err := conn.CreateResourceGroup(&inspector.CreateResourceGroupInput{
+		ResourceGroupTags: tagsFromMapInspector(d.Get("tags").(map[string]interface{})),
+	})
+
+	if err != nil {
+		return err
+	}
+
+	d.Set("arn", *resp.ResourceGroupArn)
+
+	d.SetId(*resp.ResourceGroupArn)
+
+	return resourceAwsInspectorResourceGroupRead(d, meta)
+}
+
+func resourceAwsInspectorResourceGroupRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).inspectorconn
+
+	_, err := conn.DescribeResourceGroups(&inspector.DescribeResourceGroupsInput{
+		ResourceGroupArns: []*string{
+			aws.String(d.Id()),
+		},
+	})
+
+	if err != nil {
+		if inspectorerr, ok := err.(awserr.Error); ok && inspectorerr.Code() == "InvalidInputException" {
+			return nil
+		} else {
+			log.Printf("[ERROR] Error finding Inspector resource group: %s", err)
+			return err
+		}
+	}
+
+	return nil
+}
+
+func resourceAwsInspectorResourceGroupDelete(d *schema.ResourceData, meta interface{}) error {
+	d.Set("arn", "")
+	d.SetId("")
+
+	return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_instance.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_instance.go
new file mode 100644
index 000000000..400aaf2e0
--- /dev/null
+++
b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_instance.go @@ -0,0 +1,1691 @@ +package aws + +import ( + "bytes" + "crypto/sha1" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "log" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsInstanceCreate, + Read: resourceAwsInstanceRead, + Update: resourceAwsInstanceUpdate, + Delete: resourceAwsInstanceDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + SchemaVersion: 1, + MigrateState: resourceAwsInstanceMigrateState, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "ami": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "associate_public_ip_address": { + Type: schema.TypeBool, + ForceNew: true, + Computed: true, + Optional: true, + }, + + "availability_zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "placement_group": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "instance_type": { + Type: schema.TypeString, + Required: true, + }, + + "key_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "subnet_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "private_ip": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "source_dest_check": { + Type: schema.TypeBool, + Optional: true, + Default: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + // Suppress diff if network_interface is set + _, ok := d.GetOk("network_interface") + return ok + }, + }, + + "user_data": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + StateFunc: func(v interface{}) string { + switch v.(type) { + case string: + return userDataHashSum(v.(string)) + default: + return "" + } + }, + }, + + "security_groups": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "vpc_security_group_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "public_dns": { + Type: schema.TypeString, + Computed: true, + }, + + // TODO: Deprecate me v0.10.0 + "network_interface_id": { + Type: schema.TypeString, + Computed: true, + Deprecated: "Please use `primary_network_interface_id` instead", + }, + + "primary_network_interface_id": { + Type: schema.TypeString, + Computed: true, + }, + + "network_interface": { + ConflictsWith: []string{"associate_public_ip_address", "subnet_id", "private_ip", "vpc_security_group_ids", "security_groups", "ipv6_addresses", "ipv6_address_count", "source_dest_check"}, + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "delete_on_termination": { + Type: schema.TypeBool, + Default: false, + 
Optional: true, + ForceNew: true, + }, + "network_interface_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "device_index": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + }, + }, + }, + + "public_ip": { + Type: schema.TypeString, + Computed: true, + }, + + "instance_state": { + Type: schema.TypeString, + Computed: true, + }, + + "private_dns": { + Type: schema.TypeString, + Computed: true, + }, + + "ebs_optimized": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + + "disable_api_termination": { + Type: schema.TypeBool, + Optional: true, + }, + + "instance_initiated_shutdown_behavior": { + Type: schema.TypeString, + Optional: true, + }, + + "monitoring": { + Type: schema.TypeBool, + Optional: true, + }, + + "iam_instance_profile": { + Type: schema.TypeString, + Optional: true, + }, + + "ipv6_address_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "ipv6_addresses": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "tenancy": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "tags": tagsSchema(), + + "volume_tags": tagsSchemaComputed(), + + "block_device": { + Type: schema.TypeMap, + Optional: true, + Removed: "Split out into three sub-types; see Changelog and Docs", + }, + + "ebs_block_device": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "delete_on_termination": { + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + + "device_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "encrypted": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "iops": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "snapshot_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "volume_size": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "volume_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["snapshot_id"].(string))) + return hashcode.String(buf.String()) + }, + }, + + "ephemeral_block_device": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "device_name": { + Type: schema.TypeString, + Required: true, + }, + + "virtual_name": { + Type: schema.TypeString, + Optional: true, + }, + + "no_device": { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["virtual_name"].(string))) + if v, ok := m["no_device"].(bool); ok && v { + buf.WriteString(fmt.Sprintf("%t-", v)) + } + return hashcode.String(buf.String()) + }, + }, + + "root_block_device": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + // "You can only modify the volume size, volume type, and Delete on + // 
Termination flag on the block device mapping entry for the root + // device volume." - bit.ly/ec2bdmap + Schema: map[string]*schema.Schema{ + "delete_on_termination": { + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + + "iops": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "volume_size": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "volume_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + }, + }, + }, + } +} + +func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + instanceOpts, err := buildAwsInstanceOpts(d, meta) + if err != nil { + return err + } + + // Build the creation struct + runOpts := &ec2.RunInstancesInput{ + BlockDeviceMappings: instanceOpts.BlockDeviceMappings, + DisableApiTermination: instanceOpts.DisableAPITermination, + EbsOptimized: instanceOpts.EBSOptimized, + Monitoring: instanceOpts.Monitoring, + IamInstanceProfile: instanceOpts.IAMInstanceProfile, + ImageId: instanceOpts.ImageID, + InstanceInitiatedShutdownBehavior: instanceOpts.InstanceInitiatedShutdownBehavior, + InstanceType: instanceOpts.InstanceType, + Ipv6AddressCount: instanceOpts.Ipv6AddressCount, + Ipv6Addresses: instanceOpts.Ipv6Addresses, + KeyName: instanceOpts.KeyName, + MaxCount: aws.Int64(int64(1)), + MinCount: aws.Int64(int64(1)), + NetworkInterfaces: instanceOpts.NetworkInterfaces, + Placement: instanceOpts.Placement, + PrivateIpAddress: instanceOpts.PrivateIPAddress, + SecurityGroupIds: instanceOpts.SecurityGroupIDs, + SecurityGroups: instanceOpts.SecurityGroups, + SubnetId: instanceOpts.SubnetID, + UserData: instanceOpts.UserData64, + } + + _, ipv6CountOk := d.GetOk("ipv6_address_count") + _, ipv6AddressOk := d.GetOk("ipv6_addresses") + + if ipv6AddressOk && ipv6CountOk { + return fmt.Errorf("Only 1 of `ipv6_address_count` or `ipv6_addresses` can be specified") + } + + restricted := meta.(*AWSClient).IsGovCloud() || meta.(*AWSClient).IsChinaCloud() + if !restricted { + tagsSpec := make([]*ec2.TagSpecification, 0) + + if v, ok := d.GetOk("tags"); ok { + tags := tagsFromMap(v.(map[string]interface{})) + + spec := &ec2.TagSpecification{ + ResourceType: aws.String("instance"), + Tags: tags, + } + + tagsSpec = append(tagsSpec, spec) + } + + if v, ok := d.GetOk("volume_tags"); ok { + tags := tagsFromMap(v.(map[string]interface{})) + + spec := &ec2.TagSpecification{ + ResourceType: aws.String("volume"), + Tags: tags, + } + + tagsSpec = append(tagsSpec, spec) + } + + if len(tagsSpec) > 0 { + runOpts.TagSpecifications = tagsSpec + } + } + + // Create the instance + log.Printf("[DEBUG] Run configuration: %s", runOpts) + + var runResp *ec2.Reservation + err = resource.Retry(30*time.Second, func() *resource.RetryError { + var err error + runResp, err = conn.RunInstances(runOpts) + // IAM instance profiles can take ~10 seconds to propagate in AWS: + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console + if isAWSErr(err, "InvalidParameterValue", "Invalid IAM Instance Profile") { + log.Print("[DEBUG] Invalid IAM Instance Profile referenced, retrying...") + return resource.RetryableError(err) + } + // IAM roles can also take time to propagate in AWS: + if isAWSErr(err, "InvalidParameterValue", " has no associated IAM Roles") { + log.Print("[DEBUG] IAM Instance Profile appears to have no IAM roles, retrying...") + 
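+			// Returning a retryable error keeps the resource.Retry loop above polling
+			// (30 second budget) while IAM's eventually consistent data propagates.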
return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + }) + // Warn if the AWS Error involves group ids, to help identify situation + // where a user uses group ids in security_groups for the Default VPC. + // See https://github.com/hashicorp/terraform/issues/3798 + if isAWSErr(err, "InvalidParameterValue", "groupId is invalid") { + return fmt.Errorf("Error launching instance, possible mismatch of Security Group IDs and Names. See AWS Instance docs here: %s.\n\n\tAWS Error: %s", "https://terraform.io/docs/providers/aws/r/instance.html", err.(awserr.Error).Message()) + } + if err != nil { + return fmt.Errorf("Error launching source instance: %s", err) + } + if runResp == nil || len(runResp.Instances) == 0 { + return errors.New("Error launching source instance: no instances returned in response") + } + + instance := runResp.Instances[0] + log.Printf("[INFO] Instance ID: %s", *instance.InstanceId) + + // Store the resulting ID so we can look this up later + d.SetId(*instance.InstanceId) + + // Wait for the instance to become running so we can get some attributes + // that aren't available until later. + log.Printf( + "[DEBUG] Waiting for instance (%s) to become running", + *instance.InstanceId) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"running"}, + Refresh: InstanceStateRefreshFunc(conn, *instance.InstanceId, "terminated"), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + instanceRaw, err := stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for instance (%s) to become ready: %s", + *instance.InstanceId, err) + } + + instance = instanceRaw.(*ec2.Instance) + + // Initialize the connection info + if instance.PublicIpAddress != nil { + d.SetConnInfo(map[string]string{ + "type": "ssh", + "host": *instance.PublicIpAddress, + }) + } else if instance.PrivateIpAddress != nil { + d.SetConnInfo(map[string]string{ + "type": "ssh", + "host": *instance.PrivateIpAddress, + }) + } + + // Update if we need to + return resourceAwsInstanceUpdate(d, meta) +} + +func resourceAwsInstanceRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + resp, err := conn.DescribeInstances(&ec2.DescribeInstancesInput{ + InstanceIds: []*string{aws.String(d.Id())}, + }) + if err != nil { + // If the instance was not found, return nil so that we can show + // that the instance is gone. 
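+	// Clearing the ID below marks the resource as deleted in state, so the next
+	// plan proposes recreation instead of failing the refresh.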
+ if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidInstanceID.NotFound" { + d.SetId("") + return nil + } + + // Some other error, report it + return err + } + + // If nothing was found, then return no state + if len(resp.Reservations) == 0 { + d.SetId("") + return nil + } + + instance := resp.Reservations[0].Instances[0] + + if instance.State != nil { + // If the instance is terminated, then it is gone + if *instance.State.Name == "terminated" { + d.SetId("") + return nil + } + + d.Set("instance_state", instance.State.Name) + } + + if instance.Placement != nil { + d.Set("availability_zone", instance.Placement.AvailabilityZone) + } + if instance.Placement.Tenancy != nil { + d.Set("tenancy", instance.Placement.Tenancy) + } + + d.Set("ami", instance.ImageId) + d.Set("instance_type", instance.InstanceType) + d.Set("key_name", instance.KeyName) + d.Set("public_dns", instance.PublicDnsName) + d.Set("public_ip", instance.PublicIpAddress) + d.Set("private_dns", instance.PrivateDnsName) + d.Set("private_ip", instance.PrivateIpAddress) + d.Set("iam_instance_profile", iamInstanceProfileArnToName(instance.IamInstanceProfile)) + + // Set configured Network Interface Device Index Slice + // We only want to read, and populate state for the configured network_interface attachments. Otherwise, other + // resources have the potential to attach network interfaces to the instance, and cause a perpetual create/destroy + // diff. We should only read on changes configured for this specific resource because of this. + var configuredDeviceIndexes []int + if v, ok := d.GetOk("network_interface"); ok { + vL := v.(*schema.Set).List() + for _, vi := range vL { + mVi := vi.(map[string]interface{}) + configuredDeviceIndexes = append(configuredDeviceIndexes, mVi["device_index"].(int)) + } + } + + var ipv6Addresses []string + if len(instance.NetworkInterfaces) > 0 { + var primaryNetworkInterface ec2.InstanceNetworkInterface + var networkInterfaces []map[string]interface{} + for _, iNi := range instance.NetworkInterfaces { + ni := make(map[string]interface{}) + if *iNi.Attachment.DeviceIndex == 0 { + primaryNetworkInterface = *iNi + } + // If the attached network device is inside our configuration, refresh state with values found. + // Otherwise, assume the network device was attached via an outside resource. 
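+			// (for example, an ENI attached by a separate aws_network_interface_attachment
+			// resource; treating those as out of scope avoids a perpetual diff)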
+ for _, index := range configuredDeviceIndexes { + if index == int(*iNi.Attachment.DeviceIndex) { + ni["device_index"] = *iNi.Attachment.DeviceIndex + ni["network_interface_id"] = *iNi.NetworkInterfaceId + ni["delete_on_termination"] = *iNi.Attachment.DeleteOnTermination + } + } + // Don't add empty network interfaces to schema + if len(ni) == 0 { + continue + } + networkInterfaces = append(networkInterfaces, ni) + } + if err := d.Set("network_interface", networkInterfaces); err != nil { + return fmt.Errorf("Error setting network_interfaces: %v", err) + } + + // Set primary network interface details + // If an instance is shutting down, network interfaces are detached, and attributes may be nil, + // need to protect against nil pointer dereferences + if primaryNetworkInterface.SubnetId != nil { + d.Set("subnet_id", primaryNetworkInterface.SubnetId) + } + if primaryNetworkInterface.NetworkInterfaceId != nil { + d.Set("network_interface_id", primaryNetworkInterface.NetworkInterfaceId) // TODO: Deprecate me v0.10.0 + d.Set("primary_network_interface_id", primaryNetworkInterface.NetworkInterfaceId) + } + if primaryNetworkInterface.Ipv6Addresses != nil { + d.Set("ipv6_address_count", len(primaryNetworkInterface.Ipv6Addresses)) + } + if primaryNetworkInterface.SourceDestCheck != nil { + d.Set("source_dest_check", primaryNetworkInterface.SourceDestCheck) + } + + d.Set("associate_public_ip_address", primaryNetworkInterface.Association != nil) + + for _, address := range primaryNetworkInterface.Ipv6Addresses { + ipv6Addresses = append(ipv6Addresses, *address.Ipv6Address) + } + + } else { + d.Set("subnet_id", instance.SubnetId) + d.Set("network_interface_id", "") // TODO: Deprecate me v0.10.0 + d.Set("primary_network_interface_id", "") + } + + if err := d.Set("ipv6_addresses", ipv6Addresses); err != nil { + log.Printf("[WARN] Error setting ipv6_addresses for AWS Instance (%s): %s", d.Id(), err) + } + + d.Set("ebs_optimized", instance.EbsOptimized) + if instance.SubnetId != nil && *instance.SubnetId != "" { + d.Set("source_dest_check", instance.SourceDestCheck) + } + + if instance.Monitoring != nil && instance.Monitoring.State != nil { + monitoringState := *instance.Monitoring.State + d.Set("monitoring", monitoringState == "enabled" || monitoringState == "pending") + } + + d.Set("tags", tagsToMap(instance.Tags)) + + if err := readVolumeTags(conn, d); err != nil { + return err + } + + if err := readSecurityGroups(d, instance); err != nil { + return err + } + + if err := readBlockDevices(d, instance, conn); err != nil { + return err + } + if _, ok := d.GetOk("ephemeral_block_device"); !ok { + d.Set("ephemeral_block_device", []interface{}{}) + } + + // Instance attributes + { + attr, err := conn.DescribeInstanceAttribute(&ec2.DescribeInstanceAttributeInput{ + Attribute: aws.String("disableApiTermination"), + InstanceId: aws.String(d.Id()), + }) + if err != nil { + return err + } + d.Set("disable_api_termination", attr.DisableApiTermination.Value) + } + { + attr, err := conn.DescribeInstanceAttribute(&ec2.DescribeInstanceAttributeInput{ + Attribute: aws.String(ec2.InstanceAttributeNameUserData), + InstanceId: aws.String(d.Id()), + }) + if err != nil { + return err + } + if attr.UserData.Value != nil { + d.Set("user_data", userDataHashSum(*attr.UserData.Value)) + } + } + + return nil +} + +func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + d.Partial(true) + + restricted := meta.(*AWSClient).IsGovCloud() || 
meta.(*AWSClient).IsChinaCloud()
+
+	if d.HasChange("tags") {
+		if !d.IsNewResource() || restricted {
+			if err := setTags(conn, d); err != nil {
+				return err
+			} else {
+				d.SetPartial("tags")
+			}
+		}
+	}
+	if d.HasChange("volume_tags") {
+		if !d.IsNewResource() || restricted {
+			if err := setVolumeTags(conn, d); err != nil {
+				return err
+			} else {
+				d.SetPartial("volume_tags")
+			}
+		}
+	}
+
+	if d.HasChange("iam_instance_profile") && !d.IsNewResource() {
+		request := &ec2.DescribeIamInstanceProfileAssociationsInput{
+			Filters: []*ec2.Filter{
+				{
+					Name:   aws.String("instance-id"),
+					Values: []*string{aws.String(d.Id())},
+				},
+			},
+		}
+
+		resp, err := conn.DescribeIamInstanceProfileAssociations(request)
+		if err != nil {
+			return err
+		}
+
+		// An Iam Instance Profile has been provided and is pending a change
+		// This means it is an association or a replacement to an association
+		if _, ok := d.GetOk("iam_instance_profile"); ok {
+			// Does not have an Iam Instance Profile associated with it, need to associate
+			if len(resp.IamInstanceProfileAssociations) == 0 {
+				_, err := conn.AssociateIamInstanceProfile(&ec2.AssociateIamInstanceProfileInput{
+					InstanceId: aws.String(d.Id()),
+					IamInstanceProfile: &ec2.IamInstanceProfileSpecification{
+						Name: aws.String(d.Get("iam_instance_profile").(string)),
+					},
+				})
+				if err != nil {
+					return err
+				}
+
+			} else {
+				// Has an Iam Instance Profile associated with it, need to replace the association
+				associationId := resp.IamInstanceProfileAssociations[0].AssociationId
+
+				_, err := conn.ReplaceIamInstanceProfileAssociation(&ec2.ReplaceIamInstanceProfileAssociationInput{
+					AssociationId: associationId,
+					IamInstanceProfile: &ec2.IamInstanceProfileSpecification{
+						Name: aws.String(d.Get("iam_instance_profile").(string)),
+					},
+				})
+				if err != nil {
+					return err
+				}
+			}
+			// An Iam Instance Profile has _not_ been provided but is pending a change. This means there is a pending removal
+		} else {
+			if len(resp.IamInstanceProfileAssociations) > 0 {
+				// Has an Iam Instance Profile associated with it, need to remove the association
+				associationId := resp.IamInstanceProfileAssociations[0].AssociationId
+
+				_, err := conn.DisassociateIamInstanceProfile(&ec2.DisassociateIamInstanceProfileInput{
+					AssociationId: associationId,
+				})
+				if err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	// SourceDestCheck can only be modified on an instance without manually specified network interfaces.
+ // SourceDestCheck, in that case, is configured at the network interface level + if _, ok := d.GetOk("network_interface"); !ok { + + // If we have a new resource and source_dest_check is still true, don't modify + sourceDestCheck := d.Get("source_dest_check").(bool) + + if d.HasChange("source_dest_check") || d.IsNewResource() && !sourceDestCheck { + // SourceDestCheck can only be set on VPC instances + // AWS will return an error of InvalidParameterCombination if we attempt + // to modify the source_dest_check of an instance in EC2 Classic + log.Printf("[INFO] Modifying `source_dest_check` on Instance %s", d.Id()) + _, err := conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{ + InstanceId: aws.String(d.Id()), + SourceDestCheck: &ec2.AttributeBooleanValue{ + Value: aws.Bool(sourceDestCheck), + }, + }) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok { + // Tolerate InvalidParameterCombination error in Classic, otherwise + // return the error + if "InvalidParameterCombination" != ec2err.Code() { + return err + } + log.Printf("[WARN] Attempted to modify SourceDestCheck on non VPC instance: %s", ec2err.Message()) + } + } + } + } + + if d.HasChange("vpc_security_group_ids") { + var groups []*string + if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 { + for _, v := range v.List() { + groups = append(groups, aws.String(v.(string))) + } + } + // If a user has multiple network interface attachments on the target EC2 instance, simply modifying the + // instance attributes via a `ModifyInstanceAttributes()` request would fail with the following error message: + // "There are multiple interfaces attached to instance 'i-XX'. Please specify an interface ID for the operation instead." + // Thus, we need to actually modify the primary network interface for the new security groups, as the primary + // network interface is where we modify/create security group assignments during Create. 
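+	// Note: the Groups parameter of ModifyNetworkInterfaceAttribute replaces the
+	// interface's entire security group set, so the full desired list is sent
+	// below rather than a delta.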
+ log.Printf("[INFO] Modifying `vpc_security_group_ids` on Instance %q", d.Id()) + instances, err := conn.DescribeInstances(&ec2.DescribeInstancesInput{ + InstanceIds: []*string{aws.String(d.Id())}, + }) + if err != nil { + return err + } + instance := instances.Reservations[0].Instances[0] + var primaryInterface ec2.InstanceNetworkInterface + for _, ni := range instance.NetworkInterfaces { + if *ni.Attachment.DeviceIndex == 0 { + primaryInterface = *ni + } + } + + if primaryInterface.NetworkInterfaceId == nil { + log.Print("[Error] Attempted to set vpc_security_group_ids on an instance without a primary network interface") + return fmt.Errorf( + "Failed to update vpc_security_group_ids on %q, which does not contain a primary network interface", + d.Id()) + } + + if _, err := conn.ModifyNetworkInterfaceAttribute(&ec2.ModifyNetworkInterfaceAttributeInput{ + NetworkInterfaceId: primaryInterface.NetworkInterfaceId, + Groups: groups, + }); err != nil { + return err + } + } + + if d.HasChange("instance_type") && !d.IsNewResource() { + log.Printf("[INFO] Stopping Instance %q for instance_type change", d.Id()) + _, err := conn.StopInstances(&ec2.StopInstancesInput{ + InstanceIds: []*string{aws.String(d.Id())}, + }) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending", "running", "shutting-down", "stopped", "stopping"}, + Target: []string{"stopped"}, + Refresh: InstanceStateRefreshFunc(conn, d.Id(), ""), + Timeout: d.Timeout(schema.TimeoutUpdate), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for instance (%s) to stop: %s", d.Id(), err) + } + + log.Printf("[INFO] Modifying instance type %s", d.Id()) + _, err = conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{ + InstanceId: aws.String(d.Id()), + InstanceType: &ec2.AttributeValue{ + Value: aws.String(d.Get("instance_type").(string)), + }, + }) + if err != nil { + return err + } + + log.Printf("[INFO] Starting Instance %q after instance_type change", d.Id()) + _, err = conn.StartInstances(&ec2.StartInstancesInput{ + InstanceIds: []*string{aws.String(d.Id())}, + }) + + stateConf = &resource.StateChangeConf{ + Pending: []string{"pending", "stopped"}, + Target: []string{"running"}, + Refresh: InstanceStateRefreshFunc(conn, d.Id(), "terminated"), + Timeout: d.Timeout(schema.TimeoutUpdate), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for instance (%s) to become ready: %s", + d.Id(), err) + } + } + + if d.HasChange("disable_api_termination") { + _, err := conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{ + InstanceId: aws.String(d.Id()), + DisableApiTermination: &ec2.AttributeBooleanValue{ + Value: aws.Bool(d.Get("disable_api_termination").(bool)), + }, + }) + if err != nil { + return err + } + } + + if d.HasChange("instance_initiated_shutdown_behavior") { + log.Printf("[INFO] Modifying instance %s", d.Id()) + _, err := conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{ + InstanceId: aws.String(d.Id()), + InstanceInitiatedShutdownBehavior: &ec2.AttributeValue{ + Value: aws.String(d.Get("instance_initiated_shutdown_behavior").(string)), + }, + }) + if err != nil { + return err + } + } + + if d.HasChange("monitoring") { + var mErr error + if d.Get("monitoring").(bool) { + log.Printf("[DEBUG] Enabling monitoring for Instance (%s)", d.Id()) + _, mErr = 
conn.MonitorInstances(&ec2.MonitorInstancesInput{ + InstanceIds: []*string{aws.String(d.Id())}, + }) + } else { + log.Printf("[DEBUG] Disabling monitoring for Instance (%s)", d.Id()) + _, mErr = conn.UnmonitorInstances(&ec2.UnmonitorInstancesInput{ + InstanceIds: []*string{aws.String(d.Id())}, + }) + } + if mErr != nil { + return fmt.Errorf("[WARN] Error updating Instance monitoring: %s", mErr) + } + } + + // TODO(mitchellh): wait for the attributes we modified to + // persist the change... + + d.Partial(false) + + return resourceAwsInstanceRead(d, meta) +} + +func resourceAwsInstanceDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + if err := awsTerminateInstance(conn, d.Id(), d); err != nil { + return err + } + + d.SetId("") + return nil +} + +// InstanceStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch +// an EC2 instance. +func InstanceStateRefreshFunc(conn *ec2.EC2, instanceID, failState string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := conn.DescribeInstances(&ec2.DescribeInstancesInput{ + InstanceIds: []*string{aws.String(instanceID)}, + }) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidInstanceID.NotFound" { + // Set this to nil as if we didn't find anything. + resp = nil + } else { + log.Printf("Error on InstanceStateRefresh: %s", err) + return nil, "", err + } + } + + if resp == nil || len(resp.Reservations) == 0 || len(resp.Reservations[0].Instances) == 0 { + // Sometimes AWS just has consistency issues and doesn't see + // our instance yet. Return an empty state. + return nil, "", nil + } + + i := resp.Reservations[0].Instances[0] + state := *i.State.Name + + if state == failState { + return i, state, fmt.Errorf("Failed to reach target state. 
Reason: %s", + stringifyStateReason(i.StateReason)) + + } + + return i, state, nil + } +} + +func stringifyStateReason(sr *ec2.StateReason) string { + if sr.Message != nil { + return *sr.Message + } + if sr.Code != nil { + return *sr.Code + } + + return sr.String() +} + +func readBlockDevices(d *schema.ResourceData, instance *ec2.Instance, conn *ec2.EC2) error { + ibds, err := readBlockDevicesFromInstance(instance, conn) + if err != nil { + return err + } + + if err := d.Set("ebs_block_device", ibds["ebs"]); err != nil { + return err + } + + // This handles the import case which needs to be defaulted to empty + if _, ok := d.GetOk("root_block_device"); !ok { + if err := d.Set("root_block_device", []interface{}{}); err != nil { + return err + } + } + + if ibds["root"] != nil { + roots := []interface{}{ibds["root"]} + if err := d.Set("root_block_device", roots); err != nil { + return err + } + } + + return nil +} + +func readBlockDevicesFromInstance(instance *ec2.Instance, conn *ec2.EC2) (map[string]interface{}, error) { + blockDevices := make(map[string]interface{}) + blockDevices["ebs"] = make([]map[string]interface{}, 0) + blockDevices["root"] = nil + + instanceBlockDevices := make(map[string]*ec2.InstanceBlockDeviceMapping) + for _, bd := range instance.BlockDeviceMappings { + if bd.Ebs != nil { + instanceBlockDevices[*bd.Ebs.VolumeId] = bd + } + } + + if len(instanceBlockDevices) == 0 { + return nil, nil + } + + volIDs := make([]*string, 0, len(instanceBlockDevices)) + for volID := range instanceBlockDevices { + volIDs = append(volIDs, aws.String(volID)) + } + + // Need to call DescribeVolumes to get volume_size and volume_type for each + // EBS block device + volResp, err := conn.DescribeVolumes(&ec2.DescribeVolumesInput{ + VolumeIds: volIDs, + }) + if err != nil { + return nil, err + } + + for _, vol := range volResp.Volumes { + instanceBd := instanceBlockDevices[*vol.VolumeId] + bd := make(map[string]interface{}) + + if instanceBd.Ebs != nil && instanceBd.Ebs.DeleteOnTermination != nil { + bd["delete_on_termination"] = *instanceBd.Ebs.DeleteOnTermination + } + if vol.Size != nil { + bd["volume_size"] = *vol.Size + } + if vol.VolumeType != nil { + bd["volume_type"] = *vol.VolumeType + } + if vol.Iops != nil { + bd["iops"] = *vol.Iops + } + + if blockDeviceIsRoot(instanceBd, instance) { + blockDevices["root"] = bd + } else { + if instanceBd.DeviceName != nil { + bd["device_name"] = *instanceBd.DeviceName + } + if vol.Encrypted != nil { + bd["encrypted"] = *vol.Encrypted + } + if vol.SnapshotId != nil { + bd["snapshot_id"] = *vol.SnapshotId + } + + blockDevices["ebs"] = append(blockDevices["ebs"].([]map[string]interface{}), bd) + } + } + + return blockDevices, nil +} + +func blockDeviceIsRoot(bd *ec2.InstanceBlockDeviceMapping, instance *ec2.Instance) bool { + return bd.DeviceName != nil && + instance.RootDeviceName != nil && + *bd.DeviceName == *instance.RootDeviceName +} + +func fetchRootDeviceName(ami string, conn *ec2.EC2) (*string, error) { + if ami == "" { + return nil, errors.New("Cannot fetch root device name for blank AMI ID.") + } + + log.Printf("[DEBUG] Describing AMI %q to get root block device name", ami) + res, err := conn.DescribeImages(&ec2.DescribeImagesInput{ + ImageIds: []*string{aws.String(ami)}, + }) + if err != nil { + return nil, err + } + + // For a bad image, we just return nil so we don't block a refresh + if len(res.Images) == 0 { + return nil, nil + } + + image := res.Images[0] + rootDeviceName := image.RootDeviceName + + // Instance store backed AMIs do not 
provide a root device name.
+	if *image.RootDeviceType == ec2.DeviceTypeInstanceStore {
+		return nil, nil
+	}
+
+	// Some AMIs have a RootDeviceName like "/dev/sda1" that does not appear as a
+	// DeviceName in the BlockDeviceMapping list (which will instead have
+	// something like "/dev/sda")
+	//
+	// While this seems like it breaks an invariant of AMIs, it ends up working
+	// on the AWS side, and AMIs like this are common enough that we need to
+	// special case it so Terraform does the right thing.
+	//
+	// Our heuristic is: if the RootDeviceName does not appear in the
+	// BlockDeviceMapping, assume that the DeviceName of the first
+	// BlockDeviceMapping entry serves as the root device.
+	rootDeviceNameInMapping := false
+	for _, bdm := range image.BlockDeviceMappings {
+		if aws.StringValue(bdm.DeviceName) == aws.StringValue(image.RootDeviceName) {
+			rootDeviceNameInMapping = true
+		}
+	}
+
+	if !rootDeviceNameInMapping && len(image.BlockDeviceMappings) > 0 {
+		rootDeviceName = image.BlockDeviceMappings[0].DeviceName
+	}
+
+	if rootDeviceName == nil {
+		return nil, fmt.Errorf("[WARN] Error finding Root Device Name for AMI (%s)", ami)
+	}
+
+	return rootDeviceName, nil
+}
+
+func buildNetworkInterfaceOpts(d *schema.ResourceData, groups []*string, nInterfaces interface{}) []*ec2.InstanceNetworkInterfaceSpecification {
+	networkInterfaces := []*ec2.InstanceNetworkInterfaceSpecification{}
+	// Get necessary items
+	associatePublicIPAddress := d.Get("associate_public_ip_address").(bool)
+	subnet, hasSubnet := d.GetOk("subnet_id")
+
+	if hasSubnet && associatePublicIPAddress {
+		// If we have a non-default VPC / Subnet specified, we can flag
+		// AssociatePublicIpAddress to get a Public IP assigned. By default these are not provided.
+		// You cannot specify both SubnetId and the NetworkInterface.0.* parameters though, otherwise
+		// you get: Network interfaces and an instance-level subnet ID may not be specified on the same request
+		// You also need to attach Security Groups to the NetworkInterface instead of the instance,
+		// to avoid: Network interfaces and an instance-level security groups may not be specified on
+		// the same request
+		ni := &ec2.InstanceNetworkInterfaceSpecification{
+			AssociatePublicIpAddress: aws.Bool(associatePublicIPAddress),
+			DeviceIndex:              aws.Int64(int64(0)),
+			SubnetId:                 aws.String(subnet.(string)),
+			Groups:                   groups,
+		}
+
+		if v, ok := d.GetOk("private_ip"); ok {
+			ni.PrivateIpAddress = aws.String(v.(string))
+		}
+
+		if v, ok := d.GetOk("ipv6_address_count"); ok {
+			ni.Ipv6AddressCount = aws.Int64(int64(v.(int)))
+		}
+
+		if v, ok := d.GetOk("ipv6_addresses"); ok {
+			// Allocate with zero length (capacity only) so append does not leave
+			// leading nil entries in the request.
+			ipv6Addresses := make([]*ec2.InstanceIpv6Address, 0, len(v.([]interface{})))
+			for _, address := range v.([]interface{}) {
+				ipv6Address := &ec2.InstanceIpv6Address{
+					Ipv6Address: aws.String(address.(string)),
+				}
+
+				ipv6Addresses = append(ipv6Addresses, ipv6Address)
+			}
+
+			ni.Ipv6Addresses = ipv6Addresses
+		}
+
+		if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 {
+			for _, v := range v.List() {
+				ni.Groups = append(ni.Groups, aws.String(v.(string)))
+			}
+		}
+
+		networkInterfaces = append(networkInterfaces, ni)
+	} else {
+		// If we have manually specified network interfaces, build and attach those here.
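+		// Each network_interface block names an existing ENI and the device index
+		// to attach it at; AWS performs the attachment as part of RunInstances.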
+ vL := nInterfaces.(*schema.Set).List() + for _, v := range vL { + ini := v.(map[string]interface{}) + ni := &ec2.InstanceNetworkInterfaceSpecification{ + DeviceIndex: aws.Int64(int64(ini["device_index"].(int))), + NetworkInterfaceId: aws.String(ini["network_interface_id"].(string)), + DeleteOnTermination: aws.Bool(ini["delete_on_termination"].(bool)), + } + networkInterfaces = append(networkInterfaces, ni) + } + } + + return networkInterfaces +} + +func readBlockDeviceMappingsFromConfig( + d *schema.ResourceData, conn *ec2.EC2) ([]*ec2.BlockDeviceMapping, error) { + blockDevices := make([]*ec2.BlockDeviceMapping, 0) + + if v, ok := d.GetOk("ebs_block_device"); ok { + vL := v.(*schema.Set).List() + for _, v := range vL { + bd := v.(map[string]interface{}) + ebs := &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)), + } + + if v, ok := bd["snapshot_id"].(string); ok && v != "" { + ebs.SnapshotId = aws.String(v) + } + + if v, ok := bd["encrypted"].(bool); ok && v { + ebs.Encrypted = aws.Bool(v) + } + + if v, ok := bd["volume_size"].(int); ok && v != 0 { + ebs.VolumeSize = aws.Int64(int64(v)) + } + + if v, ok := bd["volume_type"].(string); ok && v != "" { + ebs.VolumeType = aws.String(v) + if "io1" == strings.ToLower(v) { + // Condition: This parameter is required for requests to create io1 + // volumes; it is not used in requests to create gp2, st1, sc1, or + // standard volumes. + // See: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html + if v, ok := bd["iops"].(int); ok && v > 0 { + ebs.Iops = aws.Int64(int64(v)) + } + } + } + + blockDevices = append(blockDevices, &ec2.BlockDeviceMapping{ + DeviceName: aws.String(bd["device_name"].(string)), + Ebs: ebs, + }) + } + } + + if v, ok := d.GetOk("ephemeral_block_device"); ok { + vL := v.(*schema.Set).List() + for _, v := range vL { + bd := v.(map[string]interface{}) + bdm := &ec2.BlockDeviceMapping{ + DeviceName: aws.String(bd["device_name"].(string)), + VirtualName: aws.String(bd["virtual_name"].(string)), + } + if v, ok := bd["no_device"].(bool); ok && v { + bdm.NoDevice = aws.String("") + // When NoDevice is true, just ignore VirtualName since it's not needed + bdm.VirtualName = nil + } + + if bdm.NoDevice == nil && aws.StringValue(bdm.VirtualName) == "" { + return nil, errors.New("virtual_name cannot be empty when no_device is false or undefined.") + } + + blockDevices = append(blockDevices, bdm) + } + } + + if v, ok := d.GetOk("root_block_device"); ok { + vL := v.([]interface{}) + if len(vL) > 1 { + return nil, errors.New("Cannot specify more than one root_block_device.") + } + for _, v := range vL { + bd := v.(map[string]interface{}) + ebs := &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)), + } + + if v, ok := bd["volume_size"].(int); ok && v != 0 { + ebs.VolumeSize = aws.Int64(int64(v)) + } + + if v, ok := bd["volume_type"].(string); ok && v != "" { + ebs.VolumeType = aws.String(v) + } + + if v, ok := bd["iops"].(int); ok && v > 0 && *ebs.VolumeType == "io1" { + // Only set the iops attribute if the volume type is io1. Setting otherwise + // can trigger a refresh/plan loop based on the computed value that is given + // from AWS, and prevent us from specifying 0 as a valid iops. 
+				// See https://github.com/hashicorp/terraform/pull/4146
+				// See https://github.com/hashicorp/terraform/issues/7765
+				ebs.Iops = aws.Int64(int64(v))
+			} else if v, ok := bd["iops"].(int); ok && v > 0 && *ebs.VolumeType != "io1" {
+				// Message user about incompatibility
+				log.Print("[WARN] IOPS is only valid for volume type io1 for EBS Volumes")
+			}
+
+			if dn, err := fetchRootDeviceName(d.Get("ami").(string), conn); err == nil {
+				if dn == nil {
+					return nil, fmt.Errorf(
+						"Expected 1 AMI for ID: %s, got none",
+						d.Get("ami").(string))
+				}
+
+				blockDevices = append(blockDevices, &ec2.BlockDeviceMapping{
+					DeviceName: dn,
+					Ebs:        ebs,
+				})
+			} else {
+				return nil, err
+			}
+		}
+	}
+
+	return blockDevices, nil
+}
+
+func readVolumeTags(conn *ec2.EC2, d *schema.ResourceData) error {
+	volumeIds, err := getAwsInstanceVolumeIds(conn, d)
+	if err != nil {
+		return err
+	}
+
+	tagsResp, err := conn.DescribeTags(&ec2.DescribeTagsInput{
+		Filters: []*ec2.Filter{
+			{
+				Name:   aws.String("resource-id"),
+				Values: volumeIds,
+			},
+		},
+	})
+	if err != nil {
+		return err
+	}
+
+	var tags []*ec2.Tag
+
+	for _, t := range tagsResp.Tags {
+		tag := &ec2.Tag{
+			Key:   t.Key,
+			Value: t.Value,
+		}
+		tags = append(tags, tag)
+	}
+
+	d.Set("volume_tags", tagsToMap(tags))
+
+	return nil
+}
+
+// Determine whether we're referring to security groups with
+// IDs or names. We use a heuristic to figure this out. By default,
+// we use IDs if we're in a VPC. However, if we previously had an
+// all-name list of security groups, we use names. Or, if we had any
+// IDs, we use IDs.
+func readSecurityGroups(d *schema.ResourceData, instance *ec2.Instance) error {
+	useID := instance.SubnetId != nil && *instance.SubnetId != ""
+	if v := d.Get("security_groups"); v != nil {
+		match := useID
+		sgs := v.(*schema.Set).List()
+		if len(sgs) > 0 {
+			match = false
+			for _, v := range v.(*schema.Set).List() {
+				if strings.HasPrefix(v.(string), "sg-") {
+					match = true
+					break
+				}
+			}
+		}
+
+		useID = match
+	}
+
+	// Build up the security groups
+	sgs := make([]string, 0, len(instance.SecurityGroups))
+	if useID {
+		for _, sg := range instance.SecurityGroups {
+			sgs = append(sgs, *sg.GroupId)
+		}
+		log.Printf("[DEBUG] Setting Security Group IDs: %#v", sgs)
+		if err := d.Set("vpc_security_group_ids", sgs); err != nil {
+			return err
+		}
+		if err := d.Set("security_groups", []string{}); err != nil {
+			return err
+		}
+	} else {
+		for _, sg := range instance.SecurityGroups {
+			sgs = append(sgs, *sg.GroupName)
+		}
+		log.Printf("[DEBUG] Setting Security Group Names: %#v", sgs)
+		if err := d.Set("security_groups", sgs); err != nil {
+			return err
+		}
+		if err := d.Set("vpc_security_group_ids", []string{}); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+type awsInstanceOpts struct {
+	BlockDeviceMappings               []*ec2.BlockDeviceMapping
+	DisableAPITermination             *bool
+	EBSOptimized                      *bool
+	Monitoring                        *ec2.RunInstancesMonitoringEnabled
+	IAMInstanceProfile                *ec2.IamInstanceProfileSpecification
+	ImageID                           *string
+	InstanceInitiatedShutdownBehavior *string
+	InstanceType                      *string
+	Ipv6AddressCount                  *int64
+	Ipv6Addresses                     []*ec2.InstanceIpv6Address
+	KeyName                           *string
+	NetworkInterfaces                 []*ec2.InstanceNetworkInterfaceSpecification
+	Placement                         *ec2.Placement
+	PrivateIPAddress                  *string
+	SecurityGroupIDs                  []*string
+	SecurityGroups                    []*string
+	SpotPlacement                     *ec2.SpotPlacement
+	SubnetID                          *string
+	UserData64                        *string
+}
+
+func buildAwsInstanceOpts(
+	d *schema.ResourceData, meta interface{}) (*awsInstanceOpts, error) {
+	conn := meta.(*AWSClient).ec2conn
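+	// awsInstanceOpts collects the launch parameters shared by aws_instance and
+	// aws_spot_instance_request; that sharing is why both Placement and
+	// SpotPlacement are populated further down.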
+ opts := &awsInstanceOpts{ + DisableAPITermination: aws.Bool(d.Get("disable_api_termination").(bool)), + EBSOptimized: aws.Bool(d.Get("ebs_optimized").(bool)), + ImageID: aws.String(d.Get("ami").(string)), + InstanceType: aws.String(d.Get("instance_type").(string)), + } + + if v := d.Get("instance_initiated_shutdown_behavior").(string); v != "" { + opts.InstanceInitiatedShutdownBehavior = aws.String(v) + } + + opts.Monitoring = &ec2.RunInstancesMonitoringEnabled{ + Enabled: aws.Bool(d.Get("monitoring").(bool)), + } + + opts.IAMInstanceProfile = &ec2.IamInstanceProfileSpecification{ + Name: aws.String(d.Get("iam_instance_profile").(string)), + } + + user_data := d.Get("user_data").(string) + + opts.UserData64 = aws.String(base64Encode([]byte(user_data))) + + // check for non-default Subnet, and cast it to a String + subnet, hasSubnet := d.GetOk("subnet_id") + subnetID := subnet.(string) + + // Placement is used for aws_instance; SpotPlacement is used for + // aws_spot_instance_request. They represent the same data. :-| + opts.Placement = &ec2.Placement{ + AvailabilityZone: aws.String(d.Get("availability_zone").(string)), + GroupName: aws.String(d.Get("placement_group").(string)), + } + + opts.SpotPlacement = &ec2.SpotPlacement{ + AvailabilityZone: aws.String(d.Get("availability_zone").(string)), + GroupName: aws.String(d.Get("placement_group").(string)), + } + + if v := d.Get("tenancy").(string); v != "" { + opts.Placement.Tenancy = aws.String(v) + } + + associatePublicIPAddress := d.Get("associate_public_ip_address").(bool) + + var groups []*string + if v := d.Get("security_groups"); v != nil { + // Security group names. + // For a nondefault VPC, you must use security group IDs instead. + // See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html + sgs := v.(*schema.Set).List() + if len(sgs) > 0 && hasSubnet { + log.Print("[WARN] Deprecated. Attempting to use 'security_groups' within a VPC instance. Use 'vpc_security_group_ids' instead.") + } + for _, v := range sgs { + str := v.(string) + groups = append(groups, aws.String(str)) + } + } + + networkInterfaces, interfacesOk := d.GetOk("network_interface") + + // If setting subnet and public address, OR manual network interfaces, populate those now. 
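+	// (The two branches below are mutually exclusive: the EC2 API rejects a
+	// RunInstances call that combines an explicit NetworkInterfaces list with
+	// the top-level subnet and security-group fields, so each attribute is
+	// attached to exactly one of the two request shapes.)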
+	if (hasSubnet && associatePublicIPAddress) || interfacesOk {
+		// Otherwise we're attaching (a) network interface(s)
+		opts.NetworkInterfaces = buildNetworkInterfaceOpts(d, groups, networkInterfaces)
+	} else {
+		// If simply specifying a subnetID, privateIP, Security Groups, or VPC Security Groups, build these now
+		if subnetID != "" {
+			opts.SubnetID = aws.String(subnetID)
+		}
+
+		if v, ok := d.GetOk("private_ip"); ok {
+			opts.PrivateIPAddress = aws.String(v.(string))
+		}
+		if opts.SubnetID != nil &&
+			*opts.SubnetID != "" {
+			opts.SecurityGroupIDs = groups
+		} else {
+			opts.SecurityGroups = groups
+		}
+
+		if v, ok := d.GetOk("ipv6_address_count"); ok {
+			opts.Ipv6AddressCount = aws.Int64(int64(v.(int)))
+		}
+
+		if v, ok := d.GetOk("ipv6_addresses"); ok {
+			// Allocate with zero length (capacity only) so that append does
+			// not leave nil entries at the front of the slice.
+			ipv6Addresses := make([]*ec2.InstanceIpv6Address, 0, len(v.([]interface{})))
+			for _, address := range v.([]interface{}) {
+				ipv6Address := &ec2.InstanceIpv6Address{
+					Ipv6Address: aws.String(address.(string)),
+				}
+
+				ipv6Addresses = append(ipv6Addresses, ipv6Address)
+			}
+
+			opts.Ipv6Addresses = ipv6Addresses
+		}
+
+		if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 {
+			for _, v := range v.List() {
+				opts.SecurityGroupIDs = append(opts.SecurityGroupIDs, aws.String(v.(string)))
+			}
+		}
+	}
+
+	if v, ok := d.GetOk("key_name"); ok {
+		opts.KeyName = aws.String(v.(string))
+	}
+
+	blockDevices, err := readBlockDeviceMappingsFromConfig(d, conn)
+	if err != nil {
+		return nil, err
+	}
+	if len(blockDevices) > 0 {
+		opts.BlockDeviceMappings = blockDevices
+	}
+	return opts, nil
+}
+
+func awsTerminateInstance(conn *ec2.EC2, id string, d *schema.ResourceData) error {
+	log.Printf("[INFO] Terminating instance: %s", id)
+	req := &ec2.TerminateInstancesInput{
+		InstanceIds: []*string{aws.String(id)},
+	}
+	if _, err := conn.TerminateInstances(req); err != nil {
+		return fmt.Errorf("Error terminating instance: %s", err)
+	}
+
+	log.Printf("[DEBUG] Waiting for instance (%s) to become terminated", id)
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"pending", "running", "shutting-down", "stopped", "stopping"},
+		Target:     []string{"terminated"},
+		Refresh:    InstanceStateRefreshFunc(conn, id, ""),
+		Timeout:    d.Timeout(schema.TimeoutDelete),
+		Delay:      10 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err := stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf(
+			"Error waiting for instance (%s) to terminate: %s", id, err)
+	}
+
+	return nil
+}
+
+func iamInstanceProfileArnToName(ip *ec2.IamInstanceProfile) string {
+	if ip == nil || ip.Arn == nil {
+		return ""
+	}
+	parts := strings.Split(*ip.Arn, "/")
+	return parts[len(parts)-1]
+}
+
+func userDataHashSum(user_data string) string {
+	// Check whether the user_data is Base64 encoded.
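+	// (user_data may arrive either as raw text or already encoded, e.g. via
+	// base64encode() in the configuration; both forms of the same payload
+	// should yield the same sum.)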
+ // Always calculate hash of base64 decoded value since we + // check against double-encoding when setting it + v, base64DecodeError := base64.StdEncoding.DecodeString(user_data) + if base64DecodeError != nil { + v = []byte(user_data) + } + + hash := sha1.Sum(v) + return hex.EncodeToString(hash[:]) +} + +func getAwsInstanceVolumeIds(conn *ec2.EC2, d *schema.ResourceData) ([]*string, error) { + volumeIds := make([]*string, 0) + + opts := &ec2.DescribeVolumesInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("attachment.instance-id"), + Values: []*string{aws.String(d.Id())}, + }, + }, + } + + resp, err := conn.DescribeVolumes(opts) + if err != nil { + return nil, err + } + + for _, v := range resp.Volumes { + volumeIds = append(volumeIds, v.VolumeId) + } + + return volumeIds, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_instance_migrate.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_instance_migrate.go new file mode 100644 index 000000000..31f28b39f --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_instance_migrate.go @@ -0,0 +1,111 @@ +package aws + +import ( + "fmt" + "log" + "strconv" + "strings" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/terraform" +) + +func resourceAwsInstanceMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + switch v { + case 0: + log.Println("[INFO] Found AWS Instance State v0; migrating to v1") + return migrateAwsInstanceStateV0toV1(is) + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateAwsInstanceStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + if is.Empty() || is.Attributes == nil { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + // Delete old count + delete(is.Attributes, "block_device.#") + + oldBds, err := readV0BlockDevices(is) + if err != nil { + return is, err + } + // seed count fields for new types + is.Attributes["ebs_block_device.#"] = "0" + is.Attributes["ephemeral_block_device.#"] = "0" + // depending on if state was v0.3.7 or an earlier version, it might have + // root_block_device defined already + if _, ok := is.Attributes["root_block_device.#"]; !ok { + is.Attributes["root_block_device.#"] = "0" + } + for _, oldBd := range oldBds { + if err := writeV1BlockDevice(is, oldBd); err != nil { + return is, err + } + } + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} + +func readV0BlockDevices(is *terraform.InstanceState) (map[string]map[string]string, error) { + oldBds := make(map[string]map[string]string) + for k, v := range is.Attributes { + if !strings.HasPrefix(k, "block_device.") { + continue + } + path := strings.Split(k, ".") + if len(path) != 3 { + return oldBds, fmt.Errorf("Found unexpected block_device field: %#v", k) + } + hashcode, attribute := path[1], path[2] + oldBd, ok := oldBds[hashcode] + if !ok { + oldBd = make(map[string]string) + oldBds[hashcode] = oldBd + } + oldBd[attribute] = v + delete(is.Attributes, k) + } + return oldBds, nil +} + +func writeV1BlockDevice( + is *terraform.InstanceState, oldBd map[string]string) error { + code := hashcode.String(oldBd["device_name"]) + bdType := "ebs_block_device" + if vn, ok := oldBd["virtual_name"]; ok && strings.HasPrefix(vn, 
"ephemeral") { + bdType = "ephemeral_block_device" + } else if dn, ok := oldBd["device_name"]; ok && dn == "/dev/sda1" { + bdType = "root_block_device" + } + + switch bdType { + case "ebs_block_device": + delete(oldBd, "virtual_name") + case "root_block_device": + delete(oldBd, "virtual_name") + delete(oldBd, "encrypted") + delete(oldBd, "snapshot_id") + case "ephemeral_block_device": + delete(oldBd, "delete_on_termination") + delete(oldBd, "encrypted") + delete(oldBd, "iops") + delete(oldBd, "volume_size") + delete(oldBd, "volume_type") + } + for attr, val := range oldBd { + attrKey := fmt.Sprintf("%s.%d.%s", bdType, code, attr) + is.Attributes[attrKey] = val + } + + countAttr := fmt.Sprintf("%s.#", bdType) + count, _ := strconv.Atoi(is.Attributes[countAttr]) + is.Attributes[countAttr] = strconv.Itoa(count + 1) + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_internet_gateway.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_internet_gateway.go new file mode 100644 index 000000000..3834aa58e --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_internet_gateway.go @@ -0,0 +1,354 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsInternetGateway() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsInternetGatewayCreate, + Read: resourceAwsInternetGatewayRead, + Update: resourceAwsInternetGatewayUpdate, + Delete: resourceAwsInternetGatewayDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "vpc_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "tags": tagsSchema(), + }, + } +} + +func resourceAwsInternetGatewayCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + // Create the gateway + log.Printf("[DEBUG] Creating internet gateway") + var err error + resp, err := conn.CreateInternetGateway(nil) + if err != nil { + return fmt.Errorf("Error creating internet gateway: %s", err) + } + + // Get the ID and store it + ig := *resp.InternetGateway + d.SetId(*ig.InternetGatewayId) + log.Printf("[INFO] InternetGateway ID: %s", d.Id()) + + err = resource.Retry(5*time.Minute, func() *resource.RetryError { + igRaw, _, err := IGStateRefreshFunc(conn, d.Id())() + if igRaw != nil { + return nil + } + if err == nil { + return resource.RetryableError(err) + } else { + return resource.NonRetryableError(err) + } + }) + + if err != nil { + return errwrap.Wrapf("{{err}}", err) + } + + err = setTags(conn, d) + if err != nil { + return err + } + + // Attach the new gateway to the correct vpc + return resourceAwsInternetGatewayAttach(d, meta) +} + +func resourceAwsInternetGatewayRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + igRaw, _, err := IGStateRefreshFunc(conn, d.Id())() + if err != nil { + return err + } + if igRaw == nil { + // Seems we have lost our internet gateway + d.SetId("") + return nil + } + + ig := igRaw.(*ec2.InternetGateway) + if len(ig.Attachments) == 0 { + // Gateway exists but not attached to the VPC + d.Set("vpc_id", "") + } else { + d.Set("vpc_id", ig.Attachments[0].VpcId) + } + + 
d.Set("tags", tagsToMap(ig.Tags)) + + return nil +} + +func resourceAwsInternetGatewayUpdate(d *schema.ResourceData, meta interface{}) error { + if d.HasChange("vpc_id") { + // If we're already attached, detach it first + if err := resourceAwsInternetGatewayDetach(d, meta); err != nil { + return err + } + + // Attach the gateway to the new vpc + if err := resourceAwsInternetGatewayAttach(d, meta); err != nil { + return err + } + } + + conn := meta.(*AWSClient).ec2conn + + if err := setTags(conn, d); err != nil { + return err + } + + d.SetPartial("tags") + + return nil +} + +func resourceAwsInternetGatewayDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + // Detach if it is attached + if err := resourceAwsInternetGatewayDetach(d, meta); err != nil { + return err + } + + log.Printf("[INFO] Deleting Internet Gateway: %s", d.Id()) + + return resource.Retry(10*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteInternetGateway(&ec2.DeleteInternetGatewayInput{ + InternetGatewayId: aws.String(d.Id()), + }) + if err == nil { + return nil + } + + ec2err, ok := err.(awserr.Error) + if !ok { + return resource.RetryableError(err) + } + + switch ec2err.Code() { + case "InvalidInternetGatewayID.NotFound": + return nil + case "DependencyViolation": + return resource.RetryableError(err) // retry + } + + return resource.NonRetryableError(err) + }) +} + +func resourceAwsInternetGatewayAttach(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + if d.Get("vpc_id").(string) == "" { + log.Printf( + "[DEBUG] Not attaching Internet Gateway '%s' as no VPC ID is set", + d.Id()) + return nil + } + + log.Printf( + "[INFO] Attaching Internet Gateway '%s' to VPC '%s'", + d.Id(), + d.Get("vpc_id").(string)) + + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + _, err := conn.AttachInternetGateway(&ec2.AttachInternetGatewayInput{ + InternetGatewayId: aws.String(d.Id()), + VpcId: aws.String(d.Get("vpc_id").(string)), + }) + if err == nil { + return nil + } + if ec2err, ok := err.(awserr.Error); ok { + switch ec2err.Code() { + case "InvalidInternetGatewayID.NotFound": + return resource.RetryableError(err) // retry + } + } + return resource.NonRetryableError(err) + }) + if err != nil { + return err + } + + // A note on the states below: the AWS docs (as of July, 2014) say + // that the states would be: attached, attaching, detached, detaching, + // but when running, I noticed that the state is usually "available" when + // it is attached. 
+
+	// Wait for it to be fully attached before continuing
+	log.Printf("[DEBUG] Waiting for internet gateway (%s) to attach", d.Id())
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"detached", "attaching"},
+		Target:  []string{"available"},
+		Refresh: IGAttachStateRefreshFunc(conn, d.Id(), "available"),
+		Timeout: 4 * time.Minute,
+	}
+	if _, err := stateConf.WaitForState(); err != nil {
+		return fmt.Errorf(
+			"Error waiting for internet gateway (%s) to attach: %s",
+			d.Id(), err)
+	}
+
+	return nil
+}
+
+func resourceAwsInternetGatewayDetach(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	// Get the old VPC ID to detach from
+	vpcID, _ := d.GetChange("vpc_id")
+
+	if vpcID.(string) == "" {
+		log.Printf(
+			"[DEBUG] Not detaching Internet Gateway '%s' as no VPC ID is set",
+			d.Id())
+		return nil
+	}
+
+	log.Printf(
+		"[INFO] Detaching Internet Gateway '%s' from VPC '%s'",
+		d.Id(),
+		vpcID.(string))
+
+	// Wait for it to be fully detached before continuing
+	log.Printf("[DEBUG] Waiting for internet gateway (%s) to detach", d.Id())
+	stateConf := &resource.StateChangeConf{
+		Pending:        []string{"detaching"},
+		Target:         []string{"detached"},
+		Refresh:        detachIGStateRefreshFunc(conn, d.Id(), vpcID.(string)),
+		Timeout:        15 * time.Minute,
+		Delay:          10 * time.Second,
+		NotFoundChecks: 30,
+	}
+	if _, err := stateConf.WaitForState(); err != nil {
+		return fmt.Errorf(
+			"Error waiting for internet gateway (%s) to detach: %s",
+			d.Id(), err)
+	}
+
+	return nil
+}
+
+// detachIGStateRefreshFunc returns a resource.StateRefreshFunc that detaches
+// an internet gateway from its VPC and reports the state of the detachment.
+func detachIGStateRefreshFunc(conn *ec2.EC2, gatewayID, vpcID string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		_, err := conn.DetachInternetGateway(&ec2.DetachInternetGatewayInput{
+			InternetGatewayId: aws.String(gatewayID),
+			VpcId:             aws.String(vpcID),
+		})
+		if err != nil {
+			if ec2err, ok := err.(awserr.Error); ok {
+				switch ec2err.Code() {
+				case "InvalidInternetGatewayID.NotFound":
+					log.Printf("[TRACE] Error detaching Internet Gateway '%s' from VPC '%s': %s", gatewayID, vpcID, err)
+					return nil, "Not Found", nil
+
+				case "Gateway.NotAttached":
+					return "detached", "detached", nil
+
+				case "DependencyViolation":
+					return nil, "detaching", nil
+				}
+			}
+		}
+
+		// DetachInternetGateway only returns an error, so if it's nil, assume we're
+		// detached
+		return "detached", "detached", nil
+	}
+}
+
+// IGStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch
+// an internet gateway.
+func IGStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		resp, err := conn.DescribeInternetGateways(&ec2.DescribeInternetGatewaysInput{
+			InternetGatewayIds: []*string{aws.String(id)},
+		})
+		if err != nil {
+			ec2err, ok := err.(awserr.Error)
+			if ok && ec2err.Code() == "InvalidInternetGatewayID.NotFound" {
+				resp = nil
+			} else {
+				log.Printf("[ERROR] Error on IGStateRefresh: %s", err)
+				return nil, "", err
+			}
+		}
+
+		if resp == nil {
+			// Sometimes AWS just has consistency issues and doesn't see
+			// our instance yet. Return an empty state.
+			return nil, "", nil
+		}
+
+		ig := resp.InternetGateways[0]
+		return ig, "available", nil
+	}
+}
+
+// IGAttachStateRefreshFunc returns a resource.StateRefreshFunc that is used
+// to watch the state of an internet gateway's attachment.
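+// Once the attachment has been pending for more than ten seconds, the
+// expected state is reported unconditionally, presumably to paper over
+// DescribeInternetGateways reads that lag behind a completed attachment.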
+func IGAttachStateRefreshFunc(conn *ec2.EC2, id string, expected string) resource.StateRefreshFunc {
+	var start time.Time
+	return func() (interface{}, string, error) {
+		if start.IsZero() {
+			start = time.Now()
+		}
+
+		resp, err := conn.DescribeInternetGateways(&ec2.DescribeInternetGatewaysInput{
+			InternetGatewayIds: []*string{aws.String(id)},
+		})
+		if err != nil {
+			ec2err, ok := err.(awserr.Error)
+			if ok && ec2err.Code() == "InvalidInternetGatewayID.NotFound" {
+				resp = nil
+			} else {
+				log.Printf("[ERROR] Error on IGStateRefresh: %s", err)
+				return nil, "", err
+			}
+		}
+
+		if resp == nil {
+			// Sometimes AWS just has consistency issues and doesn't see
+			// our instance yet. Return an empty state.
+			return nil, "", nil
+		}
+
+		ig := resp.InternetGateways[0]
+
+		if time.Since(start) > 10*time.Second {
+			return ig, expected, nil
+		}
+
+		if len(ig.Attachments) == 0 {
+			// No attachments, we're detached
+			return ig, "detached", nil
+		}
+
+		return ig, *ig.Attachments[0].State, nil
+	}
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_key_pair.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_key_pair.go
new file mode 100644
index 000000000..02050c7af
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_key_pair.go
@@ -0,0 +1,131 @@
+package aws
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/ec2"
+)
+
+func resourceAwsKeyPair() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsKeyPairCreate,
+		Read:   resourceAwsKeyPairRead,
+		Update: nil,
+		Delete: resourceAwsKeyPairDelete,
+		Importer: &schema.ResourceImporter{
+			State: schema.ImportStatePassthrough,
+		},
+
+		SchemaVersion: 1,
+		MigrateState:  resourceAwsKeyPairMigrateState,
+
+		Schema: map[string]*schema.Schema{
+			"key_name": &schema.Schema{
+				Type:          schema.TypeString,
+				Optional:      true,
+				Computed:      true,
+				ForceNew:      true,
+				ConflictsWith: []string{"key_name_prefix"},
+			},
+			"key_name_prefix": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
+					value := v.(string)
+					if len(value) > 100 {
+						errors = append(errors, fmt.Errorf(
+							"%q cannot be longer than 100 characters, since the full key name is limited to 255", k))
+					}
+					return
+				},
+			},
+			"public_key": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+				StateFunc: func(v interface{}) string {
+					switch v.(type) {
+					case string:
+						return strings.TrimSpace(v.(string))
+					default:
+						return ""
+					}
+				},
+			},
+			"fingerprint": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceAwsKeyPairCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	var keyName string
+	if v, ok := d.GetOk("key_name"); ok {
+		keyName = v.(string)
+	} else if v, ok := d.GetOk("key_name_prefix"); ok {
+		keyName = resource.PrefixedUniqueId(v.(string))
+		d.Set("key_name", keyName)
+	} else {
+		keyName = resource.UniqueId()
+		d.Set("key_name", keyName)
+	}
+
+	publicKey := d.Get("public_key").(string)
+	req := &ec2.ImportKeyPairInput{
+		KeyName:           aws.String(keyName),
+		PublicKeyMaterial: []byte(publicKey),
+	}
+	resp, err := conn.ImportKeyPair(req)
+	if err != nil {
+		return fmt.Errorf("Error importing KeyPair: %s", err)
+	}
+
+	d.SetId(*resp.KeyName)
+	return nil
+}
+
+func resourceAwsKeyPairRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+	req := &ec2.DescribeKeyPairsInput{
+		KeyNames: []*string{aws.String(d.Id())},
+	}
+	resp, err := conn.DescribeKeyPairs(req)
+	if err != nil {
+		awsErr, ok := err.(awserr.Error)
+		if ok && awsErr.Code() == "InvalidKeyPair.NotFound" {
+			d.SetId("")
+			return nil
+		}
+		return fmt.Errorf("Error retrieving KeyPair: %s", err)
+	}
+
+	for _, keyPair := range resp.KeyPairs {
+		if *keyPair.KeyName == d.Id() {
+			d.Set("key_name", keyPair.KeyName)
+			d.Set("fingerprint", keyPair.KeyFingerprint)
+			return nil
+		}
+	}
+
+	return fmt.Errorf("Unable to find key pair within: %#v", resp.KeyPairs)
+}
+
+func resourceAwsKeyPairDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	_, err := conn.DeleteKeyPair(&ec2.DeleteKeyPairInput{
+		KeyName: aws.String(d.Id()),
+	})
+	return err
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_key_pair_migrate.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_key_pair_migrate.go
new file mode 100644
index 000000000..c937ac360
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_key_pair_migrate.go
@@ -0,0 +1,36 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"strings"
+
+	"github.com/hashicorp/terraform/terraform"
+)
+
+func resourceAwsKeyPairMigrateState(
+	v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
+	switch v {
+	case 0:
+		log.Println("[INFO] Found AWS Key Pair State v0; migrating to v1")
+		return migrateKeyPairStateV0toV1(is)
+	default:
+		return is, fmt.Errorf("Unexpected schema version: %d", v)
+	}
+}
+
+func migrateKeyPairStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) {
+	if is.Empty() {
+		log.Println("[DEBUG] Empty InstanceState; nothing to migrate.")
+		return is, nil
+	}
+
+	log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes)
+
+	// replace public_key with a stripped version, removing `\n` from the end
+	// see https://github.com/hashicorp/terraform/issues/3455
+	is.Attributes["public_key"] = strings.TrimSpace(is.Attributes["public_key"])
+
+	log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes)
+	return is, nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_firehose_delivery_stream.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_firehose_delivery_stream.go
new file mode 100644
index 000000000..3cd476be9
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_firehose_delivery_stream.go
@@ -0,0 +1,773 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/firehose"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func cloudWatchLoggingOptionsSchema() *schema.Schema {
+	return &schema.Schema{
+		Type:     schema.TypeSet,
+		MaxItems: 1,
+		Optional: true,
+		Computed: true,
+		Elem: &schema.Resource{
+			Schema: map[string]*schema.Schema{
+				"enabled": {
+					Type:     schema.TypeBool,
+					Optional: true,
+					Default:  false,
+				},
+
+				"log_group_name": {
+					Type:     schema.TypeString,
+					Optional: true,
+				},
+
+
"log_stream_name": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + } +} + +func resourceAwsKinesisFirehoseDeliveryStream() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsKinesisFirehoseDeliveryStreamCreate, + Read: resourceAwsKinesisFirehoseDeliveryStreamRead, + Update: resourceAwsKinesisFirehoseDeliveryStreamUpdate, + Delete: resourceAwsKinesisFirehoseDeliveryStreamDelete, + + SchemaVersion: 1, + MigrateState: resourceAwsKinesisFirehoseMigrateState, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 64 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 64 characters", k)) + } + return + }, + }, + + "destination": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + StateFunc: func(v interface{}) string { + value := v.(string) + return strings.ToLower(value) + }, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "s3" && value != "redshift" && value != "elasticsearch" { + errors = append(errors, fmt.Errorf( + "%q must be one of 's3', 'redshift', 'elasticsearch'", k)) + } + return + }, + }, + + "s3_configuration": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket_arn": { + Type: schema.TypeString, + Required: true, + }, + + "buffer_size": { + Type: schema.TypeInt, + Optional: true, + Default: 5, + }, + + "buffer_interval": { + Type: schema.TypeInt, + Optional: true, + Default: 300, + }, + + "compression_format": { + Type: schema.TypeString, + Optional: true, + Default: "UNCOMPRESSED", + }, + + "kms_key_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, + }, + + "role_arn": { + Type: schema.TypeString, + Required: true, + }, + + "prefix": { + Type: schema.TypeString, + Optional: true, + }, + + "cloudwatch_logging_options": cloudWatchLoggingOptionsSchema(), + }, + }, + }, + + "redshift_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_jdbcurl": { + Type: schema.TypeString, + Required: true, + }, + + "username": { + Type: schema.TypeString, + Required: true, + }, + + "password": { + Type: schema.TypeString, + Required: true, + Sensitive: true, + }, + + "role_arn": { + Type: schema.TypeString, + Required: true, + }, + + "retry_duration": { + Type: schema.TypeInt, + Optional: true, + Default: 3600, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + if value < 0 || value > 7200 { + errors = append(errors, fmt.Errorf( + "%q must be in the range from 0 to 7200 seconds.", k)) + } + return + }, + }, + + "copy_options": { + Type: schema.TypeString, + Optional: true, + }, + + "data_table_columns": { + Type: schema.TypeString, + Optional: true, + }, + + "data_table_name": { + Type: schema.TypeString, + Required: true, + }, + + "cloudwatch_logging_options": cloudWatchLoggingOptionsSchema(), + }, + }, + }, + + "elasticsearch_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "buffering_interval": { + Type: schema.TypeInt, + Optional: true, + Default: 300, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + if 
value < 60 || value > 900 { + errors = append(errors, fmt.Errorf( + "%q must be in the range from 60 to 900 seconds.", k)) + } + return + }, + }, + + "buffering_size": { + Type: schema.TypeInt, + Optional: true, + Default: 5, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + if value < 1 || value > 100 { + errors = append(errors, fmt.Errorf( + "%q must be in the range from 1 to 100 MB.", k)) + } + return + }, + }, + + "domain_arn": { + Type: schema.TypeString, + Required: true, + }, + + "index_name": { + Type: schema.TypeString, + Required: true, + }, + + "index_rotation_period": { + Type: schema.TypeString, + Optional: true, + Default: "OneDay", + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "NoRotation" && value != "OneHour" && value != "OneDay" && value != "OneWeek" && value != "OneMonth" { + errors = append(errors, fmt.Errorf( + "%q must be one of 'NoRotation', 'OneHour', 'OneDay', 'OneWeek', 'OneMonth'", k)) + } + return + }, + }, + + "retry_duration": { + Type: schema.TypeInt, + Optional: true, + Default: 300, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + if value < 0 || value > 7200 { + errors = append(errors, fmt.Errorf( + "%q must be in the range from 0 to 7200 seconds.", k)) + } + return + }, + }, + + "role_arn": { + Type: schema.TypeString, + Required: true, + }, + + "s3_backup_mode": { + Type: schema.TypeString, + Optional: true, + Default: "FailedDocumentsOnly", + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "FailedDocumentsOnly" && value != "AllDocuments" { + errors = append(errors, fmt.Errorf( + "%q must be one of 'FailedDocumentsOnly', 'AllDocuments'", k)) + } + return + }, + }, + + "type_name": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 100 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 100 characters", k)) + } + return + }, + }, + + "cloudwatch_logging_options": cloudWatchLoggingOptionsSchema(), + }, + }, + }, + + "arn": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "version_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "destination_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + } +} + +func createS3Config(d *schema.ResourceData) *firehose.S3DestinationConfiguration { + s3 := d.Get("s3_configuration").([]interface{})[0].(map[string]interface{}) + + configuration := &firehose.S3DestinationConfiguration{ + BucketARN: aws.String(s3["bucket_arn"].(string)), + RoleARN: aws.String(s3["role_arn"].(string)), + BufferingHints: &firehose.BufferingHints{ + IntervalInSeconds: aws.Int64(int64(s3["buffer_interval"].(int))), + SizeInMBs: aws.Int64(int64(s3["buffer_size"].(int))), + }, + Prefix: extractPrefixConfiguration(s3), + CompressionFormat: aws.String(s3["compression_format"].(string)), + EncryptionConfiguration: extractEncryptionConfiguration(s3), + } + + if _, ok := s3["cloudwatch_logging_options"]; ok { + configuration.CloudWatchLoggingOptions = extractCloudWatchLoggingConfiguration(s3) + } + + return configuration +} + +func updateS3Config(d *schema.ResourceData) *firehose.S3DestinationUpdate { + s3 := d.Get("s3_configuration").([]interface{})[0].(map[string]interface{}) + + configuration := 
&firehose.S3DestinationUpdate{ + BucketARN: aws.String(s3["bucket_arn"].(string)), + RoleARN: aws.String(s3["role_arn"].(string)), + BufferingHints: &firehose.BufferingHints{ + IntervalInSeconds: aws.Int64((int64)(s3["buffer_interval"].(int))), + SizeInMBs: aws.Int64((int64)(s3["buffer_size"].(int))), + }, + Prefix: extractPrefixConfiguration(s3), + CompressionFormat: aws.String(s3["compression_format"].(string)), + EncryptionConfiguration: extractEncryptionConfiguration(s3), + CloudWatchLoggingOptions: extractCloudWatchLoggingConfiguration(s3), + } + + if _, ok := s3["cloudwatch_logging_options"]; ok { + configuration.CloudWatchLoggingOptions = extractCloudWatchLoggingConfiguration(s3) + } + + return configuration +} + +func extractEncryptionConfiguration(s3 map[string]interface{}) *firehose.EncryptionConfiguration { + if key, ok := s3["kms_key_arn"]; ok && len(key.(string)) > 0 { + return &firehose.EncryptionConfiguration{ + KMSEncryptionConfig: &firehose.KMSEncryptionConfig{ + AWSKMSKeyARN: aws.String(key.(string)), + }, + } + } + + return &firehose.EncryptionConfiguration{ + NoEncryptionConfig: aws.String("NoEncryption"), + } +} + +func extractCloudWatchLoggingConfiguration(s3 map[string]interface{}) *firehose.CloudWatchLoggingOptions { + config := s3["cloudwatch_logging_options"].(*schema.Set).List() + if len(config) == 0 { + return nil + } + + loggingConfig := config[0].(map[string]interface{}) + loggingOptions := &firehose.CloudWatchLoggingOptions{ + Enabled: aws.Bool(loggingConfig["enabled"].(bool)), + } + + if v, ok := loggingConfig["log_group_name"]; ok { + loggingOptions.LogGroupName = aws.String(v.(string)) + } + + if v, ok := loggingConfig["log_stream_name"]; ok { + loggingOptions.LogStreamName = aws.String(v.(string)) + } + + return loggingOptions + +} + +func extractPrefixConfiguration(s3 map[string]interface{}) *string { + if v, ok := s3["prefix"]; ok { + return aws.String(v.(string)) + } + + return nil +} + +func createRedshiftConfig(d *schema.ResourceData, s3Config *firehose.S3DestinationConfiguration) (*firehose.RedshiftDestinationConfiguration, error) { + redshiftRaw, ok := d.GetOk("redshift_configuration") + if !ok { + return nil, fmt.Errorf("[ERR] Error loading Redshift Configuration for Kinesis Firehose: redshift_configuration not found") + } + rl := redshiftRaw.([]interface{}) + + redshift := rl[0].(map[string]interface{}) + + configuration := &firehose.RedshiftDestinationConfiguration{ + ClusterJDBCURL: aws.String(redshift["cluster_jdbcurl"].(string)), + RetryOptions: extractRedshiftRetryOptions(redshift), + Password: aws.String(redshift["password"].(string)), + Username: aws.String(redshift["username"].(string)), + RoleARN: aws.String(redshift["role_arn"].(string)), + CopyCommand: extractCopyCommandConfiguration(redshift), + S3Configuration: s3Config, + } + + if _, ok := redshift["cloudwatch_logging_options"]; ok { + configuration.CloudWatchLoggingOptions = extractCloudWatchLoggingConfiguration(redshift) + } + + return configuration, nil +} + +func updateRedshiftConfig(d *schema.ResourceData, s3Update *firehose.S3DestinationUpdate) (*firehose.RedshiftDestinationUpdate, error) { + redshiftRaw, ok := d.GetOk("redshift_configuration") + if !ok { + return nil, fmt.Errorf("[ERR] Error loading Redshift Configuration for Kinesis Firehose: redshift_configuration not found") + } + rl := redshiftRaw.([]interface{}) + + redshift := rl[0].(map[string]interface{}) + + configuration := &firehose.RedshiftDestinationUpdate{ + ClusterJDBCURL: 
aws.String(redshift["cluster_jdbcurl"].(string)), + RetryOptions: extractRedshiftRetryOptions(redshift), + Password: aws.String(redshift["password"].(string)), + Username: aws.String(redshift["username"].(string)), + RoleARN: aws.String(redshift["role_arn"].(string)), + CopyCommand: extractCopyCommandConfiguration(redshift), + S3Update: s3Update, + } + + if _, ok := redshift["cloudwatch_logging_options"]; ok { + configuration.CloudWatchLoggingOptions = extractCloudWatchLoggingConfiguration(redshift) + } + + return configuration, nil +} + +func createElasticsearchConfig(d *schema.ResourceData, s3Config *firehose.S3DestinationConfiguration) (*firehose.ElasticsearchDestinationConfiguration, error) { + esConfig, ok := d.GetOk("elasticsearch_configuration") + if !ok { + return nil, fmt.Errorf("[ERR] Error loading Elasticsearch Configuration for Kinesis Firehose: elasticsearch_configuration not found") + } + esList := esConfig.([]interface{}) + + es := esList[0].(map[string]interface{}) + + config := &firehose.ElasticsearchDestinationConfiguration{ + BufferingHints: extractBufferingHints(es), + DomainARN: aws.String(es["domain_arn"].(string)), + IndexName: aws.String(es["index_name"].(string)), + RetryOptions: extractElasticSearchRetryOptions(es), + RoleARN: aws.String(es["role_arn"].(string)), + TypeName: aws.String(es["type_name"].(string)), + S3Configuration: s3Config, + } + + if _, ok := es["cloudwatch_logging_options"]; ok { + config.CloudWatchLoggingOptions = extractCloudWatchLoggingConfiguration(es) + } + + if indexRotationPeriod, ok := es["index_rotation_period"]; ok { + config.IndexRotationPeriod = aws.String(indexRotationPeriod.(string)) + } + if s3BackupMode, ok := es["s3_backup_mode"]; ok { + config.S3BackupMode = aws.String(s3BackupMode.(string)) + } + + return config, nil +} + +func updateElasticsearchConfig(d *schema.ResourceData, s3Update *firehose.S3DestinationUpdate) (*firehose.ElasticsearchDestinationUpdate, error) { + esConfig, ok := d.GetOk("elasticsearch_configuration") + if !ok { + return nil, fmt.Errorf("[ERR] Error loading Elasticsearch Configuration for Kinesis Firehose: elasticsearch_configuration not found") + } + esList := esConfig.([]interface{}) + + es := esList[0].(map[string]interface{}) + + update := &firehose.ElasticsearchDestinationUpdate{ + BufferingHints: extractBufferingHints(es), + DomainARN: aws.String(es["domain_arn"].(string)), + IndexName: aws.String(es["index_name"].(string)), + RetryOptions: extractElasticSearchRetryOptions(es), + RoleARN: aws.String(es["role_arn"].(string)), + TypeName: aws.String(es["type_name"].(string)), + S3Update: s3Update, + } + + if _, ok := es["cloudwatch_logging_options"]; ok { + update.CloudWatchLoggingOptions = extractCloudWatchLoggingConfiguration(es) + } + + if indexRotationPeriod, ok := es["index_rotation_period"]; ok { + update.IndexRotationPeriod = aws.String(indexRotationPeriod.(string)) + } + + return update, nil +} + +func extractBufferingHints(es map[string]interface{}) *firehose.ElasticsearchBufferingHints { + bufferingHints := &firehose.ElasticsearchBufferingHints{} + + if bufferingInterval, ok := es["buffering_interval"].(int); ok { + bufferingHints.IntervalInSeconds = aws.Int64(int64(bufferingInterval)) + } + if bufferingSize, ok := es["buffering_size"].(int); ok { + bufferingHints.SizeInMBs = aws.Int64(int64(bufferingSize)) + } + + return bufferingHints +} + +func extractElasticSearchRetryOptions(es map[string]interface{}) *firehose.ElasticsearchRetryOptions { + retryOptions := 
&firehose.ElasticsearchRetryOptions{} + + if retryDuration, ok := es["retry_duration"].(int); ok { + retryOptions.DurationInSeconds = aws.Int64(int64(retryDuration)) + } + + return retryOptions +} + +func extractRedshiftRetryOptions(redshift map[string]interface{}) *firehose.RedshiftRetryOptions { + retryOptions := &firehose.RedshiftRetryOptions{} + + if retryDuration, ok := redshift["retry_duration"].(int); ok { + retryOptions.DurationInSeconds = aws.Int64(int64(retryDuration)) + } + + return retryOptions +} + +func extractCopyCommandConfiguration(redshift map[string]interface{}) *firehose.CopyCommand { + cmd := &firehose.CopyCommand{ + DataTableName: aws.String(redshift["data_table_name"].(string)), + } + if copyOptions, ok := redshift["copy_options"]; ok { + cmd.CopyOptions = aws.String(copyOptions.(string)) + } + if columns, ok := redshift["data_table_columns"]; ok { + cmd.DataTableColumns = aws.String(columns.(string)) + } + + return cmd +} + +func resourceAwsKinesisFirehoseDeliveryStreamCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).firehoseconn + + sn := d.Get("name").(string) + s3Config := createS3Config(d) + + createInput := &firehose.CreateDeliveryStreamInput{ + DeliveryStreamName: aws.String(sn), + } + + if d.Get("destination").(string) == "s3" { + createInput.S3DestinationConfiguration = s3Config + } else if d.Get("destination").(string) == "elasticsearch" { + esConfig, err := createElasticsearchConfig(d, s3Config) + if err != nil { + return err + } + createInput.ElasticsearchDestinationConfiguration = esConfig + } else { + rc, err := createRedshiftConfig(d, s3Config) + if err != nil { + return err + } + createInput.RedshiftDestinationConfiguration = rc + } + + var lastError error + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + _, err := conn.CreateDeliveryStream(createInput) + if err != nil { + log.Printf("[DEBUG] Error creating Firehose Delivery Stream: %s", err) + lastError = err + + if awsErr, ok := err.(awserr.Error); ok { + // IAM roles can take ~10 seconds to propagate in AWS: + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console + if awsErr.Code() == "InvalidArgumentException" && strings.Contains(awsErr.Message(), "Firehose is unable to assume role") { + log.Printf("[DEBUG] Firehose could not assume role referenced, retrying...") + return resource.RetryableError(awsErr) + } + } + // Not retryable + return resource.NonRetryableError(err) + } + + return nil + }) + if err != nil { + if awsErr, ok := lastError.(awserr.Error); ok { + return fmt.Errorf("[WARN] Error creating Kinesis Firehose Delivery Stream: \"%s\", code: \"%s\"", awsErr.Message(), awsErr.Code()) + } + return err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"CREATING"}, + Target: []string{"ACTIVE"}, + Refresh: firehoseStreamStateRefreshFunc(conn, sn), + Timeout: 20 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + firehoseStream, err := stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for Kinesis Stream (%s) to become active: %s", + sn, err) + } + + s := firehoseStream.(*firehose.DeliveryStreamDescription) + d.SetId(*s.DeliveryStreamARN) + d.Set("arn", s.DeliveryStreamARN) + + return resourceAwsKinesisFirehoseDeliveryStreamRead(d, meta) +} + +func resourceAwsKinesisFirehoseDeliveryStreamUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).firehoseconn + + sn := 
d.Get("name").(string) + s3Config := updateS3Config(d) + + updateInput := &firehose.UpdateDestinationInput{ + DeliveryStreamName: aws.String(sn), + CurrentDeliveryStreamVersionId: aws.String(d.Get("version_id").(string)), + DestinationId: aws.String(d.Get("destination_id").(string)), + } + + if d.Get("destination").(string) == "s3" { + updateInput.S3DestinationUpdate = s3Config + } else if d.Get("destination").(string) == "elasticsearch" { + esUpdate, err := updateElasticsearchConfig(d, s3Config) + if err != nil { + return err + } + updateInput.ElasticsearchDestinationUpdate = esUpdate + } else { + rc, err := updateRedshiftConfig(d, s3Config) + if err != nil { + return err + } + updateInput.RedshiftDestinationUpdate = rc + } + + _, err := conn.UpdateDestination(updateInput) + if err != nil { + return fmt.Errorf( + "Error Updating Kinesis Firehose Delivery Stream: \"%s\"\n%s", + sn, err) + } + + return resourceAwsKinesisFirehoseDeliveryStreamRead(d, meta) +} + +func resourceAwsKinesisFirehoseDeliveryStreamRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).firehoseconn + + resp, err := conn.DescribeDeliveryStream(&firehose.DescribeDeliveryStreamInput{ + DeliveryStreamName: aws.String(d.Get("name").(string)), + }) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "ResourceNotFoundException" { + d.SetId("") + return nil + } + return fmt.Errorf("[WARN] Error reading Kinesis Firehose Delivery Stream: \"%s\", code: \"%s\"", awsErr.Message(), awsErr.Code()) + } + return err + } + + s := resp.DeliveryStreamDescription + d.Set("version_id", s.VersionId) + d.Set("arn", *s.DeliveryStreamARN) + if len(s.Destinations) > 0 { + destination := s.Destinations[0] + d.Set("destination_id", *destination.DestinationId) + } + + return nil +} + +func resourceAwsKinesisFirehoseDeliveryStreamDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).firehoseconn + + sn := d.Get("name").(string) + _, err := conn.DeleteDeliveryStream(&firehose.DeleteDeliveryStreamInput{ + DeliveryStreamName: aws.String(sn), + }) + + if err != nil { + return err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"DELETING"}, + Target: []string{"DESTROYED"}, + Refresh: firehoseStreamStateRefreshFunc(conn, sn), + Timeout: 20 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for Delivery Stream (%s) to be destroyed: %s", + sn, err) + } + + d.SetId("") + return nil +} + +func firehoseStreamStateRefreshFunc(conn *firehose.Firehose, sn string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + describeOpts := &firehose.DescribeDeliveryStreamInput{ + DeliveryStreamName: aws.String(sn), + } + resp, err := conn.DescribeDeliveryStream(describeOpts) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "ResourceNotFoundException" { + return 42, "DESTROYED", nil + } + return nil, awsErr.Code(), err + } + return nil, "failed", err + } + + return resp.DeliveryStreamDescription, *resp.DeliveryStreamDescription.DeliveryStreamStatus, nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_firehose_delivery_stream_migrate.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_firehose_delivery_stream_migrate.go new file mode 100644 index 000000000..7ed8bfa33 --- /dev/null +++ 
b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_firehose_delivery_stream_migrate.go
@@ -0,0 +1,59 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/terraform/terraform"
+)
+
+func resourceAwsKinesisFirehoseMigrateState(
+	v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
+	switch v {
+	case 0:
+		log.Println("[INFO] Found AWS Kinesis Firehose Delivery Stream State v0; migrating to v1")
+		return migrateKinesisFirehoseV0toV1(is)
+	default:
+		return is, fmt.Errorf("Unexpected schema version: %d", v)
+	}
+}
+
+func migrateKinesisFirehoseV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) {
+	if is.Empty() {
+		log.Println("[DEBUG] Empty Kinesis Firehose Delivery State; nothing to migrate.")
+		return is, nil
+	}
+
+	log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes)
+
+	// migrate the flat S3 configuration to an s3_configuration block
+	// grab initial values
+	is.Attributes["s3_configuration.#"] = "1"
+	// Required parameters
+	is.Attributes["s3_configuration.0.role_arn"] = is.Attributes["role_arn"]
+	is.Attributes["s3_configuration.0.bucket_arn"] = is.Attributes["s3_bucket_arn"]
+
+	// Optional parameters
+	if is.Attributes["s3_buffer_size"] != "" {
+		is.Attributes["s3_configuration.0.buffer_size"] = is.Attributes["s3_buffer_size"]
+	}
+	if is.Attributes["s3_data_compression"] != "" {
+		is.Attributes["s3_configuration.0.compression_format"] = is.Attributes["s3_data_compression"]
+	}
+	if is.Attributes["s3_buffer_interval"] != "" {
+		is.Attributes["s3_configuration.0.buffer_interval"] = is.Attributes["s3_buffer_interval"]
+	}
+	if is.Attributes["s3_prefix"] != "" {
+		is.Attributes["s3_configuration.0.prefix"] = is.Attributes["s3_prefix"]
+	}
+
+	delete(is.Attributes, "role_arn")
+	delete(is.Attributes, "s3_bucket_arn")
+	delete(is.Attributes, "s3_buffer_size")
+	delete(is.Attributes, "s3_data_compression")
+	delete(is.Attributes, "s3_buffer_interval")
+	delete(is.Attributes, "s3_prefix")
+
+	log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes)
+	return is, nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_stream.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_stream.go
new file mode 100644
index 000000000..c620ee3a1
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kinesis_stream.go
@@ -0,0 +1,393 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/kinesis"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsKinesisStream() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsKinesisStreamCreate,
+		Read:   resourceAwsKinesisStreamRead,
+		Update: resourceAwsKinesisStreamUpdate,
+		Delete: resourceAwsKinesisStreamDelete,
+		Importer: &schema.ResourceImporter{
+			State: resourceAwsKinesisStreamImport,
+		},
+
+		Schema: map[string]*schema.Schema{
+			"name": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"shard_count": {
+				Type:     schema.TypeInt,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"retention_period": {
+				Type:     schema.TypeInt,
+				Optional: true,
+				Default:  24,
+				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
+					value := v.(int)
+					if value < 24 || value > 168 {
+
errors = append(errors, fmt.Errorf( + "%q must be between 24 and 168 hours", k)) + } + return + }, + }, + + "shard_level_metrics": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "arn": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "tags": tagsSchema(), + }, + } +} + +func resourceAwsKinesisStreamImport( + d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("name", d.Id()) + return []*schema.ResourceData{d}, nil +} + +func resourceAwsKinesisStreamCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kinesisconn + sn := d.Get("name").(string) + createOpts := &kinesis.CreateStreamInput{ + ShardCount: aws.Int64(int64(d.Get("shard_count").(int))), + StreamName: aws.String(sn), + } + + _, err := conn.CreateStream(createOpts) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + return fmt.Errorf("[WARN] Error creating Kinesis Stream: \"%s\", code: \"%s\"", awsErr.Message(), awsErr.Code()) + } + return err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"CREATING"}, + Target: []string{"ACTIVE"}, + Refresh: streamStateRefreshFunc(conn, sn), + Timeout: 5 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + streamRaw, err := stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for Kinesis Stream (%s) to become active: %s", + sn, err) + } + + s := streamRaw.(*kinesisStreamState) + d.SetId(s.arn) + d.Set("arn", s.arn) + d.Set("shard_count", len(s.openShards)) + + return resourceAwsKinesisStreamUpdate(d, meta) +} + +func resourceAwsKinesisStreamUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kinesisconn + + d.Partial(true) + if err := setTagsKinesis(conn, d); err != nil { + return err + } + + d.SetPartial("tags") + d.Partial(false) + + if err := setKinesisRetentionPeriod(conn, d); err != nil { + return err + } + if err := updateKinesisShardLevelMetrics(conn, d); err != nil { + return err + } + + return resourceAwsKinesisStreamRead(d, meta) +} + +func resourceAwsKinesisStreamRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kinesisconn + sn := d.Get("name").(string) + + state, err := readKinesisStreamState(conn, sn) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "ResourceNotFoundException" { + d.SetId("") + return nil + } + return fmt.Errorf("[WARN] Error reading Kinesis Stream: \"%s\", code: \"%s\"", awsErr.Message(), awsErr.Code()) + } + return err + + } + d.SetId(state.arn) + d.Set("arn", state.arn) + d.Set("shard_count", len(state.openShards)) + d.Set("retention_period", state.retentionPeriod) + + if len(state.shardLevelMetrics) > 0 { + d.Set("shard_level_metrics", state.shardLevelMetrics) + } + + // set tags + describeTagsOpts := &kinesis.ListTagsForStreamInput{ + StreamName: aws.String(sn), + } + tagsResp, err := conn.ListTagsForStream(describeTagsOpts) + if err != nil { + log.Printf("[DEBUG] Error retrieving tags for Stream: %s. 
%s", sn, err) + } else { + d.Set("tags", tagsToMapKinesis(tagsResp.Tags)) + } + + return nil +} + +func resourceAwsKinesisStreamDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kinesisconn + sn := d.Get("name").(string) + _, err := conn.DeleteStream(&kinesis.DeleteStreamInput{ + StreamName: aws.String(sn), + }) + + if err != nil { + return err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"DELETING"}, + Target: []string{"DESTROYED"}, + Refresh: streamStateRefreshFunc(conn, sn), + Timeout: 5 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for Stream (%s) to be destroyed: %s", + sn, err) + } + + d.SetId("") + return nil +} + +func setKinesisRetentionPeriod(conn *kinesis.Kinesis, d *schema.ResourceData) error { + sn := d.Get("name").(string) + + oraw, nraw := d.GetChange("retention_period") + o := oraw.(int) + n := nraw.(int) + + if n == 0 { + log.Printf("[DEBUG] Kinesis Stream (%q) Retention Period Not Changed", sn) + return nil + } + + if n > o { + log.Printf("[DEBUG] Increasing %s Stream Retention Period to %d", sn, n) + _, err := conn.IncreaseStreamRetentionPeriod(&kinesis.IncreaseStreamRetentionPeriodInput{ + StreamName: aws.String(sn), + RetentionPeriodHours: aws.Int64(int64(n)), + }) + if err != nil { + return err + } + + } else { + log.Printf("[DEBUG] Decreasing %s Stream Retention Period to %d", sn, n) + _, err := conn.DecreaseStreamRetentionPeriod(&kinesis.DecreaseStreamRetentionPeriodInput{ + StreamName: aws.String(sn), + RetentionPeriodHours: aws.Int64(int64(n)), + }) + if err != nil { + return err + } + } + + if err := waitForKinesisToBeActive(conn, sn); err != nil { + return err + } + + return nil +} + +func updateKinesisShardLevelMetrics(conn *kinesis.Kinesis, d *schema.ResourceData) error { + sn := d.Get("name").(string) + + o, n := d.GetChange("shard_level_metrics") + if o == nil { + o = new(schema.Set) + } + if n == nil { + n = new(schema.Set) + } + + os := o.(*schema.Set) + ns := n.(*schema.Set) + + disableMetrics := os.Difference(ns) + if disableMetrics.Len() != 0 { + metrics := disableMetrics.List() + log.Printf("[DEBUG] Disabling shard level metrics %v for stream %s", metrics, sn) + + props := &kinesis.DisableEnhancedMonitoringInput{ + StreamName: aws.String(sn), + ShardLevelMetrics: expandStringList(metrics), + } + + _, err := conn.DisableEnhancedMonitoring(props) + if err != nil { + return fmt.Errorf("Failure to disable shard level metrics for stream %s: %s", sn, err) + } + if err := waitForKinesisToBeActive(conn, sn); err != nil { + return err + } + } + + enabledMetrics := ns.Difference(os) + if enabledMetrics.Len() != 0 { + metrics := enabledMetrics.List() + log.Printf("[DEBUG] Enabling shard level metrics %v for stream %s", metrics, sn) + + props := &kinesis.EnableEnhancedMonitoringInput{ + StreamName: aws.String(sn), + ShardLevelMetrics: expandStringList(metrics), + } + + _, err := conn.EnableEnhancedMonitoring(props) + if err != nil { + return fmt.Errorf("Failure to enable shard level metrics for stream %s: %s", sn, err) + } + if err := waitForKinesisToBeActive(conn, sn); err != nil { + return err + } + } + + return nil +} + +type kinesisStreamState struct { + arn string + creationTimestamp int64 + status string + retentionPeriod int64 + openShards []string + closedShards []string + shardLevelMetrics []string +} + +func readKinesisStreamState(conn *kinesis.Kinesis, sn string) 
(*kinesisStreamState, error) { + describeOpts := &kinesis.DescribeStreamInput{ + StreamName: aws.String(sn), + } + + state := &kinesisStreamState{} + err := conn.DescribeStreamPages(describeOpts, func(page *kinesis.DescribeStreamOutput, last bool) (shouldContinue bool) { + state.arn = aws.StringValue(page.StreamDescription.StreamARN) + state.creationTimestamp = aws.TimeValue(page.StreamDescription.StreamCreationTimestamp).Unix() + state.status = aws.StringValue(page.StreamDescription.StreamStatus) + state.retentionPeriod = aws.Int64Value(page.StreamDescription.RetentionPeriodHours) + state.openShards = append(state.openShards, flattenShards(openShards(page.StreamDescription.Shards))...) + state.closedShards = append(state.closedShards, flattenShards(closedShards(page.StreamDescription.Shards))...) + state.shardLevelMetrics = flattenKinesisShardLevelMetrics(page.StreamDescription.EnhancedMonitoring) + return !last + }) + return state, err +} + +func streamStateRefreshFunc(conn *kinesis.Kinesis, sn string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + state, err := readKinesisStreamState(conn, sn) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "ResourceNotFoundException" { + return 42, "DESTROYED", nil + } + return nil, awsErr.Code(), err + } + return nil, "failed", err + } + + return state, state.status, nil + } +} + +func waitForKinesisToBeActive(conn *kinesis.Kinesis, sn string) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{"UPDATING"}, + Target: []string{"ACTIVE"}, + Refresh: streamStateRefreshFunc(conn, sn), + Timeout: 5 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err := stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for Kinesis Stream (%s) to become active: %s", + sn, err) + } + return nil +} + +func openShards(shards []*kinesis.Shard) []*kinesis.Shard { + return filterShards(shards, true) +} + +func closedShards(shards []*kinesis.Shard) []*kinesis.Shard { + return filterShards(shards, false) +} + +// See http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-merge.html +func filterShards(shards []*kinesis.Shard, open bool) []*kinesis.Shard { + res := make([]*kinesis.Shard, 0, len(shards)) + for _, s := range shards { + if open && s.SequenceNumberRange.EndingSequenceNumber == nil { + res = append(res, s) + } else if !open && s.SequenceNumberRange.EndingSequenceNumber != nil { + res = append(res, s) + } + } + return res +} + +func flattenShards(shards []*kinesis.Shard) []string { + res := make([]string, len(shards)) + for i, s := range shards { + res[i] = aws.StringValue(s.ShardId) + } + return res +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_alias.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_alias.go new file mode 100644 index 000000000..7e3f0f2f6 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_alias.go @@ -0,0 +1,184 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/kms" +) + +func resourceAwsKmsAlias() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsKmsAliasCreate, + Read: resourceAwsKmsAliasRead, + Update: resourceAwsKmsAliasUpdate, + Delete: 
resourceAwsKmsAliasDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAwsKmsAliasImport, + }, + + Schema: map[string]*schema.Schema{ + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: validateAwsKmsName, + }, + "name_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + value := v.(string) + if !regexp.MustCompile(`^(alias\/)[a-zA-Z0-9:/_-]+$`).MatchString(value) { + es = append(es, fmt.Errorf( + "%q must begin with 'alias/' and be comprised of only [a-zA-Z0-9:/_-]", k)) + } + return + }, + }, + "target_key_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceAwsKmsAliasCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kmsconn + + var name string + if v, ok := d.GetOk("name"); ok { + name = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + name = resource.PrefixedUniqueId(v.(string)) + } else { + name = resource.PrefixedUniqueId("alias/") + } + + targetKeyId := d.Get("target_key_id").(string) + + log.Printf("[DEBUG] KMS alias create name: %s, target_key: %s", name, targetKeyId) + + req := &kms.CreateAliasInput{ + AliasName: aws.String(name), + TargetKeyId: aws.String(targetKeyId), + } + _, err := conn.CreateAlias(req) + if err != nil { + return err + } + d.SetId(name) + return resourceAwsKmsAliasRead(d, meta) +} + +func resourceAwsKmsAliasRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kmsconn + + alias, err := findKmsAliasByName(conn, d.Id(), nil) + if err != nil { + return err + } + if alias == nil { + log.Printf("[DEBUG] Removing KMS Alias (%s) as it's already gone", d.Id()) + d.SetId("") + return nil + } + + log.Printf("[DEBUG] Found KMS Alias: %s", alias) + + d.Set("arn", alias.AliasArn) + d.Set("target_key_id", alias.TargetKeyId) + + return nil +} + +func resourceAwsKmsAliasUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kmsconn + + if d.HasChange("target_key_id") { + err := resourceAwsKmsAliasTargetUpdate(conn, d) + if err != nil { + return err + } + } + return nil +} + +func resourceAwsKmsAliasTargetUpdate(conn *kms.KMS, d *schema.ResourceData) error { + name := d.Get("name").(string) + targetKeyId := d.Get("target_key_id").(string) + + log.Printf("[DEBUG] KMS alias: %s, update target: %s", name, targetKeyId) + + req := &kms.UpdateAliasInput{ + AliasName: aws.String(name), + TargetKeyId: aws.String(targetKeyId), + } + _, err := conn.UpdateAlias(req) + + return err +} + +func resourceAwsKmsAliasDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kmsconn + + req := &kms.DeleteAliasInput{ + AliasName: aws.String(d.Id()), + } + _, err := conn.DeleteAlias(req) + if err != nil { + return err + } + + log.Printf("[DEBUG] KMS Alias: (%s) deleted.", d.Id()) + d.SetId("") + return nil +} + +// API by default limits results to 50 aliases +// This is how we make sure we won't miss any alias +// See http://docs.aws.amazon.com/kms/latest/APIReference/API_ListAliases.html +func findKmsAliasByName(conn *kms.KMS, name string, marker *string) (*kms.AliasListEntry, error) { + req := kms.ListAliasesInput{ + Limit: aws.Int64(int64(100)), + } + if marker != nil { + req.Marker = marker + } + + log.Printf("[DEBUG] Listing 
KMS aliases: %s", req) + resp, err := conn.ListAliases(&req) + if err != nil { + return nil, err + } + + for _, entry := range resp.Aliases { + if *entry.AliasName == name { + return entry, nil + } + } + if *resp.Truncated { + log.Printf("[DEBUG] KMS alias list is truncated, listing more via %s", *resp.NextMarker) + return findKmsAliasByName(conn, name, resp.NextMarker) + } + + return nil, nil +} + +func resourceAwsKmsAliasImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("name", d.Id()) + return []*schema.ResourceData{d}, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_key.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_key.go new file mode 100644 index 000000000..f95f76d95 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_kms_key.go @@ -0,0 +1,457 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/kms" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsKmsKey() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsKmsKeyCreate, + Read: resourceAwsKmsKeyRead, + Update: resourceAwsKmsKeyUpdate, + Delete: resourceAwsKmsKeyDelete, + Exists: resourceAwsKmsKeyExists, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "key_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "key_usage": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + value := v.(string) + if !(value == "ENCRYPT_DECRYPT" || value == "") { + es = append(es, fmt.Errorf( + "%q must be ENCRYPT_DECRYPT or not specified", k)) + } + return + }, + }, + "policy": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateJsonString, + DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, + }, + "is_enabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "enable_key_rotation": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "deletion_window_in_days": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + value := v.(int) + if value > 30 || value < 7 { + es = append(es, fmt.Errorf( + "%q must be between 7 and 30 days inclusive", k)) + } + return + }, + }, + "tags": tagsSchema(), + }, + } +} + +func resourceAwsKmsKeyCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kmsconn + + // Allow aws to chose default values if we don't pass them + var req kms.CreateKeyInput + if v, exists := d.GetOk("description"); exists { + req.Description = aws.String(v.(string)) + } + if v, exists := d.GetOk("key_usage"); exists { + req.KeyUsage = aws.String(v.(string)) + } + if v, exists := d.GetOk("policy"); exists { + req.Policy = aws.String(v.(string)) + } + if v, exists := d.GetOk("tags"); exists { + req.Tags = 
tagsFromMapKMS(v.(map[string]interface{})) + } + + var resp *kms.CreateKeyOutput + // AWS requires any principal in the policy to exist before the key is created. + // The KMS service's awareness of principals is limited by "eventual consistency". + // They acknowledge this here: + // http://docs.aws.amazon.com/kms/latest/APIReference/API_CreateKey.html + err := resource.Retry(30*time.Second, func() *resource.RetryError { + var err error + resp, err = conn.CreateKey(&req) + if isAWSErr(err, "MalformedPolicyDocumentException", "") { + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + }) + if err != nil { + return err + } + + d.SetId(*resp.KeyMetadata.KeyId) + d.Set("key_id", resp.KeyMetadata.KeyId) + + return _resourceAwsKmsKeyUpdate(d, meta, true) +} + +func resourceAwsKmsKeyRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kmsconn + + req := &kms.DescribeKeyInput{ + KeyId: aws.String(d.Id()), + } + resp, err := conn.DescribeKey(req) + if err != nil { + return err + } + metadata := resp.KeyMetadata + + if *metadata.KeyState == "PendingDeletion" { + log.Printf("[WARN] Removing KMS key %s because it's already gone", d.Id()) + d.SetId("") + return nil + } + + d.SetId(*metadata.KeyId) + + d.Set("arn", metadata.Arn) + d.Set("key_id", metadata.KeyId) + d.Set("description", metadata.Description) + d.Set("key_usage", metadata.KeyUsage) + d.Set("is_enabled", metadata.Enabled) + + p, err := conn.GetKeyPolicy(&kms.GetKeyPolicyInput{ + KeyId: metadata.KeyId, + PolicyName: aws.String("default"), + }) + if err != nil { + return err + } + + policy, err := normalizeJsonString(*p.Policy) + if err != nil { + return errwrap.Wrapf("policy contains an invalid JSON: {{err}}", err) + } + d.Set("policy", policy) + + krs, err := conn.GetKeyRotationStatus(&kms.GetKeyRotationStatusInput{ + KeyId: metadata.KeyId, + }) + if err != nil { + return err + } + d.Set("enable_key_rotation", krs.KeyRotationEnabled) + + tagList, err := conn.ListResourceTags(&kms.ListResourceTagsInput{ + KeyId: metadata.KeyId, + }) + if err != nil { + return fmt.Errorf("Failed to get KMS key tags (key: %s): %s", d.Get("key_id").(string), err) + } + d.Set("tags", tagsToMapKMS(tagList.Tags)) + + return nil +} + +func resourceAwsKmsKeyUpdate(d *schema.ResourceData, meta interface{}) error { + return _resourceAwsKmsKeyUpdate(d, meta, false) +} + +// We expect new keys to be enabled already +// but there is no easy way to differentiate between Update() +// called from Create() and regular update, so we have this wrapper +func _resourceAwsKmsKeyUpdate(d *schema.ResourceData, meta interface{}, isFresh bool) error { + conn := meta.(*AWSClient).kmsconn + + if d.HasChange("is_enabled") && d.Get("is_enabled").(bool) && !isFresh { + // Enable before any attributes will be modified + if err := updateKmsKeyStatus(conn, d.Id(), d.Get("is_enabled").(bool)); err != nil { + return err + } + } + + if d.HasChange("enable_key_rotation") { + if err := updateKmsKeyRotationStatus(conn, d); err != nil { + return err + } + } + + if d.HasChange("description") { + if err := resourceAwsKmsKeyDescriptionUpdate(conn, d); err != nil { + return err + } + } + if d.HasChange("policy") { + if err := resourceAwsKmsKeyPolicyUpdate(conn, d); err != nil { + return err + } + } + + if d.HasChange("is_enabled") && !d.Get("is_enabled").(bool) { + // Only disable when all attributes are modified + // because we cannot modify disabled keys + if err := updateKmsKeyStatus(conn, d.Id(), d.Get("is_enabled").(bool)); err != nil { 
+ return err + } + } + + if err := setTagsKMS(conn, d, d.Id()); err != nil { + return err + } + + return resourceAwsKmsKeyRead(d, meta) +} + +func resourceAwsKmsKeyDescriptionUpdate(conn *kms.KMS, d *schema.ResourceData) error { + description := d.Get("description").(string) + keyId := d.Get("key_id").(string) + + log.Printf("[DEBUG] KMS key: %s, update description: %s", keyId, description) + + req := &kms.UpdateKeyDescriptionInput{ + Description: aws.String(description), + KeyId: aws.String(keyId), + } + _, err := conn.UpdateKeyDescription(req) + return err +} + +func resourceAwsKmsKeyPolicyUpdate(conn *kms.KMS, d *schema.ResourceData) error { + policy, err := normalizeJsonString(d.Get("policy").(string)) + if err != nil { + return errwrap.Wrapf("policy contains an invalid JSON: {{err}}", err) + } + keyId := d.Get("key_id").(string) + + log.Printf("[DEBUG] KMS key: %s, update policy: %s", keyId, policy) + + req := &kms.PutKeyPolicyInput{ + KeyId: aws.String(keyId), + Policy: aws.String(policy), + PolicyName: aws.String("default"), + } + _, err = conn.PutKeyPolicy(req) + return err +} + +func updateKmsKeyStatus(conn *kms.KMS, id string, shouldBeEnabled bool) error { + var err error + + if shouldBeEnabled { + log.Printf("[DEBUG] Enabling KMS key %q", id) + _, err = conn.EnableKey(&kms.EnableKeyInput{ + KeyId: aws.String(id), + }) + } else { + log.Printf("[DEBUG] Disabling KMS key %q", id) + _, err = conn.DisableKey(&kms.DisableKeyInput{ + KeyId: aws.String(id), + }) + } + + if err != nil { + return fmt.Errorf("Failed to set KMS key %q status to %t: %q", + id, shouldBeEnabled, err.Error()) + } + + // Wait for propagation since KMS is eventually consistent + wait := resource.StateChangeConf{ + Pending: []string{fmt.Sprintf("%t", !shouldBeEnabled)}, + Target: []string{fmt.Sprintf("%t", shouldBeEnabled)}, + Timeout: 20 * time.Minute, + MinTimeout: 2 * time.Second, + ContinuousTargetOccurence: 10, + Refresh: func() (interface{}, string, error) { + log.Printf("[DEBUG] Checking if KMS key %s enabled status is %t", + id, shouldBeEnabled) + resp, err := conn.DescribeKey(&kms.DescribeKeyInput{ + KeyId: aws.String(id), + }) + if err != nil { + return resp, "FAILED", err + } + status := fmt.Sprintf("%t", *resp.KeyMetadata.Enabled) + log.Printf("[DEBUG] KMS key %s status received: %s, retrying", id, status) + + return resp, status, nil + }, + } + + _, err = wait.WaitForState() + if err != nil { + return fmt.Errorf("Failed setting KMS key status to %t: %s", shouldBeEnabled, err) + } + + return nil +} + +func updateKmsKeyRotationStatus(conn *kms.KMS, d *schema.ResourceData) error { + shouldEnableRotation := d.Get("enable_key_rotation").(bool) + + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + var err error + if shouldEnableRotation { + log.Printf("[DEBUG] Enabling key rotation for KMS key %q", d.Id()) + _, err = conn.EnableKeyRotation(&kms.EnableKeyRotationInput{ + KeyId: aws.String(d.Id()), + }) + } else { + log.Printf("[DEBUG] Disabling key rotation for KMS key %q", d.Id()) + _, err = conn.DisableKeyRotation(&kms.DisableKeyRotationInput{ + KeyId: aws.String(d.Id()), + }) + } + + if err != nil { + awsErr, ok := err.(awserr.Error) + if ok && awsErr.Code() == "DisabledException" { + return resource.RetryableError(err) + } + + return resource.NonRetryableError(err) + } + + return nil + }) + + if err != nil { + return fmt.Errorf("Failed to set key rotation for %q to %t: %q", + d.Id(), shouldEnableRotation, err.Error()) + } + + // Wait for propagation since KMS is eventually consistent + 
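+	// Note: the refresh below reports the rotation flag as the strings
+	// "true"/"false"; with MinTimeout of 1s and ContinuousTargetOccurence of 5,
+	// the wait only succeeds once GetKeyRotationStatus has returned the desired
+	// value five times in a row, guarding against replicas that lag behind.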
wait := resource.StateChangeConf{ + Pending: []string{fmt.Sprintf("%t", !shouldEnableRotation)}, + Target: []string{fmt.Sprintf("%t", shouldEnableRotation)}, + Timeout: 5 * time.Minute, + MinTimeout: 1 * time.Second, + ContinuousTargetOccurence: 5, + Refresh: func() (interface{}, string, error) { + log.Printf("[DEBUG] Checking if KMS key %s rotation status is %t", + d.Id(), shouldEnableRotation) + resp, err := conn.GetKeyRotationStatus(&kms.GetKeyRotationStatusInput{ + KeyId: aws.String(d.Id()), + }) + if err != nil { + return resp, "FAILED", err + } + status := fmt.Sprintf("%t", *resp.KeyRotationEnabled) + log.Printf("[DEBUG] KMS key %s rotation status received: %s, retrying", d.Id(), status) + + return resp, status, nil + }, + } + + _, err = wait.WaitForState() + if err != nil { + return fmt.Errorf("Failed setting KMS key rotation status to %t: %s", shouldEnableRotation, err) + } + + return nil +} + +func resourceAwsKmsKeyExists(d *schema.ResourceData, meta interface{}) (bool, error) { + conn := meta.(*AWSClient).kmsconn + + req := &kms.DescribeKeyInput{ + KeyId: aws.String(d.Id()), + } + resp, err := conn.DescribeKey(req) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "NotFoundException" { + return false, nil + } + } + return false, err + } + metadata := resp.KeyMetadata + + if *metadata.KeyState == "PendingDeletion" { + return false, nil + } + + return true, nil +} + +func resourceAwsKmsKeyDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kmsconn + keyId := d.Get("key_id").(string) + + req := &kms.ScheduleKeyDeletionInput{ + KeyId: aws.String(keyId), + } + if v, exists := d.GetOk("deletion_window_in_days"); exists { + req.PendingWindowInDays = aws.Int64(int64(v.(int))) + } + _, err := conn.ScheduleKeyDeletion(req) + if err != nil { + return err + } + + // Wait for propagation since KMS is eventually consistent + wait := resource.StateChangeConf{ + Pending: []string{"Enabled", "Disabled"}, + Target: []string{"PendingDeletion"}, + Timeout: 20 * time.Minute, + MinTimeout: 2 * time.Second, + ContinuousTargetOccurence: 10, + Refresh: func() (interface{}, string, error) { + log.Printf("[DEBUG] Checking if KMS key %s state is PendingDeletion", keyId) + resp, err := conn.DescribeKey(&kms.DescribeKeyInput{ + KeyId: aws.String(keyId), + }) + if err != nil { + return resp, "Failed", err + } + + metadata := *resp.KeyMetadata + log.Printf("[DEBUG] KMS key %s state is %s, retrying", keyId, *metadata.KeyState) + + return resp, *metadata.KeyState, nil + }, + } + + _, err = wait.WaitForState() + if err != nil { + return fmt.Errorf("Failed deactivating KMS key %s: %s", keyId, err) + } + + log.Printf("[DEBUG] KMS Key %s deactivated.", keyId) + d.SetId("") + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_alias.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_alias.go new file mode 100644 index 000000000..083225f3a --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_alias.go @@ -0,0 +1,146 @@ +package aws + +import ( + "fmt" + "log" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/lambda" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsLambdaAlias() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsLambdaAliasCreate, + Read: resourceAwsLambdaAliasRead, + Update: 
resourceAwsLambdaAliasUpdate, + Delete: resourceAwsLambdaAliasDelete, + + Schema: map[string]*schema.Schema{ + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "function_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "function_version": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +// resourceAwsLambdaAliasCreate maps to: +// CreateAlias in the API / SDK +func resourceAwsLambdaAliasCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lambdaconn + + functionName := d.Get("function_name").(string) + aliasName := d.Get("name").(string) + + log.Printf("[DEBUG] Creating Lambda alias: alias %s for function %s", aliasName, functionName) + + params := &lambda.CreateAliasInput{ + Description: aws.String(d.Get("description").(string)), + FunctionName: aws.String(functionName), + FunctionVersion: aws.String(d.Get("function_version").(string)), + Name: aws.String(aliasName), + } + + aliasConfiguration, err := conn.CreateAlias(params) + if err != nil { + return fmt.Errorf("Error creating Lambda alias: %s", err) + } + + d.SetId(*aliasConfiguration.AliasArn) + + return resourceAwsLambdaAliasRead(d, meta) +} + +// resourceAwsLambdaAliasRead maps to: +// GetAlias in the API / SDK +func resourceAwsLambdaAliasRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lambdaconn + + log.Printf("[DEBUG] Fetching Lambda alias: %s:%s", d.Get("function_name"), d.Get("name")) + + params := &lambda.GetAliasInput{ + FunctionName: aws.String(d.Get("function_name").(string)), + Name: aws.String(d.Get("name").(string)), + } + + aliasConfiguration, err := conn.GetAlias(params) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "ResourceNotFoundException" && strings.Contains(awsErr.Message(), "Cannot find alias arn") { + d.SetId("") + return nil + } + } + return err + } + + d.Set("description", aliasConfiguration.Description) + d.Set("function_version", aliasConfiguration.FunctionVersion) + d.Set("name", aliasConfiguration.Name) + d.Set("arn", aliasConfiguration.AliasArn) + + return nil +} + +// resourceAwsLambdaAliasDelete maps to: +// DeleteAlias in the API / SDK +func resourceAwsLambdaAliasDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lambdaconn + + log.Printf("[INFO] Deleting Lambda alias: %s:%s", d.Get("function_name"), d.Get("name")) + + params := &lambda.DeleteAliasInput{ + FunctionName: aws.String(d.Get("function_name").(string)), + Name: aws.String(d.Get("name").(string)), + } + + _, err := conn.DeleteAlias(params) + if err != nil { + return fmt.Errorf("Error deleting Lambda alias: %s", err) + } + + d.SetId("") + + return nil +} + +// resourceAwsLambdaAliasUpdate maps to: +// UpdateAlias in the API / SDK +func resourceAwsLambdaAliasUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lambdaconn + + log.Printf("[DEBUG] Updating Lambda alias: %s:%s", d.Get("function_name"), d.Get("name")) + + params := &lambda.UpdateAliasInput{ + Description: aws.String(d.Get("description").(string)), + FunctionName: aws.String(d.Get("function_name").(string)), + FunctionVersion: aws.String(d.Get("function_version").(string)), + Name: aws.String(d.Get("name").(string)), + } + + _, err := conn.UpdateAlias(params) + if err != nil { 
+ return fmt.Errorf("Error updating Lambda alias: %s", err) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_event_source_mapping.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_event_source_mapping.go new file mode 100644 index 000000000..dd8f64e35 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_event_source_mapping.go @@ -0,0 +1,215 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/lambda" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsLambdaEventSourceMapping() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsLambdaEventSourceMappingCreate, + Read: resourceAwsLambdaEventSourceMappingRead, + Update: resourceAwsLambdaEventSourceMappingUpdate, + Delete: resourceAwsLambdaEventSourceMappingDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "event_source_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "function_name": { + Type: schema.TypeString, + Required: true, + }, + "starting_position": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "batch_size": { + Type: schema.TypeInt, + Optional: true, + Default: 100, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "function_arn": { + Type: schema.TypeString, + Computed: true, + }, + "last_modified": { + Type: schema.TypeString, + Computed: true, + }, + "last_processing_result": { + Type: schema.TypeString, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + "state_transition_reason": { + Type: schema.TypeString, + Computed: true, + }, + "uuid": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +// resourceAwsLambdaEventSourceMappingCreate maps to: +// CreateEventSourceMapping in the API / SDK +func resourceAwsLambdaEventSourceMappingCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lambdaconn + + functionName := d.Get("function_name").(string) + eventSourceArn := d.Get("event_source_arn").(string) + + log.Printf("[DEBUG] Creating Lambda event source mapping: source %s to function %s", eventSourceArn, functionName) + + params := &lambda.CreateEventSourceMappingInput{ + EventSourceArn: aws.String(eventSourceArn), + FunctionName: aws.String(functionName), + StartingPosition: aws.String(d.Get("starting_position").(string)), + BatchSize: aws.Int64(int64(d.Get("batch_size").(int))), + Enabled: aws.Bool(d.Get("enabled").(bool)), + } + + // IAM profiles and roles can take some time to propagate in AWS: + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console + // Error creating Lambda function: InvalidParameterValueException: The + // function defined for the task cannot be assumed by Lambda. 
+ // + // The role may exist, but the permissions may not have propagated, so we + // retry + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + eventSourceMappingConfiguration, err := conn.CreateEventSourceMapping(params) + if err != nil { + if awserr, ok := err.(awserr.Error); ok { + if awserr.Code() == "InvalidParameterValueException" { + return resource.RetryableError(awserr) + } + } + return resource.NonRetryableError(err) + } + // No error + d.Set("uuid", eventSourceMappingConfiguration.UUID) + d.SetId(*eventSourceMappingConfiguration.UUID) + return nil + }) + + if err != nil { + return fmt.Errorf("Error creating Lambda event source mapping: %s", err) + } + + return resourceAwsLambdaEventSourceMappingRead(d, meta) +} + +// resourceAwsLambdaEventSourceMappingRead maps to: +// GetEventSourceMapping in the API / SDK +func resourceAwsLambdaEventSourceMappingRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lambdaconn + + log.Printf("[DEBUG] Fetching Lambda event source mapping: %s", d.Id()) + + params := &lambda.GetEventSourceMappingInput{ + UUID: aws.String(d.Id()), + } + + eventSourceMappingConfiguration, err := conn.GetEventSourceMapping(params) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "ResourceNotFoundException" { + log.Printf("[DEBUG] Lambda event source mapping (%s) not found", d.Id()) + d.SetId("") + + return nil + } + return err + } + + d.Set("batch_size", eventSourceMappingConfiguration.BatchSize) + d.Set("event_source_arn", eventSourceMappingConfiguration.EventSourceArn) + d.Set("function_arn", eventSourceMappingConfiguration.FunctionArn) + d.Set("last_modified", eventSourceMappingConfiguration.LastModified) + d.Set("last_processing_result", eventSourceMappingConfiguration.LastProcessingResult) + d.Set("state", eventSourceMappingConfiguration.State) + d.Set("state_transition_reason", eventSourceMappingConfiguration.StateTransitionReason) + d.Set("uuid", eventSourceMappingConfiguration.UUID) + d.Set("function_name", eventSourceMappingConfiguration.FunctionArn) + + return nil +} + +// resourceAwsLambdaEventSourceMappingDelete maps to: +// DeleteEventSourceMapping in the API / SDK +func resourceAwsLambdaEventSourceMappingDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lambdaconn + + log.Printf("[INFO] Deleting Lambda event source mapping: %s", d.Id()) + + params := &lambda.DeleteEventSourceMappingInput{ + UUID: aws.String(d.Id()), + } + + _, err := conn.DeleteEventSourceMapping(params) + if err != nil { + return fmt.Errorf("Error deleting Lambda event source mapping: %s", err) + } + + d.SetId("") + + return nil +} + +// resourceAwsLambdaEventSourceMappingUpdate maps to: +// UpdateEventSourceMapping in the API / SDK +func resourceAwsLambdaEventSourceMappingUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lambdaconn + + log.Printf("[DEBUG] Updating Lambda event source mapping: %s", d.Id()) + + params := &lambda.UpdateEventSourceMappingInput{ + UUID: aws.String(d.Id()), + BatchSize: aws.Int64(int64(d.Get("batch_size").(int))), + FunctionName: aws.String(d.Get("function_name").(string)), + Enabled: aws.Bool(d.Get("enabled").(bool)), + } + + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.UpdateEventSourceMapping(params) + if err != nil { + if awserr, ok := err.(awserr.Error); ok { + if awserr.Code() == "InvalidParameterValueException" { + return resource.RetryableError(awserr) + } + } + 
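+			// Anything other than the IAM-propagation symptom matched above is
+			// treated as terminal rather than retried.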
return resource.NonRetryableError(err) + } + return nil + }) + + if err != nil { + return fmt.Errorf("Error updating Lambda event source mapping: %s", err) + } + + return resourceAwsLambdaEventSourceMappingRead(d, meta) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_function.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_function.go new file mode 100644 index 000000000..527883964 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_function.go @@ -0,0 +1,707 @@ +package aws + +import ( + "fmt" + "io/ioutil" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/lambda" + "github.com/mitchellh/go-homedir" + + "errors" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" +) + +const awsMutexLambdaKey = `aws_lambda_function` + +func resourceAwsLambdaFunction() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsLambdaFunctionCreate, + Read: resourceAwsLambdaFunctionRead, + Update: resourceAwsLambdaFunctionUpdate, + Delete: resourceAwsLambdaFunctionDelete, + + Importer: &schema.ResourceImporter{ + State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("function_name", d.Id()) + return []*schema.ResourceData{d}, nil + }, + }, + + Schema: map[string]*schema.Schema{ + "filename": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"s3_bucket", "s3_key", "s3_object_version"}, + }, + "s3_bucket": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"filename"}, + }, + "s3_key": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"filename"}, + }, + "s3_object_version": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"filename"}, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "dead_letter_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MinItems: 0, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "target_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, + }, + }, + }, + }, + "function_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "handler": { + Type: schema.TypeString, + Required: true, + }, + "memory_size": { + Type: schema.TypeInt, + Optional: true, + Default: 128, + }, + "role": { + Type: schema.TypeString, + Required: true, + }, + "runtime": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateRuntime, + }, + "timeout": { + Type: schema.TypeInt, + Optional: true, + Default: 3, + }, + "publish": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "version": { + Type: schema.TypeString, + Computed: true, + }, + "vpc_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "subnet_ids": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "security_group_ids": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "vpc_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "arn": 
{ + Type: schema.TypeString, + Computed: true, + }, + "qualified_arn": { + Type: schema.TypeString, + Computed: true, + }, + "invoke_arn": { + Type: schema.TypeString, + Computed: true, + }, + "last_modified": { + Type: schema.TypeString, + Computed: true, + }, + "source_code_hash": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "environment": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "variables": { + Type: schema.TypeMap, + Optional: true, + Elem: schema.TypeString, + }, + }, + }, + }, + + "tracing_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mode": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"Active", "PassThrough"}, true), + }, + }, + }, + }, + + "kms_key_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, + }, + + "tags": tagsSchema(), + }, + } +} + +// resourceAwsLambdaFunction maps to: +// CreateFunction in the API / SDK +func resourceAwsLambdaFunctionCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lambdaconn + + functionName := d.Get("function_name").(string) + iamRole := d.Get("role").(string) + + log.Printf("[DEBUG] Creating Lambda Function %s with role %s", functionName, iamRole) + + filename, hasFilename := d.GetOk("filename") + s3Bucket, bucketOk := d.GetOk("s3_bucket") + s3Key, keyOk := d.GetOk("s3_key") + s3ObjectVersion, versionOk := d.GetOk("s3_object_version") + + if !hasFilename && !bucketOk && !keyOk && !versionOk { + return errors.New("filename or s3_* attributes must be set") + } + + var functionCode *lambda.FunctionCode + if hasFilename { + // Grab an exclusive lock so that we're only reading one function into + // memory at a time. + // See https://github.com/hashicorp/terraform/issues/9364 + awsMutexKV.Lock(awsMutexLambdaKey) + defer awsMutexKV.Unlock(awsMutexLambdaKey) + file, err := loadFileContent(filename.(string)) + if err != nil { + return fmt.Errorf("Unable to load %q: %s", filename.(string), err) + } + functionCode = &lambda.FunctionCode{ + ZipFile: file, + } + } else { + if !bucketOk || !keyOk { + return errors.New("s3_bucket and s3_key must all be set while using S3 code source") + } + functionCode = &lambda.FunctionCode{ + S3Bucket: aws.String(s3Bucket.(string)), + S3Key: aws.String(s3Key.(string)), + } + if versionOk { + functionCode.S3ObjectVersion = aws.String(s3ObjectVersion.(string)) + } + } + + params := &lambda.CreateFunctionInput{ + Code: functionCode, + Description: aws.String(d.Get("description").(string)), + FunctionName: aws.String(functionName), + Handler: aws.String(d.Get("handler").(string)), + MemorySize: aws.Int64(int64(d.Get("memory_size").(int))), + Role: aws.String(iamRole), + Runtime: aws.String(d.Get("runtime").(string)), + Timeout: aws.Int64(int64(d.Get("timeout").(int))), + Publish: aws.Bool(d.Get("publish").(bool)), + } + + if v, ok := d.GetOk("dead_letter_config"); ok { + dlcMaps := v.([]interface{}) + if len(dlcMaps) == 1 { // Schema guarantees either 0 or 1 + // Prevent panic on nil dead_letter_config. 
See GH-14961 + if dlcMaps[0] == nil { + return fmt.Errorf("Nil dead_letter_config supplied for function: %s", functionName) + } + dlcMap := dlcMaps[0].(map[string]interface{}) + params.DeadLetterConfig = &lambda.DeadLetterConfig{ + TargetArn: aws.String(dlcMap["target_arn"].(string)), + } + } + } + + if v, ok := d.GetOk("vpc_config"); ok { + config, err := validateVPCConfig(v) + if err != nil { + return err + } + + if config != nil { + var subnetIds []*string + for _, id := range config["subnet_ids"].(*schema.Set).List() { + subnetIds = append(subnetIds, aws.String(id.(string))) + } + + var securityGroupIds []*string + for _, id := range config["security_group_ids"].(*schema.Set).List() { + securityGroupIds = append(securityGroupIds, aws.String(id.(string))) + } + + params.VpcConfig = &lambda.VpcConfig{ + SubnetIds: subnetIds, + SecurityGroupIds: securityGroupIds, + } + } + } + + if v, ok := d.GetOk("tracing_config"); ok { + tracingConfig := v.([]interface{}) + tracing := tracingConfig[0].(map[string]interface{}) + params.TracingConfig = &lambda.TracingConfig{ + Mode: aws.String(tracing["mode"].(string)), + } + } + + if v, ok := d.GetOk("environment"); ok { + environments := v.([]interface{}) + environment, ok := environments[0].(map[string]interface{}) + if !ok { + return errors.New("At least one field is expected inside environment") + } + + if environmentVariables, ok := environment["variables"]; ok { + variables := readEnvironmentVariables(environmentVariables.(map[string]interface{})) + + params.Environment = &lambda.Environment{ + Variables: aws.StringMap(variables), + } + } + } + + if v, ok := d.GetOk("kms_key_arn"); ok { + params.KMSKeyArn = aws.String(v.(string)) + } + + if v, exists := d.GetOk("tags"); exists { + params.Tags = tagsFromMapGeneric(v.(map[string]interface{})) + } + + // IAM profiles can take ~10 seconds to propagate in AWS: + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console + // Error creating Lambda function: InvalidParameterValueException: The role defined for the task cannot be assumed by Lambda. 
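+	// The retry below matches on both the error code and a message substring
+	// (via isAWSErr), so an unrelated InvalidParameterValueException still
+	// fails fast instead of spinning for the full 10 minutes.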
+ err := resource.Retry(10*time.Minute, func() *resource.RetryError { + _, err := conn.CreateFunction(params) + if err != nil { + log.Printf("[DEBUG] Error creating Lambda Function: %s", err) + + if isAWSErr(err, "InvalidParameterValueException", "The role defined for the function cannot be assumed by Lambda") { + log.Printf("[DEBUG] Received %s, retrying CreateFunction", err) + return resource.RetryableError(err) + } + if isAWSErr(err, "InvalidParameterValueException", "The provided execution role does not have permissions") { + log.Printf("[DEBUG] Received %s, retrying CreateFunction", err) + return resource.RetryableError(err) + } + + return resource.NonRetryableError(err) + } + return nil + }) + if err != nil { + return fmt.Errorf("Error creating Lambda function: %s", err) + } + + d.SetId(d.Get("function_name").(string)) + + return resourceAwsLambdaFunctionRead(d, meta) +} + +// resourceAwsLambdaFunctionRead maps to: +// GetFunction in the API / SDK +func resourceAwsLambdaFunctionRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lambdaconn + + log.Printf("[DEBUG] Fetching Lambda Function: %s", d.Id()) + + params := &lambda.GetFunctionInput{ + FunctionName: aws.String(d.Get("function_name").(string)), + } + + getFunctionOutput, err := conn.GetFunction(params) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceNotFoundException" && !d.IsNewResource() { + d.SetId("") + return nil + } + return err + } + + // getFunctionOutput.Code.Location is a pre-signed URL pointing at the zip + // file that we uploaded when we created the resource. You can use it to + // download the code from AWS. The other part is + // getFunctionOutput.Configuration which holds metadata. + + function := getFunctionOutput.Configuration + // TODO error checking / handling on the Set() calls. 
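+	// The scalar d.Set calls below can realistically only fail on a type
+	// mismatch, so their return values are ignored for now; the compound
+	// attributes further down (vpc_config, environment) do check the error.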
+ d.Set("arn", function.FunctionArn) + d.Set("description", function.Description) + d.Set("handler", function.Handler) + d.Set("memory_size", function.MemorySize) + d.Set("last_modified", function.LastModified) + d.Set("role", function.Role) + d.Set("runtime", function.Runtime) + d.Set("timeout", function.Timeout) + d.Set("kms_key_arn", function.KMSKeyArn) + d.Set("tags", tagsToMapGeneric(getFunctionOutput.Tags)) + + config := flattenLambdaVpcConfigResponse(function.VpcConfig) + log.Printf("[INFO] Setting Lambda %s VPC config %#v from API", d.Id(), config) + vpcSetErr := d.Set("vpc_config", config) + if vpcSetErr != nil { + return fmt.Errorf("Failed setting vpc_config: %s", vpcSetErr) + } + + d.Set("source_code_hash", function.CodeSha256) + + if err := d.Set("environment", flattenLambdaEnvironment(function.Environment)); err != nil { + log.Printf("[ERR] Error setting environment for Lambda Function (%s): %s", d.Id(), err) + } + + if function.DeadLetterConfig != nil && function.DeadLetterConfig.TargetArn != nil { + d.Set("dead_letter_config", []interface{}{ + map[string]interface{}{ + "target_arn": *function.DeadLetterConfig.TargetArn, + }, + }) + } else { + d.Set("dead_letter_config", []interface{}{}) + } + + if function.TracingConfig != nil { + d.Set("tracing_config", []interface{}{ + map[string]interface{}{ + "mode": *function.TracingConfig.Mode, + }, + }) + } + + // List is sorted from oldest to latest + // so this may get costly over time :'( + var lastVersion, lastQualifiedArn string + err = listVersionsByFunctionPages(conn, &lambda.ListVersionsByFunctionInput{ + FunctionName: function.FunctionName, + MaxItems: aws.Int64(10000), + }, func(p *lambda.ListVersionsByFunctionOutput, lastPage bool) bool { + if lastPage { + last := p.Versions[len(p.Versions)-1] + lastVersion = *last.Version + lastQualifiedArn = *last.FunctionArn + return false + } + return true + }) + if err != nil { + return err + } + + d.Set("version", lastVersion) + d.Set("qualified_arn", lastQualifiedArn) + + d.Set("invoke_arn", buildLambdaInvokeArn(*function.FunctionArn, meta.(*AWSClient).region)) + + return nil +} + +func listVersionsByFunctionPages(c *lambda.Lambda, input *lambda.ListVersionsByFunctionInput, + fn func(p *lambda.ListVersionsByFunctionOutput, lastPage bool) bool) error { + for { + page, err := c.ListVersionsByFunction(input) + if err != nil { + return err + } + lastPage := page.NextMarker == nil + + shouldContinue := fn(page, lastPage) + if !shouldContinue || lastPage { + break + } + input.Marker = page.NextMarker + } + return nil +} + +// resourceAwsLambdaFunction maps to: +// DeleteFunction in the API / SDK +func resourceAwsLambdaFunctionDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lambdaconn + + log.Printf("[INFO] Deleting Lambda Function: %s", d.Id()) + + params := &lambda.DeleteFunctionInput{ + FunctionName: aws.String(d.Get("function_name").(string)), + } + + _, err := conn.DeleteFunction(params) + if err != nil { + return fmt.Errorf("Error deleting Lambda Function: %s", err) + } + + d.SetId("") + + return nil +} + +// resourceAwsLambdaFunctionUpdate maps to: +// UpdateFunctionCode in the API / SDK +func resourceAwsLambdaFunctionUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lambdaconn + + d.Partial(true) + + arn := d.Get("arn").(string) + if tagErr := setTagsLambda(conn, d, arn); tagErr != nil { + return tagErr + } + d.SetPartial("tags") + + if d.HasChange("filename") || d.HasChange("source_code_hash") || 
d.HasChange("s3_bucket") || d.HasChange("s3_key") || d.HasChange("s3_object_version") { + codeReq := &lambda.UpdateFunctionCodeInput{ + FunctionName: aws.String(d.Id()), + Publish: aws.Bool(d.Get("publish").(bool)), + } + + if v, ok := d.GetOk("filename"); ok { + // Grab an exclusive lock so that we're only reading one function into + // memory at a time. + // See https://github.com/hashicorp/terraform/issues/9364 + awsMutexKV.Lock(awsMutexLambdaKey) + defer awsMutexKV.Unlock(awsMutexLambdaKey) + file, err := loadFileContent(v.(string)) + if err != nil { + return fmt.Errorf("Unable to load %q: %s", v.(string), err) + } + codeReq.ZipFile = file + } else { + s3Bucket, _ := d.GetOk("s3_bucket") + s3Key, _ := d.GetOk("s3_key") + s3ObjectVersion, versionOk := d.GetOk("s3_object_version") + + codeReq.S3Bucket = aws.String(s3Bucket.(string)) + codeReq.S3Key = aws.String(s3Key.(string)) + if versionOk { + codeReq.S3ObjectVersion = aws.String(s3ObjectVersion.(string)) + } + } + + log.Printf("[DEBUG] Send Update Lambda Function Code request: %#v", codeReq) + + _, err := conn.UpdateFunctionCode(codeReq) + if err != nil { + return fmt.Errorf("Error modifying Lambda Function Code %s: %s", d.Id(), err) + } + + d.SetPartial("filename") + d.SetPartial("source_code_hash") + d.SetPartial("s3_bucket") + d.SetPartial("s3_key") + d.SetPartial("s3_object_version") + } + + configReq := &lambda.UpdateFunctionConfigurationInput{ + FunctionName: aws.String(d.Id()), + } + + configUpdate := false + if d.HasChange("description") { + configReq.Description = aws.String(d.Get("description").(string)) + configUpdate = true + } + if d.HasChange("handler") { + configReq.Handler = aws.String(d.Get("handler").(string)) + configUpdate = true + } + if d.HasChange("memory_size") { + configReq.MemorySize = aws.Int64(int64(d.Get("memory_size").(int))) + configUpdate = true + } + if d.HasChange("role") { + configReq.Role = aws.String(d.Get("role").(string)) + configUpdate = true + } + if d.HasChange("timeout") { + configReq.Timeout = aws.Int64(int64(d.Get("timeout").(int))) + configUpdate = true + } + if d.HasChange("kms_key_arn") { + configReq.KMSKeyArn = aws.String(d.Get("kms_key_arn").(string)) + configUpdate = true + } + if d.HasChange("dead_letter_config") { + dlcMaps := d.Get("dead_letter_config").([]interface{}) + if len(dlcMaps) == 1 { // Schema guarantees either 0 or 1 + dlcMap := dlcMaps[0].(map[string]interface{}) + configReq.DeadLetterConfig = &lambda.DeadLetterConfig{ + TargetArn: aws.String(dlcMap["target_arn"].(string)), + } + configUpdate = true + } + } + if d.HasChange("tracing_config") { + tracingConfig := d.Get("tracing_config").([]interface{}) + if len(tracingConfig) == 1 { // Schema guarantees either 0 or 1 + config := tracingConfig[0].(map[string]interface{}) + configReq.TracingConfig = &lambda.TracingConfig{ + Mode: aws.String(config["mode"].(string)), + } + configUpdate = true + } + } + if d.HasChange("runtime") { + configReq.Runtime = aws.String(d.Get("runtime").(string)) + configUpdate = true + } + if d.HasChange("environment") { + if v, ok := d.GetOk("environment"); ok { + environments := v.([]interface{}) + environment, ok := environments[0].(map[string]interface{}) + if !ok { + return errors.New("At least one field is expected inside environment") + } + + if environmentVariables, ok := environment["variables"]; ok { + variables := readEnvironmentVariables(environmentVariables.(map[string]interface{})) + + configReq.Environment = &lambda.Environment{ + Variables: aws.StringMap(variables), + } + 
configUpdate = true
+			}
+		} else {
+			configReq.Environment = &lambda.Environment{
+				Variables: aws.StringMap(map[string]string{}),
+			}
+			configUpdate = true
+		}
+	}
+
+	if configUpdate {
+		log.Printf("[DEBUG] Send Update Lambda Function Configuration request: %#v", configReq)
+		_, err := conn.UpdateFunctionConfiguration(configReq)
+		if err != nil {
+			return fmt.Errorf("Error modifying Lambda Function Configuration %s: %s", d.Id(), err)
+		}
+		d.SetPartial("description")
+		d.SetPartial("handler")
+		d.SetPartial("memory_size")
+		d.SetPartial("role")
+		d.SetPartial("timeout")
+	}
+	d.Partial(false)
+
+	return resourceAwsLambdaFunctionRead(d, meta)
+}
+
+// loadFileContent returns contents of a file in a given path
+func loadFileContent(v string) ([]byte, error) {
+	filename, err := homedir.Expand(v)
+	if err != nil {
+		return nil, err
+	}
+	fileContent, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	return fileContent, nil
+}
+
+func readEnvironmentVariables(ev map[string]interface{}) map[string]string {
+	variables := make(map[string]string)
+	for k, v := range ev {
+		variables[k] = v.(string)
+	}
+
+	return variables
+}
+
+func validateVPCConfig(v interface{}) (map[string]interface{}, error) {
+	configs := v.([]interface{})
+	if len(configs) > 1 {
+		return nil, errors.New("Only a single vpc_config block is expected")
+	}
+
+	config, ok := configs[0].(map[string]interface{})
+
+	if !ok {
+		return nil, errors.New("vpc_config is <nil>")
+	}
+
+	// if subnet_ids and security_group_ids are both empty then the VPC is optional
+	if config["subnet_ids"].(*schema.Set).Len() == 0 && config["security_group_ids"].(*schema.Set).Len() == 0 {
+		return nil, nil
+	}
+
+	if config["subnet_ids"].(*schema.Set).Len() == 0 {
+		return nil, errors.New("vpc_config.subnet_ids cannot be empty")
+	}
+
+	if config["security_group_ids"].(*schema.Set).Len() == 0 {
+		return nil, errors.New("vpc_config.security_group_ids cannot be empty")
+	}
+
+	return config, nil
+}
+
+func validateRuntime(v interface{}, k string) (ws []string, errors []error) {
+	runtime := v.(string)
+
+	if runtime == lambda.RuntimeNodejs {
+		errors = append(errors, fmt.Errorf(
+			"%s has reached end of life since October 2016 and has been deprecated in favor of %s.",
+			runtime, lambda.RuntimeNodejs43))
+	}
+	return
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_permission.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_permission.go
new file mode 100644
index 000000000..6372526e9
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_permission.go
@@ -0,0 +1,378 @@
+package aws
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/lambda"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+var LambdaFunctionRegexp = `^(arn:[\w-]+:lambda:)?([a-z]{2}-[a-z]+-\d{1}:)?(\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:(\$LATEST|[a-zA-Z0-9-_]+))?$`
+
+func resourceAwsLambdaPermission() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsLambdaPermissionCreate,
+		Read:   resourceAwsLambdaPermissionRead,
+		Delete: resourceAwsLambdaPermissionDelete,
+
+		Schema: map[string]*schema.Schema{
+			"action": {
+				Type:         schema.TypeString,
+				Required:     true,
+				ForceNew:     true,
+				ValidateFunc: validateLambdaPermissionAction,
+			},
+			"function_name": {
+				Type:         schema.TypeString,
+				Required:     true,
+				ForceNew:     true,
+				ValidateFunc: validateLambdaFunctionName,
+			},
+			"principal": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"qualifier": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				ForceNew:     true,
+				ValidateFunc: validateLambdaQualifier,
+			},
+			"source_account": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				ForceNew:     true,
+				ValidateFunc: validateAwsAccountId,
+			},
+			"source_arn": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				ForceNew:     true,
+				ValidateFunc: validateArn,
+			},
+			"statement_id": {
+				Type:         schema.TypeString,
+				Required:     true,
+				ForceNew:     true,
+				ValidateFunc: validatePolicyStatementId,
+			},
+		},
+	}
+}
+
+func resourceAwsLambdaPermissionCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).lambdaconn
+
+	functionName := d.Get("function_name").(string)
+
+	// There is a bug in the API (reported and acknowledged by AWS)
+	// which causes some permissions to be ignored when API calls are sent in parallel
+	// We work around this bug via mutex
+	awsMutexKV.Lock(functionName)
+	defer awsMutexKV.Unlock(functionName)
+
+	input := lambda.AddPermissionInput{
+		Action:       aws.String(d.Get("action").(string)),
+		FunctionName: aws.String(functionName),
+		Principal:    aws.String(d.Get("principal").(string)),
+		StatementId:  aws.String(d.Get("statement_id").(string)),
+	}
+
+	if v, ok := d.GetOk("qualifier"); ok {
+		input.Qualifier = aws.String(v.(string))
+	}
+	if v, ok := d.GetOk("source_account"); ok {
+		input.SourceAccount = aws.String(v.(string))
+	}
+	if v, ok := d.GetOk("source_arn"); ok {
+		input.SourceArn = aws.String(v.(string))
+	}
+
+	log.Printf("[DEBUG] Adding new Lambda permission: %s", input)
+	var out *lambda.AddPermissionOutput
+	err := resource.Retry(1*time.Minute, func() *resource.RetryError {
+		var err error
+		out, err = conn.AddPermission(&input)
+
+		if err != nil {
+			if awsErr, ok := err.(awserr.Error); ok {
+				// IAM is eventually consistent :/
+				if awsErr.Code() == "ResourceConflictException" {
+					return resource.RetryableError(
+						fmt.Errorf("[WARN] Error adding new Lambda Permission for %s, retrying: %s",
+							*input.FunctionName, err))
+				}
+			}
+			return resource.NonRetryableError(err)
+		}
+		return nil
+	})
+
+	if err != nil {
+		return err
+	}
+
+	if out != nil && out.Statement != nil {
+		log.Printf("[DEBUG] Created new Lambda permission: %s", *out.Statement)
+	} else {
+		log.Printf("[DEBUG] Created new Lambda permission, but no Statement was included")
+	}
+
+	d.SetId(d.Get("statement_id").(string))
+
+	err = resource.Retry(5*time.Minute, func() *resource.RetryError {
+		// IAM is eventually consistent :/
+		err := resourceAwsLambdaPermissionRead(d, meta)
+		if err != nil {
+			if strings.HasPrefix(err.Error(), "Error reading Lambda policy: ResourceNotFoundException") {
+				return resource.RetryableError(
+					fmt.Errorf("[WARN] Error reading newly created Lambda Permission for %s, retrying: %s",
+						*input.FunctionName, err))
+			}
+			if strings.HasPrefix(err.Error(), "Failed to find statement \""+d.Id()) {
+				return resource.RetryableError(
+					fmt.Errorf("[WARN] Error reading newly created Lambda Permission statement for %s, retrying: %s",
+						*input.FunctionName, err))
+			}
+
+			log.Printf("[ERROR] An actual error occurred when expecting Lambda policy to be there: %s", err)
+			return resource.NonRetryableError(err)
+		}
+		return nil
+	})
+
+	return err
+}
+
+func resourceAwsLambdaPermissionRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).lambdaconn
+
+	input := lambda.GetPolicyInput{
+		FunctionName: aws.String(d.Get("function_name").(string)),
+	}
+	if v, ok := d.GetOk("qualifier"); ok {
+		input.Qualifier = aws.String(v.(string))
+	}
+
+	log.Printf("[DEBUG] Looking for Lambda permission: %s", input)
+	var out *lambda.GetPolicyOutput
+	var statement *LambdaPolicyStatement
+	err := resource.Retry(1*time.Minute, func() *resource.RetryError {
+		// IAM is eventually consistent :/
+		var err error
+		out, err = conn.GetPolicy(&input)
+		if err != nil {
+			if awsErr, ok := err.(awserr.Error); ok {
+				if awsErr.Code() == "ResourceNotFoundException" {
+					return resource.RetryableError(err)
+				}
+			}
+			return resource.NonRetryableError(err)
+		}
+
+		policyInBytes := []byte(*out.Policy)
+		policy := LambdaPolicy{}
+		err = json.Unmarshal(policyInBytes, &policy)
+		if err != nil {
+			return resource.NonRetryableError(err)
+		}
+
+		statement, err = findLambdaPolicyStatementById(&policy, d.Id())
+		return resource.RetryableError(err)
+	})
+
+	if err != nil {
+		// Missing whole policy or Lambda function (API error)
+		if awsErr, ok := err.(awserr.Error); ok {
+			if awsErr.Code() == "ResourceNotFoundException" {
+				log.Printf("[WARN] No Lambda Permission Policy found: %v", input)
+				d.SetId("")
+				return nil
+			}
+		}
+
+		// Missing permission inside valid policy
+		if nfErr, ok := err.(*resource.NotFoundError); ok {
+			log.Printf("[WARN] %s", nfErr)
+			d.SetId("")
+			return nil
+		}
+
+		return err
+	}
+
+	qualifier, err := getQualifierFromLambdaAliasOrVersionArn(statement.Resource)
+	if err != nil {
+		log.Printf("[ERR] Error getting Lambda Qualifier: %s", err)
+	}
+	d.Set("qualifier", qualifier)
+
+	// Save Lambda function name in the same format
+	if strings.HasPrefix(d.Get("function_name").(string), "arn:"+meta.(*AWSClient).partition+":lambda:") {
+		// Strip qualifier off
+		trimmedArn := strings.TrimSuffix(statement.Resource, ":"+qualifier)
+		d.Set("function_name", trimmedArn)
+	} else {
+		functionName, err := getFunctionNameFromLambdaArn(statement.Resource)
+		if err != nil {
+			return err
+		}
+		d.Set("function_name", functionName)
+	}
+
+	d.Set("action", statement.Action)
+	// Check if the principal is a cross-account IAM role
+	if _, ok := statement.Principal["AWS"]; ok {
+		d.Set("principal", statement.Principal["AWS"])
+	} else {
+		d.Set("principal", statement.Principal["Service"])
+	}
+
+	if stringEquals, ok := statement.Condition["StringEquals"]; ok {
+		d.Set("source_account", stringEquals["AWS:SourceAccount"])
+	}
+
+	if arnLike, ok := statement.Condition["ArnLike"]; ok {
+		d.Set("source_arn", arnLike["AWS:SourceArn"])
+	}
+
+	return nil
+}
+
+func resourceAwsLambdaPermissionDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).lambdaconn
+
+	functionName := d.Get("function_name").(string)
+
+	// There is a bug in the API (reported and acknowledged by AWS)
+	// which causes some permissions to be ignored when API calls are sent in parallel
+	// We work around this bug via mutex
+	awsMutexKV.Lock(functionName)
+	defer awsMutexKV.Unlock(functionName)
+
+	input := lambda.RemovePermissionInput{
+		FunctionName: aws.String(functionName),
+		StatementId:  aws.String(d.Id()),
+	}
+
+	if v, ok := d.GetOk("qualifier"); ok {
+		input.Qualifier = aws.String(v.(string))
+	}
+
+	log.Printf("[DEBUG] Removing Lambda permission: %s", input)
+	_, err := conn.RemovePermission(&input)
+	if err != nil {
+		return err
+	}
+
+	err = resource.Retry(5*time.Minute, func() *resource.RetryError {
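+		// Treat three outcomes as "gone": the whole policy 404s, the policy
+		// document is empty, or the statement ID is no longer present in the
+		// unmarshalled policy.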
log.Printf("[DEBUG] Checking if Lambda permission %q is deleted", d.Id()) + + params := &lambda.GetPolicyInput{ + FunctionName: aws.String(d.Get("function_name").(string)), + } + if v, ok := d.GetOk("qualifier"); ok { + params.Qualifier = aws.String(v.(string)) + } + + log.Printf("[DEBUG] Looking for Lambda permission: %s", *params) + resp, err := conn.GetPolicy(params) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "ResourceNotFoundException" { + return nil + } + } + return resource.NonRetryableError(err) + } + + if resp.Policy == nil { + return nil + } + + policyInBytes := []byte(*resp.Policy) + policy := LambdaPolicy{} + err = json.Unmarshal(policyInBytes, &policy) + if err != nil { + return resource.RetryableError( + fmt.Errorf("Error unmarshalling Lambda policy: %s", err)) + } + + _, err = findLambdaPolicyStatementById(&policy, d.Id()) + if err != nil { + return nil + } + + log.Printf("[DEBUG] No error when checking if Lambda permission %s is deleted", d.Id()) + return nil + }) + + if err != nil { + return fmt.Errorf("Failed removing Lambda permission: %s", err) + } + + log.Printf("[DEBUG] Lambda permission with ID %q removed", d.Id()) + d.SetId("") + + return nil +} + +func findLambdaPolicyStatementById(policy *LambdaPolicy, id string) ( + *LambdaPolicyStatement, error) { + + log.Printf("[DEBUG] Received %d statements in Lambda policy: %s", len(policy.Statement), policy.Statement) + for _, statement := range policy.Statement { + if statement.Sid == id { + return &statement, nil + } + } + + return nil, &resource.NotFoundError{ + LastRequest: id, + LastResponse: policy, + Message: fmt.Sprintf("Failed to find statement %q in Lambda policy:\n%s", id, policy.Statement), + } +} + +func getQualifierFromLambdaAliasOrVersionArn(arn string) (string, error) { + matches := regexp.MustCompile(LambdaFunctionRegexp).FindStringSubmatch(arn) + if len(matches) < 8 || matches[7] == "" { + return "", fmt.Errorf("Invalid ARN or otherwise unable to get qualifier from ARN (%q)", + arn) + } + + return matches[7], nil +} + +func getFunctionNameFromLambdaArn(arn string) (string, error) { + matches := regexp.MustCompile(LambdaFunctionRegexp).FindStringSubmatch(arn) + if len(matches) < 6 || matches[5] == "" { + return "", fmt.Errorf("Invalid ARN or otherwise unable to get qualifier from ARN (%q)", + arn) + } + return matches[5], nil +} + +type LambdaPolicy struct { + Version string + Statement []LambdaPolicyStatement + Id string +} + +type LambdaPolicyStatement struct { + Condition map[string]map[string]string + Action string + Resource string + Effect string + Principal map[string]string + Sid string +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_launch_configuration.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_launch_configuration.go new file mode 100644 index 000000000..a232e6bb3 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_launch_configuration.go @@ -0,0 +1,640 @@ +package aws + +import ( + "bytes" + "crypto/sha1" + "encoding/hex" + "fmt" + "log" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsLaunchConfiguration() 
*schema.Resource { + return &schema.Resource{ + Create: resourceAwsLaunchConfigurationCreate, + Read: resourceAwsLaunchConfigurationRead, + Delete: resourceAwsLaunchConfigurationDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + // https://github.com/boto/botocore/blob/9f322b1/botocore/data/autoscaling/2011-01-01/service-2.json#L1932-L1939 + value := v.(string) + if len(value) > 255 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 255 characters", k)) + } + return + }, + }, + + "name_prefix": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + // https://github.com/boto/botocore/blob/9f322b1/botocore/data/autoscaling/2011-01-01/service-2.json#L1932-L1939 + // uuid is 26 characters, limit the prefix to 229. + value := v.(string) + if len(value) > 229 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 229 characters, name is limited to 255", k)) + } + return + }, + }, + + "image_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "instance_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "iam_instance_profile": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "key_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "user_data": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + StateFunc: func(v interface{}) string { + switch v.(type) { + case string: + hash := sha1.Sum([]byte(v.(string))) + return hex.EncodeToString(hash[:]) + default: + return "" + } + }, + }, + + "security_groups": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "vpc_classic_link_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "vpc_classic_link_security_groups": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "associate_public_ip_address": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + }, + + "spot_price": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "ebs_optimized": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "placement_tenancy": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "enable_monitoring": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: true, + }, + + "ebs_block_device": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "delete_on_termination": { + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + + "device_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "iops": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "snapshot_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "volume_size": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + 
ForceNew: true, + }, + + "volume_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "encrypted": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + }, + }, + + "ephemeral_block_device": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "device_name": { + Type: schema.TypeString, + Required: true, + }, + + "virtual_name": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["virtual_name"].(string))) + return hashcode.String(buf.String()) + }, + }, + + "root_block_device": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + // "You can only modify the volume size, volume type, and Delete on + // Termination flag on the block device mapping entry for the root + // device volume." - bit.ly/ec2bdmap + Schema: map[string]*schema.Schema{ + "delete_on_termination": { + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + + "iops": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "volume_size": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "volume_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + }, + }, + }, + } +} + +func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface{}) error { + autoscalingconn := meta.(*AWSClient).autoscalingconn + ec2conn := meta.(*AWSClient).ec2conn + + createLaunchConfigurationOpts := autoscaling.CreateLaunchConfigurationInput{ + LaunchConfigurationName: aws.String(d.Get("name").(string)), + ImageId: aws.String(d.Get("image_id").(string)), + InstanceType: aws.String(d.Get("instance_type").(string)), + EbsOptimized: aws.Bool(d.Get("ebs_optimized").(bool)), + } + + if v, ok := d.GetOk("user_data"); ok { + userData := base64Encode([]byte(v.(string))) + createLaunchConfigurationOpts.UserData = aws.String(userData) + } + + createLaunchConfigurationOpts.InstanceMonitoring = &autoscaling.InstanceMonitoring{ + Enabled: aws.Bool(d.Get("enable_monitoring").(bool)), + } + + if v, ok := d.GetOk("iam_instance_profile"); ok { + createLaunchConfigurationOpts.IamInstanceProfile = aws.String(v.(string)) + } + + if v, ok := d.GetOk("placement_tenancy"); ok { + createLaunchConfigurationOpts.PlacementTenancy = aws.String(v.(string)) + } + + if v, ok := d.GetOk("associate_public_ip_address"); ok { + createLaunchConfigurationOpts.AssociatePublicIpAddress = aws.Bool(v.(bool)) + } + + if v, ok := d.GetOk("key_name"); ok { + createLaunchConfigurationOpts.KeyName = aws.String(v.(string)) + } + if v, ok := d.GetOk("spot_price"); ok { + createLaunchConfigurationOpts.SpotPrice = aws.String(v.(string)) + } + + if v, ok := d.GetOk("security_groups"); ok { + createLaunchConfigurationOpts.SecurityGroups = expandStringList( + v.(*schema.Set).List(), + ) + } + + if v, ok := d.GetOk("vpc_classic_link_id"); ok { + createLaunchConfigurationOpts.ClassicLinkVPCId = aws.String(v.(string)) + } + + if v, ok := d.GetOk("vpc_classic_link_security_groups"); ok { + createLaunchConfigurationOpts.ClassicLinkVPCSecurityGroups = expandStringList( + v.(*schema.Set).List(), + ) + } + + var blockDevices 
[]*autoscaling.BlockDeviceMapping + + // We'll use this to detect if we're declaring it incorrectly as an ebs_block_device. + rootDeviceName, err := fetchRootDeviceName(d.Get("image_id").(string), ec2conn) + if err != nil { + return err + } + if rootDeviceName == nil { + // We do this so the value is empty so we don't have to do nil checks later + var blank string + rootDeviceName = &blank + } + + if v, ok := d.GetOk("ebs_block_device"); ok { + vL := v.(*schema.Set).List() + for _, v := range vL { + bd := v.(map[string]interface{}) + ebs := &autoscaling.Ebs{ + DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)), + } + + if v, ok := bd["snapshot_id"].(string); ok && v != "" { + ebs.SnapshotId = aws.String(v) + } + + if v, ok := bd["encrypted"].(bool); ok && v { + ebs.Encrypted = aws.Bool(v) + } + + if v, ok := bd["volume_size"].(int); ok && v != 0 { + ebs.VolumeSize = aws.Int64(int64(v)) + } + + if v, ok := bd["volume_type"].(string); ok && v != "" { + ebs.VolumeType = aws.String(v) + } + + if v, ok := bd["iops"].(int); ok && v > 0 { + ebs.Iops = aws.Int64(int64(v)) + } + + if *aws.String(bd["device_name"].(string)) == *rootDeviceName { + return fmt.Errorf("Root device (%s) declared as an 'ebs_block_device'. Use 'root_block_device' keyword.", *rootDeviceName) + } + + blockDevices = append(blockDevices, &autoscaling.BlockDeviceMapping{ + DeviceName: aws.String(bd["device_name"].(string)), + Ebs: ebs, + }) + } + } + + if v, ok := d.GetOk("ephemeral_block_device"); ok { + vL := v.(*schema.Set).List() + for _, v := range vL { + bd := v.(map[string]interface{}) + blockDevices = append(blockDevices, &autoscaling.BlockDeviceMapping{ + DeviceName: aws.String(bd["device_name"].(string)), + VirtualName: aws.String(bd["virtual_name"].(string)), + }) + } + } + + if v, ok := d.GetOk("root_block_device"); ok { + vL := v.([]interface{}) + for _, v := range vL { + bd := v.(map[string]interface{}) + ebs := &autoscaling.Ebs{ + DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)), + } + + if v, ok := bd["volume_size"].(int); ok && v != 0 { + ebs.VolumeSize = aws.Int64(int64(v)) + } + + if v, ok := bd["volume_type"].(string); ok && v != "" { + ebs.VolumeType = aws.String(v) + } + + if v, ok := bd["iops"].(int); ok && v > 0 { + ebs.Iops = aws.Int64(int64(v)) + } + + if dn, err := fetchRootDeviceName(d.Get("image_id").(string), ec2conn); err == nil { + if dn == nil { + return fmt.Errorf( + "Expected to find a Root Device name for AMI (%s), but got none", + d.Get("image_id").(string)) + } + blockDevices = append(blockDevices, &autoscaling.BlockDeviceMapping{ + DeviceName: dn, + Ebs: ebs, + }) + } else { + return err + } + } + } + + if len(blockDevices) > 0 { + createLaunchConfigurationOpts.BlockDeviceMappings = blockDevices + } + + var lcName string + if v, ok := d.GetOk("name"); ok { + lcName = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + lcName = resource.PrefixedUniqueId(v.(string)) + } else { + lcName = resource.UniqueId() + } + createLaunchConfigurationOpts.LaunchConfigurationName = aws.String(lcName) + + log.Printf( + "[DEBUG] autoscaling create launch configuration: %s", createLaunchConfigurationOpts) + + // IAM profiles can take ~10 seconds to propagate in AWS: + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console + err = resource.Retry(90*time.Second, func() *resource.RetryError { + _, err := autoscalingconn.CreateLaunchConfiguration(&createLaunchConfigurationOpts) + if err != nil { + if awsErr, 
ok := err.(awserr.Error); ok {
+			if strings.Contains(awsErr.Message(), "Invalid IamInstanceProfile") {
+				return resource.RetryableError(err)
+			}
+			if strings.Contains(awsErr.Message(), "You are not authorized to perform this operation") {
+				return resource.RetryableError(err)
+			}
+		}
+		return resource.NonRetryableError(err)
+	}
+	return nil
+})
+
+	if err != nil {
+		return fmt.Errorf("Error creating launch configuration: %s", err)
+	}
+
+	d.SetId(lcName)
+	log.Printf("[INFO] launch configuration ID: %s", d.Id())
+
+	// We put a Retry here since sometimes eventual consistency bites
+	// us and we need to retry a few times to get the LC to load properly
+	return resource.Retry(30*time.Second, func() *resource.RetryError {
+		err := resourceAwsLaunchConfigurationRead(d, meta)
+		if err != nil {
+			return resource.RetryableError(err)
+		}
+		return nil
+	})
+}
+
+func resourceAwsLaunchConfigurationRead(d *schema.ResourceData, meta interface{}) error {
+	autoscalingconn := meta.(*AWSClient).autoscalingconn
+	ec2conn := meta.(*AWSClient).ec2conn
+
+	describeOpts := autoscaling.DescribeLaunchConfigurationsInput{
+		LaunchConfigurationNames: []*string{aws.String(d.Id())},
+	}
+
+	log.Printf("[DEBUG] launch configuration describe configuration: %s", describeOpts)
+	describeConfs, err := autoscalingconn.DescribeLaunchConfigurations(&describeOpts)
+	if err != nil {
+		return fmt.Errorf("Error retrieving launch configuration: %s", err)
+	}
+	if len(describeConfs.LaunchConfigurations) == 0 {
+		d.SetId("")
+		return nil
+	}
+
+	// Verify AWS returned our launch configuration
+	if *describeConfs.LaunchConfigurations[0].LaunchConfigurationName != d.Id() {
+		return fmt.Errorf(
+			"Unable to find launch configuration: %#v",
+			describeConfs.LaunchConfigurations)
+	}
+
+	lc := describeConfs.LaunchConfigurations[0]
+
+	d.Set("key_name", lc.KeyName)
+	d.Set("image_id", lc.ImageId)
+	d.Set("instance_type", lc.InstanceType)
+	d.Set("name", lc.LaunchConfigurationName)
+
+	d.Set("iam_instance_profile", lc.IamInstanceProfile)
+	d.Set("ebs_optimized", lc.EbsOptimized)
+	d.Set("spot_price", lc.SpotPrice)
+	d.Set("enable_monitoring", lc.InstanceMonitoring.Enabled)
+	d.Set("security_groups", lc.SecurityGroups)
+	d.Set("associate_public_ip_address", lc.AssociatePublicIpAddress)
+
+	d.Set("vpc_classic_link_id", lc.ClassicLinkVPCId)
+	d.Set("vpc_classic_link_security_groups", lc.ClassicLinkVPCSecurityGroups)
+
+	if err := readLCBlockDevices(d, lc, ec2conn); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func resourceAwsLaunchConfigurationDelete(d *schema.ResourceData, meta interface{}) error {
+	autoscalingconn := meta.(*AWSClient).autoscalingconn
+
+	log.Printf("[DEBUG] Launch Configuration destroy: %v", d.Id())
+	_, err := autoscalingconn.DeleteLaunchConfiguration(
+		&autoscaling.DeleteLaunchConfigurationInput{
+			LaunchConfigurationName: aws.String(d.Id()),
+		})
+	if err != nil {
+		autoscalingerr, ok := err.(awserr.Error)
+		if ok && (autoscalingerr.Code() == "InvalidConfiguration.NotFound" || autoscalingerr.Code() == "ValidationError") {
+			log.Printf("[DEBUG] Launch configuration (%s) not found", d.Id())
+			return nil
+		}
+
+		return err
+	}
+
+	return nil
+}
+
+func readLCBlockDevices(d *schema.ResourceData, lc *autoscaling.LaunchConfiguration, ec2conn *ec2.EC2) error {
+	ibds, err := readBlockDevicesFromLaunchConfiguration(d, lc, ec2conn)
+	if err != nil {
+		return err
+	}
+
+	if err := d.Set("ebs_block_device", ibds["ebs"]); err != nil {
+		return err
+	}
+	if err := d.Set("ephemeral_block_device", ibds["ephemeral"]); err != nil
{ + return err + } + if ibds["root"] != nil { + if err := d.Set("root_block_device", []interface{}{ibds["root"]}); err != nil { + return err + } + } else { + d.Set("root_block_device", []interface{}{}) + } + + return nil +} + +func readBlockDevicesFromLaunchConfiguration(d *schema.ResourceData, lc *autoscaling.LaunchConfiguration, ec2conn *ec2.EC2) ( + map[string]interface{}, error) { + blockDevices := make(map[string]interface{}) + blockDevices["ebs"] = make([]map[string]interface{}, 0) + blockDevices["ephemeral"] = make([]map[string]interface{}, 0) + blockDevices["root"] = nil + if len(lc.BlockDeviceMappings) == 0 { + return nil, nil + } + rootDeviceName, err := fetchRootDeviceName(d.Get("image_id").(string), ec2conn) + if err != nil { + return nil, err + } + if rootDeviceName == nil { + // We do this so the value is empty so we don't have to do nil checks later + var blank string + rootDeviceName = &blank + } + for _, bdm := range lc.BlockDeviceMappings { + bd := make(map[string]interface{}) + if bdm.Ebs != nil && bdm.Ebs.DeleteOnTermination != nil { + bd["delete_on_termination"] = *bdm.Ebs.DeleteOnTermination + } + if bdm.Ebs != nil && bdm.Ebs.VolumeSize != nil { + bd["volume_size"] = *bdm.Ebs.VolumeSize + } + if bdm.Ebs != nil && bdm.Ebs.VolumeType != nil { + bd["volume_type"] = *bdm.Ebs.VolumeType + } + if bdm.Ebs != nil && bdm.Ebs.Iops != nil { + bd["iops"] = *bdm.Ebs.Iops + } + + if bdm.DeviceName != nil && *bdm.DeviceName == *rootDeviceName { + blockDevices["root"] = bd + } else { + if bdm.Ebs != nil && bdm.Ebs.Encrypted != nil { + bd["encrypted"] = *bdm.Ebs.Encrypted + } + if bdm.DeviceName != nil { + bd["device_name"] = *bdm.DeviceName + } + if bdm.VirtualName != nil { + bd["virtual_name"] = *bdm.VirtualName + blockDevices["ephemeral"] = append(blockDevices["ephemeral"].([]map[string]interface{}), bd) + } else { + if bdm.Ebs != nil && bdm.Ebs.SnapshotId != nil { + bd["snapshot_id"] = *bdm.Ebs.SnapshotId + } + blockDevices["ebs"] = append(blockDevices["ebs"].([]map[string]interface{}), bd) + } + } + } + return blockDevices, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_cookie_stickiness_policy.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_cookie_stickiness_policy.go new file mode 100644 index 000000000..026f98142 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_cookie_stickiness_policy.go @@ -0,0 +1,182 @@ +package aws + +import ( + "fmt" + "log" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elb" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsLBCookieStickinessPolicy() *schema.Resource { + return &schema.Resource{ + // There is no concept of "updating" an LB Stickiness policy in + // the AWS API. 
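+		// Every attribute below is therefore ForceNew: changing any of
+		// them replaces the policy instead of updating it in place.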
+		Create: resourceAwsLBCookieStickinessPolicyCreate,
+		Read:   resourceAwsLBCookieStickinessPolicyRead,
+		Delete: resourceAwsLBCookieStickinessPolicyDelete,
+
+		Schema: map[string]*schema.Schema{
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"load_balancer": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"lb_port": &schema.Schema{
+				Type:     schema.TypeInt,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"cookie_expiration_period": &schema.Schema{
+				Type:     schema.TypeInt,
+				Optional: true,
+				ForceNew: true,
+				ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
+					value := v.(int)
+					if value <= 0 {
+						es = append(es, fmt.Errorf(
+							"LB Cookie Expiration Period must be greater than zero if specified"))
+					}
+					return
+				},
+			},
+		},
+	}
+}
+
+func resourceAwsLBCookieStickinessPolicyCreate(d *schema.ResourceData, meta interface{}) error {
+	elbconn := meta.(*AWSClient).elbconn
+
+	// Provision the LBStickinessPolicy
+	lbspOpts := &elb.CreateLBCookieStickinessPolicyInput{
+		LoadBalancerName: aws.String(d.Get("load_balancer").(string)),
+		PolicyName:       aws.String(d.Get("name").(string)),
+	}
+
+	if v := d.Get("cookie_expiration_period").(int); v > 0 {
+		lbspOpts.CookieExpirationPeriod = aws.Int64(int64(v))
+	}
+
+	log.Printf("[DEBUG] LB Cookie Stickiness Policy opts: %#v", lbspOpts)
+	if _, err := elbconn.CreateLBCookieStickinessPolicy(lbspOpts); err != nil {
+		return fmt.Errorf("Error creating LBCookieStickinessPolicy: %s", err)
+	}
+
+	setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{
+		LoadBalancerName: aws.String(d.Get("load_balancer").(string)),
+		LoadBalancerPort: aws.Int64(int64(d.Get("lb_port").(int))),
+		PolicyNames:      []*string{aws.String(d.Get("name").(string))},
+	}
+
+	log.Printf("[DEBUG] LB Cookie Stickiness create configuration: %#v", setLoadBalancerOpts)
+	if _, err := elbconn.SetLoadBalancerPoliciesOfListener(setLoadBalancerOpts); err != nil {
+		return fmt.Errorf("Error setting LBCookieStickinessPolicy: %s", err)
+	}
+
+	d.SetId(fmt.Sprintf("%s:%d:%s",
+		*lbspOpts.LoadBalancerName,
+		*setLoadBalancerOpts.LoadBalancerPort,
+		*lbspOpts.PolicyName))
+	return nil
+}
+
+func resourceAwsLBCookieStickinessPolicyRead(d *schema.ResourceData, meta interface{}) error {
+	elbconn := meta.(*AWSClient).elbconn
+
+	lbName, lbPort, policyName := resourceAwsLBCookieStickinessPolicyParseId(d.Id())
+
+	request := &elb.DescribeLoadBalancerPoliciesInput{
+		LoadBalancerName: aws.String(lbName),
+		PolicyNames:      []*string{aws.String(policyName)},
+	}
+
+	getResp, err := elbconn.DescribeLoadBalancerPolicies(request)
+	if err != nil {
+		if ec2err, ok := err.(awserr.Error); ok {
+			if ec2err.Code() == "PolicyNotFound" || ec2err.Code() == "LoadBalancerNotFound" {
+				// The policy or the load balancer is gone; remove it from state
+				d.SetId("")
+				return nil
+			}
+		}
+		return fmt.Errorf("Error retrieving policy: %s", err)
+	}
+
+	if len(getResp.PolicyDescriptions) != 1 {
+		return fmt.Errorf("Unable to find policy %#v", getResp.PolicyDescriptions)
+	}
+
+	// we know the policy exists now, but we have to check if it's assigned to a listener
+	assigned, err := resourceAwsELBSticknessPolicyAssigned(policyName, lbName, lbPort, elbconn)
+	if err != nil {
+		return err
+	}
+	if !assigned {
+		// policy exists, but isn't assigned to a listener
+		log.Printf("[DEBUG] policy '%s' exists, but isn't assigned to a listener", policyName)
+		d.SetId("")
+		return nil
+	}
+
+	// We can get away with this because there's only one attribute, the
+	// cookie expiration, in these descriptions.
+	policyDesc := getResp.PolicyDescriptions[0]
+	cookieAttr := policyDesc.PolicyAttributeDescriptions[0]
+	if *cookieAttr.AttributeName != "CookieExpirationPeriod" {
+		return fmt.Errorf("Unable to find cookie expiration period.")
+	}
+	d.Set("cookie_expiration_period", cookieAttr.AttributeValue)
+
+	d.Set("name", policyName)
+	d.Set("load_balancer", lbName)
+	d.Set("lb_port", lbPort)
+
+	return nil
+}
+
+func resourceAwsLBCookieStickinessPolicyDelete(d *schema.ResourceData, meta interface{}) error {
+	elbconn := meta.(*AWSClient).elbconn
+
+	lbName, _, policyName := resourceAwsLBCookieStickinessPolicyParseId(d.Id())
+
+	// Perversely, if we Set an empty list of PolicyNames, we detach the
+	// policies attached to a listener, which is required to delete the
+	// policy itself.
+	setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{
+		LoadBalancerName: aws.String(d.Get("load_balancer").(string)),
+		LoadBalancerPort: aws.Int64(int64(d.Get("lb_port").(int))),
+		PolicyNames:      []*string{},
+	}
+
+	if _, err := elbconn.SetLoadBalancerPoliciesOfListener(setLoadBalancerOpts); err != nil {
+		return fmt.Errorf("Error removing LBCookieStickinessPolicy: %s", err)
+	}
+
+	request := &elb.DeleteLoadBalancerPolicyInput{
+		LoadBalancerName: aws.String(lbName),
+		PolicyName:       aws.String(policyName),
+	}
+
+	if _, err := elbconn.DeleteLoadBalancerPolicy(request); err != nil {
+		return fmt.Errorf("Error deleting LB stickiness policy %s: %s", d.Id(), err)
+	}
+	return nil
+}
+
+// resourceAwsLBCookieStickinessPolicyParseId takes an ID and parses it into
+// its constituent parts. You need three axes (LB name, policy name, and LB
+// port) to create or identify a stickiness policy in AWS's API.
+func resourceAwsLBCookieStickinessPolicyParseId(id string) (string, string, string) {
+	parts := strings.SplitN(id, ":", 3)
+	return parts[0], parts[1], parts[2]
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_ssl_negotiation_policy.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_ssl_negotiation_policy.go
new file mode 100644
index 000000000..64a9f98ce
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lb_ssl_negotiation_policy.go
@@ -0,0 +1,189 @@
+package aws
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/elb"
+	"github.com/hashicorp/terraform/helper/hashcode"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsLBSSLNegotiationPolicy() *schema.Resource {
+	return &schema.Resource{
+		// There is no concept of "updating" an LB policy in
+		// the AWS API.
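+		// As with the stickiness policies above, every attribute is
+		// therefore ForceNew: any change replaces the policy.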
+ Create: resourceAwsLBSSLNegotiationPolicyCreate, + Read: resourceAwsLBSSLNegotiationPolicyRead, + Delete: resourceAwsLBSSLNegotiationPolicyDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "load_balancer": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "lb_port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "attribute": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "value": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) + return hashcode.String(buf.String()) + }, + }, + }, + } +} + +func resourceAwsLBSSLNegotiationPolicyCreate(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + // Provision the SSLNegotiationPolicy + lbspOpts := &elb.CreateLoadBalancerPolicyInput{ + LoadBalancerName: aws.String(d.Get("load_balancer").(string)), + PolicyName: aws.String(d.Get("name").(string)), + PolicyTypeName: aws.String("SSLNegotiationPolicyType"), + } + + // Check for Policy Attributes + if v, ok := d.GetOk("attribute"); ok { + var err error + // Expand the "attribute" set to aws-sdk-go compat []*elb.PolicyAttribute + lbspOpts.PolicyAttributes, err = expandPolicyAttributes(v.(*schema.Set).List()) + if err != nil { + return err + } + } + + log.Printf("[DEBUG] Load Balancer Policy opts: %#v", lbspOpts) + if _, err := elbconn.CreateLoadBalancerPolicy(lbspOpts); err != nil { + return fmt.Errorf("Error creating Load Balancer Policy: %s", err) + } + + setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ + LoadBalancerName: aws.String(d.Get("load_balancer").(string)), + LoadBalancerPort: aws.Int64(int64(d.Get("lb_port").(int))), + PolicyNames: []*string{aws.String(d.Get("name").(string))}, + } + + log.Printf("[DEBUG] SSL Negotiation create configuration: %#v", setLoadBalancerOpts) + if _, err := elbconn.SetLoadBalancerPoliciesOfListener(setLoadBalancerOpts); err != nil { + return fmt.Errorf("Error setting SSLNegotiationPolicy: %s", err) + } + + d.SetId(fmt.Sprintf("%s:%d:%s", + *lbspOpts.LoadBalancerName, + *setLoadBalancerOpts.LoadBalancerPort, + *lbspOpts.PolicyName)) + return nil +} + +func resourceAwsLBSSLNegotiationPolicyRead(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + lbName, lbPort, policyName := resourceAwsLBSSLNegotiationPolicyParseId(d.Id()) + + request := &elb.DescribeLoadBalancerPoliciesInput{ + LoadBalancerName: aws.String(lbName), + PolicyNames: []*string{aws.String(policyName)}, + } + + getResp, err := elbconn.DescribeLoadBalancerPolicies(request) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "PolicyNotFound" { + // The policy is gone. 
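+			// Remove it from state so Terraform plans a re-create.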
+			d.SetId("")
+			return nil
+		} else if isLoadBalancerNotFound(err) {
+			// The ELB is gone now, so just remove it from the state
+			d.SetId("")
+			return nil
+		}
+		return fmt.Errorf("Error retrieving policy: %s", err)
+	}
+
+	if len(getResp.PolicyDescriptions) != 1 {
+		return fmt.Errorf("Unable to find policy %#v", getResp.PolicyDescriptions)
+	}
+
+	// We can get away with this because there's only one policy returned
+	policyDesc := getResp.PolicyDescriptions[0]
+	attributes := flattenPolicyAttributes(policyDesc.PolicyAttributeDescriptions)
+	d.Set("attribute", attributes)
+
+	d.Set("name", policyName)
+	d.Set("load_balancer", lbName)
+	d.Set("lb_port", lbPort)
+
+	return nil
+}
+
+func resourceAwsLBSSLNegotiationPolicyDelete(d *schema.ResourceData, meta interface{}) error {
+	elbconn := meta.(*AWSClient).elbconn
+
+	lbName, _, policyName := resourceAwsLBSSLNegotiationPolicyParseId(d.Id())
+
+	// Perversely, if we Set an empty list of PolicyNames, we detach the
+	// policies attached to a listener, which is required to delete the
+	// policy itself.
+	setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{
+		LoadBalancerName: aws.String(d.Get("load_balancer").(string)),
+		LoadBalancerPort: aws.Int64(int64(d.Get("lb_port").(int))),
+		PolicyNames:      []*string{},
+	}
+
+	if _, err := elbconn.SetLoadBalancerPoliciesOfListener(setLoadBalancerOpts); err != nil {
+		return fmt.Errorf("Error removing SSLNegotiationPolicy: %s", err)
+	}
+
+	request := &elb.DeleteLoadBalancerPolicyInput{
+		LoadBalancerName: aws.String(lbName),
+		PolicyName:       aws.String(policyName),
+	}
+
+	if _, err := elbconn.DeleteLoadBalancerPolicy(request); err != nil {
+		return fmt.Errorf("Error deleting SSL negotiation policy %s: %s", d.Id(), err)
+	}
+	return nil
+}
+
+// resourceAwsLBSSLNegotiationPolicyParseId takes an ID and parses it into
+// its constituent parts. You need three axes (LB name, policy name, and LB
+// port) to create or identify an SSL negotiation policy in AWS's API.
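+// For example, a hypothetical ID "my-elb:443:my-ssl-policy" parses into
+// the load balancer name "my-elb", the port "443", and the policy name
+// "my-ssl-policy".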
+func resourceAwsLBSSLNegotiationPolicyParseId(id string) (string, string, string) { + parts := strings.SplitN(id, ":", 3) + return parts[0], parts[1], parts[2] +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_domain.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_domain.go new file mode 100644 index 000000000..865eb59d9 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_domain.go @@ -0,0 +1,79 @@ +package aws + +import ( + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/lightsail" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsLightsailDomain() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsLightsailDomainCreate, + Read: resourceAwsLightsailDomainRead, + Delete: resourceAwsLightsailDomainDelete, + + Schema: map[string]*schema.Schema{ + "domain_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsLightsailDomainCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lightsailconn + _, err := conn.CreateDomain(&lightsail.CreateDomainInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + }) + + if err != nil { + return err + } + + d.SetId(d.Get("domain_name").(string)) + + return resourceAwsLightsailDomainRead(d, meta) +} + +func resourceAwsLightsailDomainRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lightsailconn + resp, err := conn.GetDomain(&lightsail.GetDomainInput{ + DomainName: aws.String(d.Id()), + }) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "NotFoundException" { + log.Printf("[WARN] Lightsail Domain (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + return err + } + return err + } + + d.Set("arn", resp.Domain.Arn) + return nil +} + +func resourceAwsLightsailDomainDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lightsailconn + _, err := conn.DeleteDomain(&lightsail.DeleteDomainInput{ + DomainName: aws.String(d.Id()), + }) + + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_instance.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_instance.go new file mode 100644 index 000000000..34f249573 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_instance.go @@ -0,0 +1,264 @@ +package aws + +import ( + "fmt" + "log" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/lightsail" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsLightsailInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsLightsailInstanceCreate, + Read: resourceAwsLightsailInstanceRead, + Delete: resourceAwsLightsailInstanceDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "availability_zone": { + Type: schema.TypeString, + 
Required: true,
+				ForceNew: true,
+			},
+			"blueprint_id": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"bundle_id": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			// Optional attributes
+			"key_pair_name": {
+				// Not compatible with aws_key_pair (yet)
+				// We'll need a new aws_lightsail_key_pair resource
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+				DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool {
+					return old == "LightsailDefaultKeyPair" && new == ""
+				},
+			},
+
+			// cannot be retrieved from the API
+			"user_data": {
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+
+			// additional info returned from the API
+			"arn": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"created_at": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"cpu_count": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+			"ram_size": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+			"ipv6_address": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"is_static_ip": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"private_ip_address": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"public_ip_address": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"username": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceAwsLightsailInstanceCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).lightsailconn
+
+	iName := d.Get("name").(string)
+
+	req := lightsail.CreateInstancesInput{
+		AvailabilityZone: aws.String(d.Get("availability_zone").(string)),
+		BlueprintId:      aws.String(d.Get("blueprint_id").(string)),
+		BundleId:         aws.String(d.Get("bundle_id").(string)),
+		InstanceNames:    aws.StringSlice([]string{iName}),
+	}
+
+	if v, ok := d.GetOk("key_pair_name"); ok {
+		req.KeyPairName = aws.String(v.(string))
+	}
+	if v, ok := d.GetOk("user_data"); ok {
+		req.UserData = aws.String(v.(string))
+	}
+
+	resp, err := conn.CreateInstances(&req)
+	if err != nil {
+		return err
+	}
+
+	if len(resp.Operations) == 0 {
+		return fmt.Errorf("[ERR] No operations found for CreateInstance request")
+	}
+
+	op := resp.Operations[0]
+	d.SetId(d.Get("name").(string))
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"Started"},
+		Target:     []string{"Completed", "Succeeded"},
+		Refresh:    resourceAwsLightsailOperationRefreshFunc(op.Id, meta),
+		Timeout:    10 * time.Minute,
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		// We don't return an error here because the Create call succeeded
+		log.Printf("[ERR] Error waiting for instance (%s) to become ready: %s", d.Id(), err)
+	}
+
+	return resourceAwsLightsailInstanceRead(d, meta)
+}
+
+func resourceAwsLightsailInstanceRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).lightsailconn
+	resp, err := conn.GetInstance(&lightsail.GetInstanceInput{
+		InstanceName: aws.String(d.Id()),
+	})
+
+	if err != nil {
+		if awsErr, ok := err.(awserr.Error); ok {
+			if awsErr.Code() == "NotFoundException" {
+				log.Printf("[WARN] Lightsail Instance (%s) not found, removing from state", d.Id())
+				d.SetId("")
+				return nil
+			}
+			return err
+		}
+		return err
+	}
+
+	if resp == nil {
+		log.Printf("[WARN] Lightsail Instance (%s) not found, nil response from server, removing from state", d.Id())
+		d.SetId("")
+		return nil
+	}
+
+	i := resp.Instance
+
+	d.Set("availability_zone", i.Location.AvailabilityZone)
+	d.Set("blueprint_id", i.BlueprintId)
+	d.Set("bundle_id", i.BundleId)
+	d.Set("key_pair_name", i.SshKeyName)
+	d.Set("name", i.Name)
+
+	// additional attributes
+	d.Set("arn", i.Arn)
+	d.Set("username", i.Username)
+	d.Set("created_at", i.CreatedAt.Format(time.RFC3339))
+	d.Set("cpu_count", i.Hardware.CpuCount)
+	d.Set("ram_size", strconv.FormatFloat(*i.Hardware.RamSizeInGb, 'f', 0, 64))
+	d.Set("ipv6_address", i.Ipv6Address)
+	d.Set("is_static_ip", i.IsStaticIp)
+	d.Set("private_ip_address", i.PrivateIpAddress)
+	d.Set("public_ip_address", i.PublicIpAddress)
+
+	return nil
+}
+
+func resourceAwsLightsailInstanceDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).lightsailconn
+	resp, err := conn.DeleteInstance(&lightsail.DeleteInstanceInput{
+		InstanceName: aws.String(d.Id()),
+	})
+
+	if err != nil {
+		return err
+	}
+
+	op := resp.Operations[0]
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"Started"},
+		Target:     []string{"Completed", "Succeeded"},
+		Refresh:    resourceAwsLightsailOperationRefreshFunc(op.Id, meta),
+		Timeout:    10 * time.Minute,
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf(
+			"Error waiting for instance (%s) to become destroyed: %s",
+			d.Id(), err)
+	}
+
+	d.SetId("")
+	return nil
+}
+
+// resourceAwsLightsailOperationRefreshFunc returns a StateRefreshFunc that
+// checks the status of an Operation, which is returned from Create/Delete
+// methods.
+// Statuses are an aws.OperationStatus enum:
+// - NotStarted
+// - Started
+// - Failed
+// - Completed
+// - Succeeded (not documented?)
+func resourceAwsLightsailOperationRefreshFunc(
+	oid *string, meta interface{}) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		conn := meta.(*AWSClient).lightsailconn
+		log.Printf("[DEBUG] Checking if Lightsail Operation (%s) is Completed", *oid)
+		o, err := conn.GetOperation(&lightsail.GetOperationInput{
+			OperationId: oid,
+		})
+		if err != nil {
+			return o, "FAILED", err
+		}
+
+		if o.Operation == nil {
+			return nil, "Failed", fmt.Errorf("[ERR] Error retrieving Operation info for operation (%s)", *oid)
+		}
+
+		log.Printf("[DEBUG] Lightsail Operation (%s) is currently %q", *oid, *o.Operation.Status)
+		return o, *o.Operation.Status, nil
+	}
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_key_pair.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_key_pair.go
new file mode 100644
index 000000000..24138aaa9
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_key_pair.go
@@ -0,0 +1,225 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/lightsail"
+	"github.com/hashicorp/terraform/helper/encryption"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsLightsailKeyPair() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsLightsailKeyPairCreate,
+		Read:   resourceAwsLightsailKeyPairRead,
+		Delete: resourceAwsLightsailKeyPairDelete,
+
+		Schema: map[string]*schema.Schema{
+			"name": {
+				Type:          schema.TypeString,
+				Optional:      true,
+				Computed:      true,
+				ForceNew:      true,
+				ConflictsWith: []string{"name_prefix"},
+			},
+			"name_prefix": {
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+
+			// optional fields
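+			// pgp_key: when set, the generated private key is stored
+			// PGP-encrypted in encrypted_private_key (with its fingerprint
+			// in encrypted_fingerprint) instead of in plain text in
+			// private_key. See the Create function below.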
"pgp_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + // additional info returned from the API + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + // fields returned from CreateKey + "fingerprint": { + Type: schema.TypeString, + Computed: true, + }, + "public_key": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "private_key": { + Type: schema.TypeString, + Computed: true, + }, + + // encrypted fields if pgp_key is given + "encrypted_fingerprint": { + Type: schema.TypeString, + Computed: true, + }, + "encrypted_private_key": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsLightsailKeyPairCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lightsailconn + + var kName string + if v, ok := d.GetOk("name"); ok { + kName = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + kName = resource.PrefixedUniqueId(v.(string)) + } else { + kName = resource.UniqueId() + } + + var pubKey string + var op *lightsail.Operation + if pubKeyInterface, ok := d.GetOk("public_key"); ok { + pubKey = pubKeyInterface.(string) + } + + if pubKey == "" { + // creating new key + resp, err := conn.CreateKeyPair(&lightsail.CreateKeyPairInput{ + KeyPairName: aws.String(kName), + }) + if err != nil { + return err + } + if resp.Operation == nil { + return fmt.Errorf("[ERR] No operation found for CreateKeyPair response") + } + if resp.KeyPair == nil { + return fmt.Errorf("[ERR] No KeyPair information found for CreateKeyPair response") + } + d.SetId(kName) + + // private_key and public_key are only available in the response from + // CreateKey pair. Here we set the public_key, and encrypt the private_key + // if a pgp_key is given, else we store the private_key in state + d.Set("public_key", resp.PublicKeyBase64) + + // encrypt private key if pgp_key is given + pgpKey, err := encryption.RetrieveGPGKey(d.Get("pgp_key").(string)) + if err != nil { + return err + } + if pgpKey != "" { + fingerprint, encrypted, err := encryption.EncryptValue(pgpKey, *resp.PrivateKeyBase64, "Lightsail Private Key") + if err != nil { + return err + } + + d.Set("encrypted_fingerprint", fingerprint) + d.Set("encrypted_private_key", encrypted) + } else { + d.Set("private_key", resp.PrivateKeyBase64) + } + + op = resp.Operation + } else { + // importing key + resp, err := conn.ImportKeyPair(&lightsail.ImportKeyPairInput{ + KeyPairName: aws.String(kName), + PublicKeyBase64: aws.String(pubKey), + }) + + if err != nil { + log.Printf("[ERR] Error importing key: %s", err) + return err + } + d.SetId(kName) + + op = resp.Operation + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"Started"}, + Target: []string{"Completed", "Succeeded"}, + Refresh: resourceAwsLightsailOperationRefreshFunc(op.Id, meta), + Timeout: 10 * time.Minute, + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err := stateConf.WaitForState() + if err != nil { + // We don't return an error here because the Create call succeded + log.Printf("[ERR] Error waiting for KeyPair (%s) to become ready: %s", d.Id(), err) + } + + return resourceAwsLightsailKeyPairRead(d, meta) +} + +func resourceAwsLightsailKeyPairRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lightsailconn + + resp, err := conn.GetKeyPair(&lightsail.GetKeyPairInput{ + KeyPairName: aws.String(d.Id()), + }) + + if err != nil { + log.Printf("[WARN] Error getting KeyPair (%s): %s", d.Id(), err) + // check 
for known not found error + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "NotFoundException" { + log.Printf("[WARN] Lightsail KeyPair (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + } + return err + } + + d.Set("arn", resp.KeyPair.Arn) + d.Set("name", resp.KeyPair.Name) + d.Set("fingerprint", resp.KeyPair.Fingerprint) + + return nil +} + +func resourceAwsLightsailKeyPairDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lightsailconn + resp, err := conn.DeleteKeyPair(&lightsail.DeleteKeyPairInput{ + KeyPairName: aws.String(d.Id()), + }) + + if err != nil { + return err + } + + op := resp.Operation + stateConf := &resource.StateChangeConf{ + Pending: []string{"Started"}, + Target: []string{"Completed", "Succeeded"}, + Refresh: resourceAwsLightsailOperationRefreshFunc(op.Id, meta), + Timeout: 10 * time.Minute, + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for KeyPair (%s) to become destroyed: %s", + d.Id(), err) + } + + d.SetId("") + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_static_ip.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_static_ip.go new file mode 100644 index 000000000..1f593ad40 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_static_ip.go @@ -0,0 +1,98 @@ +package aws + +import ( + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/lightsail" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsLightsailStaticIp() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsLightsailStaticIpCreate, + Read: resourceAwsLightsailStaticIpRead, + Delete: resourceAwsLightsailStaticIpDelete, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "ip_address": { + Type: schema.TypeString, + Computed: true, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "support_code": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsLightsailStaticIpCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lightsailconn + + name := d.Get("name").(string) + log.Printf("[INFO] Allocating Lightsail Static IP: %q", name) + out, err := conn.AllocateStaticIp(&lightsail.AllocateStaticIpInput{ + StaticIpName: aws.String(name), + }) + if err != nil { + return err + } + log.Printf("[INFO] Lightsail Static IP allocated: %s", *out) + + d.SetId(name) + + return resourceAwsLightsailStaticIpRead(d, meta) +} + +func resourceAwsLightsailStaticIpRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lightsailconn + + name := d.Get("name").(string) + log.Printf("[INFO] Reading Lightsail Static IP: %q", name) + out, err := conn.GetStaticIp(&lightsail.GetStaticIpInput{ + StaticIpName: aws.String(name), + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "NotFoundException" { + log.Printf("[WARN] Lightsail Static IP (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + } + return err + } + log.Printf("[INFO] Received Lightsail Static IP: %s", *out) + + d.Set("arn", out.StaticIp.Arn) + d.Set("ip_address", out.StaticIp.IpAddress) + 
d.Set("support_code", out.StaticIp.SupportCode) + + return nil +} + +func resourceAwsLightsailStaticIpDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lightsailconn + + name := d.Get("name").(string) + log.Printf("[INFO] Deleting Lightsail Static IP: %q", name) + out, err := conn.ReleaseStaticIp(&lightsail.ReleaseStaticIpInput{ + StaticIpName: aws.String(name), + }) + if err != nil { + return err + } + log.Printf("[INFO] Deleted Lightsail Static IP: %s", *out) + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_static_ip_attachment.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_static_ip_attachment.go new file mode 100644 index 000000000..766ccff55 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lightsail_static_ip_attachment.go @@ -0,0 +1,96 @@ +package aws + +import ( + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/lightsail" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsLightsailStaticIpAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsLightsailStaticIpAttachmentCreate, + Read: resourceAwsLightsailStaticIpAttachmentRead, + Delete: resourceAwsLightsailStaticIpAttachmentDelete, + + Schema: map[string]*schema.Schema{ + "static_ip_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "instance_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsLightsailStaticIpAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lightsailconn + + staticIpName := d.Get("static_ip_name").(string) + log.Printf("[INFO] Attaching Lightsail Static IP: %q", staticIpName) + out, err := conn.AttachStaticIp(&lightsail.AttachStaticIpInput{ + StaticIpName: aws.String(staticIpName), + InstanceName: aws.String(d.Get("instance_name").(string)), + }) + if err != nil { + return err + } + log.Printf("[INFO] Lightsail Static IP attached: %s", *out) + + d.SetId(staticIpName) + + return resourceAwsLightsailStaticIpAttachmentRead(d, meta) +} + +func resourceAwsLightsailStaticIpAttachmentRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lightsailconn + + staticIpName := d.Get("static_ip_name").(string) + log.Printf("[INFO] Reading Lightsail Static IP: %q", staticIpName) + out, err := conn.GetStaticIp(&lightsail.GetStaticIpInput{ + StaticIpName: aws.String(staticIpName), + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "NotFoundException" { + log.Printf("[WARN] Lightsail Static IP (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + } + return err + } + if !*out.StaticIp.IsAttached { + log.Printf("[WARN] Lightsail Static IP (%s) is not attached, removing from state", d.Id()) + d.SetId("") + return nil + } + + log.Printf("[INFO] Received Lightsail Static IP: %s", *out) + + d.Set("instance_name", out.StaticIp.AttachedTo) + + return nil +} + +func resourceAwsLightsailStaticIpAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lightsailconn + + name := d.Get("static_ip_name").(string) + log.Printf("[INFO] Detaching Lightsail Static IP: %q", name) + out, err := conn.DetachStaticIp(&lightsail.DetachStaticIpInput{ + StaticIpName: aws.String(name), + }) + if 
err != nil { + return err + } + log.Printf("[INFO] Detached Lightsail Static IP: %s", *out) + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_load_balancer_backend_server_policy.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_load_balancer_backend_server_policy.go new file mode 100644 index 000000000..325c4fd1a --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_load_balancer_backend_server_policy.go @@ -0,0 +1,138 @@ +package aws + +import ( + "fmt" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elb" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsLoadBalancerBackendServerPolicies() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsLoadBalancerBackendServerPoliciesCreate, + Read: resourceAwsLoadBalancerBackendServerPoliciesRead, + Update: resourceAwsLoadBalancerBackendServerPoliciesCreate, + Delete: resourceAwsLoadBalancerBackendServerPoliciesDelete, + + Schema: map[string]*schema.Schema{ + "load_balancer_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "policy_names": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Set: schema.HashString, + }, + + "instance_port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + }, + } +} + +func resourceAwsLoadBalancerBackendServerPoliciesCreate(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + loadBalancerName := d.Get("load_balancer_name") + + policyNames := []*string{} + if v, ok := d.GetOk("policy_names"); ok { + policyNames = expandStringList(v.(*schema.Set).List()) + } + + setOpts := &elb.SetLoadBalancerPoliciesForBackendServerInput{ + LoadBalancerName: aws.String(loadBalancerName.(string)), + InstancePort: aws.Int64(int64(d.Get("instance_port").(int))), + PolicyNames: policyNames, + } + + if _, err := elbconn.SetLoadBalancerPoliciesForBackendServer(setOpts); err != nil { + return fmt.Errorf("Error setting LoadBalancerPoliciesForBackendServer: %s", err) + } + + d.SetId(fmt.Sprintf("%s:%s", *setOpts.LoadBalancerName, strconv.FormatInt(*setOpts.InstancePort, 10))) + return resourceAwsLoadBalancerBackendServerPoliciesRead(d, meta) +} + +func resourceAwsLoadBalancerBackendServerPoliciesRead(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + loadBalancerName, instancePort := resourceAwsLoadBalancerBackendServerPoliciesParseId(d.Id()) + + describeElbOpts := &elb.DescribeLoadBalancersInput{ + LoadBalancerNames: []*string{aws.String(loadBalancerName)}, + } + + describeResp, err := elbconn.DescribeLoadBalancers(describeElbOpts) + + if err != nil { + if ec2err, ok := err.(awserr.Error); ok { + if ec2err.Code() == "LoadBalancerNotFound" { + d.SetId("") + return fmt.Errorf("LoadBalancerNotFound: %s", err) + } + } + return fmt.Errorf("Error retrieving ELB description: %s", err) + } + + if len(describeResp.LoadBalancerDescriptions) != 1 { + return fmt.Errorf("Unable to find ELB: %#v", describeResp.LoadBalancerDescriptions) + } + + lb := describeResp.LoadBalancerDescriptions[0] + + policyNames := []*string{} + for _, backendServer := range lb.BackendServerDescriptions { + if instancePort != strconv.Itoa(int(*backendServer.InstancePort)) { + continue + } + + for _, name := range backendServer.PolicyNames { + 
policyNames = append(policyNames, name) + } + } + + d.Set("load_balancer_name", loadBalancerName) + d.Set("instance_port", instancePort) + d.Set("policy_names", flattenStringList(policyNames)) + + return nil +} + +func resourceAwsLoadBalancerBackendServerPoliciesDelete(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + loadBalancerName, instancePort := resourceAwsLoadBalancerBackendServerPoliciesParseId(d.Id()) + + instancePortInt, err := strconv.ParseInt(instancePort, 10, 64) + if err != nil { + return fmt.Errorf("Error parsing instancePort as integer: %s", err) + } + + setOpts := &elb.SetLoadBalancerPoliciesForBackendServerInput{ + LoadBalancerName: aws.String(loadBalancerName), + InstancePort: aws.Int64(instancePortInt), + PolicyNames: []*string{}, + } + + if _, err := elbconn.SetLoadBalancerPoliciesForBackendServer(setOpts); err != nil { + return fmt.Errorf("Error setting LoadBalancerPoliciesForBackendServer: %s", err) + } + + d.SetId("") + return nil +} + +func resourceAwsLoadBalancerBackendServerPoliciesParseId(id string) (string, string) { + parts := strings.SplitN(id, ":", 2) + return parts[0], parts[1] +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_load_balancer_listener_policy.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_load_balancer_listener_policy.go new file mode 100644 index 000000000..d1c8cacbb --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_load_balancer_listener_policy.go @@ -0,0 +1,138 @@ +package aws + +import ( + "fmt" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elb" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsLoadBalancerListenerPolicies() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsLoadBalancerListenerPoliciesCreate, + Read: resourceAwsLoadBalancerListenerPoliciesRead, + Update: resourceAwsLoadBalancerListenerPoliciesCreate, + Delete: resourceAwsLoadBalancerListenerPoliciesDelete, + + Schema: map[string]*schema.Schema{ + "load_balancer_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "policy_names": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Set: schema.HashString, + }, + + "load_balancer_port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + }, + } +} + +func resourceAwsLoadBalancerListenerPoliciesCreate(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + loadBalancerName := d.Get("load_balancer_name") + + policyNames := []*string{} + if v, ok := d.GetOk("policy_names"); ok { + policyNames = expandStringList(v.(*schema.Set).List()) + } + + setOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ + LoadBalancerName: aws.String(loadBalancerName.(string)), + LoadBalancerPort: aws.Int64(int64(d.Get("load_balancer_port").(int))), + PolicyNames: policyNames, + } + + if _, err := elbconn.SetLoadBalancerPoliciesOfListener(setOpts); err != nil { + return fmt.Errorf("Error setting LoadBalancerPoliciesOfListener: %s", err) + } + + d.SetId(fmt.Sprintf("%s:%s", *setOpts.LoadBalancerName, strconv.FormatInt(*setOpts.LoadBalancerPort, 10))) + return resourceAwsLoadBalancerListenerPoliciesRead(d, meta) +} + +func resourceAwsLoadBalancerListenerPoliciesRead(d *schema.ResourceData, meta interface{}) error { + elbconn := 
meta.(*AWSClient).elbconn + + loadBalancerName, loadBalancerPort := resourceAwsLoadBalancerListenerPoliciesParseId(d.Id()) + + describeElbOpts := &elb.DescribeLoadBalancersInput{ + LoadBalancerNames: []*string{aws.String(loadBalancerName)}, + } + + describeResp, err := elbconn.DescribeLoadBalancers(describeElbOpts) + + if err != nil { + if ec2err, ok := err.(awserr.Error); ok { + if ec2err.Code() == "LoadBalancerNotFound" { + d.SetId("") + return fmt.Errorf("LoadBalancerNotFound: %s", err) + } + } + return fmt.Errorf("Error retrieving ELB description: %s", err) + } + + if len(describeResp.LoadBalancerDescriptions) != 1 { + return fmt.Errorf("Unable to find ELB: %#v", describeResp.LoadBalancerDescriptions) + } + + lb := describeResp.LoadBalancerDescriptions[0] + + policyNames := []*string{} + for _, listener := range lb.ListenerDescriptions { + if loadBalancerPort != strconv.Itoa(int(*listener.Listener.LoadBalancerPort)) { + continue + } + + for _, name := range listener.PolicyNames { + policyNames = append(policyNames, name) + } + } + + d.Set("load_balancer_name", loadBalancerName) + d.Set("load_balancer_port", loadBalancerPort) + d.Set("policy_names", flattenStringList(policyNames)) + + return nil +} + +func resourceAwsLoadBalancerListenerPoliciesDelete(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + loadBalancerName, loadBalancerPort := resourceAwsLoadBalancerListenerPoliciesParseId(d.Id()) + + loadBalancerPortInt, err := strconv.ParseInt(loadBalancerPort, 10, 64) + if err != nil { + return fmt.Errorf("Error parsing loadBalancerPort as integer: %s", err) + } + + setOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ + LoadBalancerName: aws.String(loadBalancerName), + LoadBalancerPort: aws.Int64(loadBalancerPortInt), + PolicyNames: []*string{}, + } + + if _, err := elbconn.SetLoadBalancerPoliciesOfListener(setOpts); err != nil { + return fmt.Errorf("Error setting LoadBalancerPoliciesOfListener: %s", err) + } + + d.SetId("") + return nil +} + +func resourceAwsLoadBalancerListenerPoliciesParseId(id string) (string, string) { + parts := strings.SplitN(id, ":", 2) + return parts[0], parts[1] +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_load_balancer_policy.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_load_balancer_policy.go new file mode 100644 index 000000000..8305cf992 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_load_balancer_policy.go @@ -0,0 +1,352 @@ +package aws + +import ( + "fmt" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elb" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsLoadBalancerPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsLoadBalancerPolicyCreate, + Read: resourceAwsLoadBalancerPolicyRead, + Update: resourceAwsLoadBalancerPolicyUpdate, + Delete: resourceAwsLoadBalancerPolicyDelete, + + Schema: map[string]*schema.Schema{ + "load_balancer_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "policy_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "policy_type_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "policy_attribute": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + } +} + +func resourceAwsLoadBalancerPolicyCreate(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + attributes := []*elb.PolicyAttribute{} + if attributedata, ok := d.GetOk("policy_attribute"); ok { + attributeSet := attributedata.(*schema.Set).List() + for _, attribute := range attributeSet { + data := attribute.(map[string]interface{}) + attributes = append(attributes, &elb.PolicyAttribute{ + AttributeName: aws.String(data["name"].(string)), + AttributeValue: aws.String(data["value"].(string)), + }) + } + } + + lbspOpts := &elb.CreateLoadBalancerPolicyInput{ + LoadBalancerName: aws.String(d.Get("load_balancer_name").(string)), + PolicyName: aws.String(d.Get("policy_name").(string)), + PolicyTypeName: aws.String(d.Get("policy_type_name").(string)), + PolicyAttributes: attributes, + } + + if _, err := elbconn.CreateLoadBalancerPolicy(lbspOpts); err != nil { + return fmt.Errorf("Error creating LoadBalancerPolicy: %s", err) + } + + d.SetId(fmt.Sprintf("%s:%s", + *lbspOpts.LoadBalancerName, + *lbspOpts.PolicyName)) + return resourceAwsLoadBalancerPolicyRead(d, meta) +} + +func resourceAwsLoadBalancerPolicyRead(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + loadBalancerName, policyName := resourceAwsLoadBalancerPolicyParseId(d.Id()) + + request := &elb.DescribeLoadBalancerPoliciesInput{ + LoadBalancerName: aws.String(loadBalancerName), + PolicyNames: []*string{aws.String(policyName)}, + } + + getResp, err := elbconn.DescribeLoadBalancerPolicies(request) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "PolicyNotFound" { + d.SetId("") + return nil + } + return fmt.Errorf("Error retrieving policy: %s", err) + } + + if len(getResp.PolicyDescriptions) != 1 { + return fmt.Errorf("Unable to find policy %#v", getResp.PolicyDescriptions) + } + + policyDesc := getResp.PolicyDescriptions[0] + policyTypeName := policyDesc.PolicyTypeName + policyAttributes := policyDesc.PolicyAttributeDescriptions + + attributes := []map[string]string{} + for _, a := range policyAttributes { + pair := make(map[string]string) + pair["name"] = *a.AttributeName + pair["value"] = *a.AttributeValue + if (*policyTypeName == "SSLNegotiationPolicyType") && (*a.AttributeValue == "false") { + continue + } + attributes = append(attributes, pair) + } + + d.Set("policy_name", policyName) + d.Set("policy_type_name", policyTypeName) + d.Set("load_balancer_name", loadBalancerName) + d.Set("policy_attribute", attributes) + + return nil +} + +func resourceAwsLoadBalancerPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + reassignments := Reassignment{} + + loadBalancerName, policyName := resourceAwsLoadBalancerPolicyParseId(d.Id()) + + assigned, err := resourceAwsLoadBalancerPolicyAssigned(policyName, loadBalancerName, elbconn) + if err != nil { + return fmt.Errorf("Error determining assignment status of Load Balancer Policy %s: %s", policyName, err) + } + + if assigned { + reassignments, err = resourceAwsLoadBalancerPolicyUnassign(policyName, loadBalancerName, elbconn) + if err != nil { + return fmt.Errorf("Error unassigning Load Balancer Policy %s: %s", policyName, err) + } + } + + request := &elb.DeleteLoadBalancerPolicyInput{ + LoadBalancerName: 
aws.String(loadBalancerName),
+		PolicyName:       aws.String(policyName),
+	}
+
+	if _, err := elbconn.DeleteLoadBalancerPolicy(request); err != nil {
+		return fmt.Errorf("Error deleting Load Balancer Policy %s: %s", d.Id(), err)
+	}
+
+	err = resourceAwsLoadBalancerPolicyCreate(d, meta)
+	if err != nil {
+		return fmt.Errorf("Error recreating Load Balancer Policy %s: %s", d.Id(), err)
+	}
+
+	for _, listenerAssignment := range reassignments.listenerPolicies {
+		if _, err := elbconn.SetLoadBalancerPoliciesOfListener(listenerAssignment); err != nil {
+			return fmt.Errorf("Error setting LoadBalancerPoliciesOfListener: %s", err)
+		}
+	}
+
+	for _, backendServerAssignment := range reassignments.backendServerPolicies {
+		if _, err := elbconn.SetLoadBalancerPoliciesForBackendServer(backendServerAssignment); err != nil {
+			return fmt.Errorf("Error setting LoadBalancerPoliciesForBackendServer: %s", err)
+		}
+	}
+
+	return resourceAwsLoadBalancerPolicyRead(d, meta)
+}
+
+func resourceAwsLoadBalancerPolicyDelete(d *schema.ResourceData, meta interface{}) error {
+	elbconn := meta.(*AWSClient).elbconn
+
+	loadBalancerName, policyName := resourceAwsLoadBalancerPolicyParseId(d.Id())
+
+	assigned, err := resourceAwsLoadBalancerPolicyAssigned(policyName, loadBalancerName, elbconn)
+	if err != nil {
+		return fmt.Errorf("Error determining assignment status of Load Balancer Policy %s: %s", policyName, err)
+	}
+
+	if assigned {
+		_, err := resourceAwsLoadBalancerPolicyUnassign(policyName, loadBalancerName, elbconn)
+		if err != nil {
+			return fmt.Errorf("Error unassigning Load Balancer Policy %s: %s", policyName, err)
+		}
+	}
+
+	request := &elb.DeleteLoadBalancerPolicyInput{
+		LoadBalancerName: aws.String(loadBalancerName),
+		PolicyName:       aws.String(policyName),
+	}
+
+	if _, err := elbconn.DeleteLoadBalancerPolicy(request); err != nil {
+		return fmt.Errorf("Error deleting Load Balancer Policy %s: %s", d.Id(), err)
+	}
+
+	d.SetId("")
+	return nil
+}
+
+func resourceAwsLoadBalancerPolicyParseId(id string) (string, string) {
+	parts := strings.SplitN(id, ":", 2)
+	return parts[0], parts[1]
+}
+
+func resourceAwsLoadBalancerPolicyAssigned(policyName, loadBalancerName string, elbconn *elb.ELB) (bool, error) {
+	describeElbOpts := &elb.DescribeLoadBalancersInput{
+		LoadBalancerNames: []*string{aws.String(loadBalancerName)},
+	}
+
+	describeResp, err := elbconn.DescribeLoadBalancers(describeElbOpts)
+
+	if err != nil {
+		if ec2err, ok := err.(awserr.Error); ok {
+			if ec2err.Code() == "LoadBalancerNotFound" {
+				return false, nil
+			}
+		}
+		return false, fmt.Errorf("Error retrieving ELB description: %s", err)
+	}
+
+	if len(describeResp.LoadBalancerDescriptions) != 1 {
+		return false, fmt.Errorf("Unable to find ELB: %#v", describeResp.LoadBalancerDescriptions)
+	}
+
+	lb := describeResp.LoadBalancerDescriptions[0]
+	assigned := false
+	for _, backendServer := range lb.BackendServerDescriptions {
+		for _, name := range backendServer.PolicyNames {
+			if policyName == *name {
+				assigned = true
+				break
+			}
+		}
+	}
+
+	for _, listener := range lb.ListenerDescriptions {
+		for _, name := range listener.PolicyNames {
+			if policyName == *name {
+				assigned = true
+				break
+			}
+		}
+	}
+
+	return assigned, nil
+}
+
+type Reassignment struct {
+	backendServerPolicies []*elb.SetLoadBalancerPoliciesForBackendServerInput
+	listenerPolicies      []*elb.SetLoadBalancerPoliciesOfListenerInput
+}
+
+func resourceAwsLoadBalancerPolicyUnassign(policyName, loadBalancerName string, elbconn *elb.ELB) (Reassignment, error) {
+	reassignments := Reassignment{}
+
+	describeElbOpts := &elb.DescribeLoadBalancersInput{
+		LoadBalancerNames:
[]*string{aws.String(loadBalancerName)}, + } + + describeResp, err := elbconn.DescribeLoadBalancers(describeElbOpts) + + if err != nil { + if ec2err, ok := err.(awserr.Error); ok { + if ec2err.Code() == "LoadBalancerNotFound" { + return reassignments, nil + } + } + return reassignments, fmt.Errorf("Error retrieving ELB description: %s", err) + } + + if len(describeResp.LoadBalancerDescriptions) != 1 { + return reassignments, fmt.Errorf("Unable to find ELB: %#v", describeResp.LoadBalancerDescriptions) + } + + lb := describeResp.LoadBalancerDescriptions[0] + + for _, backendServer := range lb.BackendServerDescriptions { + policies := []*string{} + + for _, name := range backendServer.PolicyNames { + if policyName != *name { + policies = append(policies, name) + } + } + + if len(backendServer.PolicyNames) != len(policies) { + setOpts := &elb.SetLoadBalancerPoliciesForBackendServerInput{ + LoadBalancerName: aws.String(loadBalancerName), + InstancePort: aws.Int64(*backendServer.InstancePort), + PolicyNames: policies, + } + + reassignOpts := &elb.SetLoadBalancerPoliciesForBackendServerInput{ + LoadBalancerName: aws.String(loadBalancerName), + InstancePort: aws.Int64(*backendServer.InstancePort), + PolicyNames: backendServer.PolicyNames, + } + + reassignments.backendServerPolicies = append(reassignments.backendServerPolicies, reassignOpts) + + _, err = elbconn.SetLoadBalancerPoliciesForBackendServer(setOpts) + if err != nil { + return reassignments, fmt.Errorf("Error Setting Load Balancer Policies for Backend Server: %s", err) + } + } + } + + for _, listener := range lb.ListenerDescriptions { + policies := []*string{} + + for _, name := range listener.PolicyNames { + if policyName != *name { + policies = append(policies, name) + } + } + + if len(listener.PolicyNames) != len(policies) { + setOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ + LoadBalancerName: aws.String(loadBalancerName), + LoadBalancerPort: aws.Int64(*listener.Listener.LoadBalancerPort), + PolicyNames: policies, + } + + reassignOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ + LoadBalancerName: aws.String(loadBalancerName), + LoadBalancerPort: aws.Int64(*listener.Listener.LoadBalancerPort), + PolicyNames: listener.PolicyNames, + } + + reassignments.listenerPolicies = append(reassignments.listenerPolicies, reassignOpts) + + _, err = elbconn.SetLoadBalancerPoliciesOfListener(setOpts) + if err != nil { + return reassignments, fmt.Errorf("Error Setting Load Balancer Policies of Listener: %s", err) + } + } + } + + return reassignments, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_main_route_table_association.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_main_route_table_association.go new file mode 100644 index 000000000..aabecda54 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_main_route_table_association.go @@ -0,0 +1,169 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsMainRouteTableAssociation() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsMainRouteTableAssociationCreate, + Read: resourceAwsMainRouteTableAssociationRead, + Update: resourceAwsMainRouteTableAssociationUpdate, + Delete: resourceAwsMainRouteTableAssociationDelete, + + Schema: map[string]*schema.Schema{ + "vpc_id": &schema.Schema{ + Type: schema.TypeString, + 
Required: true, + }, + + "route_table_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + // We use this field to record the main route table that is automatically + // created when the VPC is created. We need this to be able to "destroy" + // our main route table association, which we do by returning this route + // table to its original place as the Main Route Table for the VPC. + "original_route_table_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsMainRouteTableAssociationCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + vpcId := d.Get("vpc_id").(string) + routeTableId := d.Get("route_table_id").(string) + + log.Printf("[INFO] Creating main route table association: %s => %s", vpcId, routeTableId) + + mainAssociation, err := findMainRouteTableAssociation(conn, vpcId) + if err != nil { + return err + } + + resp, err := conn.ReplaceRouteTableAssociation(&ec2.ReplaceRouteTableAssociationInput{ + AssociationId: mainAssociation.RouteTableAssociationId, + RouteTableId: aws.String(routeTableId), + }) + if err != nil { + return err + } + + d.Set("original_route_table_id", mainAssociation.RouteTableId) + d.SetId(*resp.NewAssociationId) + log.Printf("[INFO] New main route table association ID: %s", d.Id()) + + return nil +} + +func resourceAwsMainRouteTableAssociationRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + mainAssociation, err := findMainRouteTableAssociation( + conn, + d.Get("vpc_id").(string)) + if err != nil { + return err + } + + if mainAssociation == nil || *mainAssociation.RouteTableAssociationId != d.Id() { + // It seems it doesn't exist anymore, so clear the ID + d.SetId("") + } + + return nil +} + +// Update is almost exactly like Create, except we want to retain the +// original_route_table_id - this needs to stay recorded as the AWS-created +// table from VPC creation. 
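+//
+// A minimal sketch of the underlying call (illustrative only; the IDs below
+// are hypothetical): ReplaceRouteTableAssociation consumes the current
+// association ID and hands back a brand-new one, which is why Create and
+// Update both re-record the resource ID afterwards.
+//
+//	resp, err := conn.ReplaceRouteTableAssociation(&ec2.ReplaceRouteTableAssociationInput{
+//		AssociationId: aws.String("rtbassoc-12345678"), // hypothetical current association
+//		RouteTableId:  aws.String("rtb-87654321"),      // hypothetical replacement table
+//	})
+//	if err == nil {
+//		d.SetId(*resp.NewAssociationId) // the old association ID is no longer valid
+//	}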
+func resourceAwsMainRouteTableAssociationUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + vpcId := d.Get("vpc_id").(string) + routeTableId := d.Get("route_table_id").(string) + + log.Printf("[INFO] Updating main route table association: %s => %s", vpcId, routeTableId) + + resp, err := conn.ReplaceRouteTableAssociation(&ec2.ReplaceRouteTableAssociationInput{ + AssociationId: aws.String(d.Id()), + RouteTableId: aws.String(routeTableId), + }) + if err != nil { + return err + } + + d.SetId(*resp.NewAssociationId) + log.Printf("[INFO] New main route table association ID: %s", d.Id()) + + return nil +} + +func resourceAwsMainRouteTableAssociationDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + vpcId := d.Get("vpc_id").(string) + originalRouteTableId := d.Get("original_route_table_id").(string) + + log.Printf("[INFO] Deleting main route table association by resetting Main Route Table for VPC: %s to its original Route Table: %s", + vpcId, + originalRouteTableId) + + resp, err := conn.ReplaceRouteTableAssociation(&ec2.ReplaceRouteTableAssociationInput{ + AssociationId: aws.String(d.Id()), + RouteTableId: aws.String(originalRouteTableId), + }) + if err != nil { + return err + } + + log.Printf("[INFO] Resulting Association ID: %s", *resp.NewAssociationId) + + return nil +} + +func findMainRouteTableAssociation(conn *ec2.EC2, vpcId string) (*ec2.RouteTableAssociation, error) { + mainRouteTable, err := findMainRouteTable(conn, vpcId) + if err != nil { + return nil, err + } + if mainRouteTable == nil { + return nil, nil + } + + for _, a := range mainRouteTable.Associations { + if *a.Main { + return a, nil + } + } + return nil, fmt.Errorf("Could not find main routing table association for VPC: %s", vpcId) +} + +func findMainRouteTable(conn *ec2.EC2, vpcId string) (*ec2.RouteTable, error) { + mainFilter := &ec2.Filter{ + Name: aws.String("association.main"), + Values: []*string{aws.String("true")}, + } + vpcFilter := &ec2.Filter{ + Name: aws.String("vpc-id"), + Values: []*string{aws.String(vpcId)}, + } + routeResp, err := conn.DescribeRouteTables(&ec2.DescribeRouteTablesInput{ + Filters: []*ec2.Filter{mainFilter, vpcFilter}, + }) + if err != nil { + return nil, err + } else if len(routeResp.RouteTables) != 1 { + return nil, nil + } + + return routeResp.RouteTables[0], nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_nat_gateway.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_nat_gateway.go new file mode 100644 index 000000000..1ec5e986e --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_nat_gateway.go @@ -0,0 +1,195 @@ +package aws + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsNatGateway() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsNatGatewayCreate, + Read: resourceAwsNatGatewayRead, + Delete: resourceAwsNatGatewayDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "allocation_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "subnet_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + 
ForceNew: true,
+			},
+
+			"network_interface_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+			},
+
+			"private_ip": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+			},
+
+			"public_ip": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceAwsNatGatewayCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	// Create the NAT Gateway
+	createOpts := &ec2.CreateNatGatewayInput{
+		AllocationId: aws.String(d.Get("allocation_id").(string)),
+		SubnetId:     aws.String(d.Get("subnet_id").(string)),
+	}
+
+	log.Printf("[DEBUG] Create NAT Gateway: %s", *createOpts)
+	natResp, err := conn.CreateNatGateway(createOpts)
+	if err != nil {
+		return fmt.Errorf("Error creating NAT Gateway: %s", err)
+	}
+
+	// Get the ID and store it
+	ng := natResp.NatGateway
+	d.SetId(*ng.NatGatewayId)
+	log.Printf("[INFO] NAT Gateway ID: %s", d.Id())
+
+	// Wait for the NAT Gateway to become available
+	log.Printf("[DEBUG] Waiting for NAT Gateway (%s) to become available", d.Id())
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"pending"},
+		Target:  []string{"available"},
+		Refresh: NGStateRefreshFunc(conn, d.Id()),
+		Timeout: 10 * time.Minute,
+	}
+
+	if _, err := stateConf.WaitForState(); err != nil {
+		return fmt.Errorf("Error waiting for NAT Gateway (%s) to become available: %s", d.Id(), err)
+	}
+
+	// Update our attributes and return
+	return resourceAwsNatGatewayRead(d, meta)
+}
+
+func resourceAwsNatGatewayRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	// Refresh the NAT Gateway state
+	ngRaw, state, err := NGStateRefreshFunc(conn, d.Id())()
+	if err != nil {
+		return err
+	}
+
+	status := map[string]bool{
+		"deleted":  true,
+		"deleting": true,
+		"failed":   true,
+	}
+
+	if _, ok := status[strings.ToLower(state)]; ngRaw == nil || ok {
+		log.Printf("[INFO] Removing %s from Terraform state as it is not found or in the deleted state.", d.Id())
+		d.SetId("")
+		return nil
+	}
+
+	// Set NAT Gateway attributes
+	ng := ngRaw.(*ec2.NatGateway)
+	d.Set("subnet_id", ng.SubnetId)
+
+	// Address
+	address := ng.NatGatewayAddresses[0]
+	d.Set("allocation_id", address.AllocationId)
+	d.Set("network_interface_id", address.NetworkInterfaceId)
+	d.Set("private_ip", address.PrivateIp)
+	d.Set("public_ip", address.PublicIp)
+
+	return nil
+}
+
+func resourceAwsNatGatewayDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+	deleteOpts := &ec2.DeleteNatGatewayInput{
+		NatGatewayId: aws.String(d.Id()),
+	}
+	log.Printf("[INFO] Deleting NAT Gateway: %s", d.Id())
+
+	_, err := conn.DeleteNatGateway(deleteOpts)
+	if err != nil {
+		ec2err, ok := err.(awserr.Error)
+		if !ok {
+			return err
+		}
+
+		if ec2err.Code() == "NatGatewayNotFound" {
+			return nil
+		}
+
+		return err
+	}
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"deleting"},
+		Target:     []string{"deleted"},
+		Refresh:    NGStateRefreshFunc(conn, d.Id()),
+		Timeout:    30 * time.Minute,
+		Delay:      10 * time.Second,
+		MinTimeout: 10 * time.Second,
+	}
+
+	_, stateErr := stateConf.WaitForState()
+	if stateErr != nil {
+		return fmt.Errorf("Error waiting for NAT Gateway (%s) to delete: %s", d.Id(), stateErr)
+	}
+
+	return nil
+}
+
+// NGStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch
+// a NAT Gateway.
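+//
+// Typical usage mirrors the create and delete paths above (sketch):
+//
+//	stateConf := &resource.StateChangeConf{
+//		Pending: []string{"pending"},
+//		Target:  []string{"available"},
+//		Refresh: NGStateRefreshFunc(conn, d.Id()),
+//		Timeout: 10 * time.Minute,
+//	}
+//	if _, err := stateConf.WaitForState(); err != nil {
+//		// handle the timeout or API error
+//	}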
+func NGStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + opts := &ec2.DescribeNatGatewaysInput{ + NatGatewayIds: []*string{aws.String(id)}, + } + resp, err := conn.DescribeNatGateways(opts) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "NatGatewayNotFound" { + resp = nil + } else { + log.Printf("Error on NGStateRefresh: %s", err) + return nil, "", err + } + } + + if resp == nil { + // Sometimes AWS just has consistency issues and doesn't see + // our instance yet. Return an empty state. + return nil, "", nil + } + + ng := resp.NatGateways[0] + return ng, *ng.State, nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_acl.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_acl.go new file mode 100644 index 000000000..4777f4707 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_acl.go @@ -0,0 +1,648 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsNetworkAcl() *schema.Resource { + + return &schema.Resource{ + Create: resourceAwsNetworkAclCreate, + Read: resourceAwsNetworkAclRead, + Delete: resourceAwsNetworkAclDelete, + Update: resourceAwsNetworkAclUpdate, + Importer: &schema.ResourceImporter{ + State: resourceAwsNetworkAclImportState, + }, + + Schema: map[string]*schema.Schema{ + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Computed: false, + }, + "subnet_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: false, + Deprecated: "Attribute subnet_id is deprecated on network_acl resources. 
Use subnet_ids instead", + }, + "subnet_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + ConflictsWith: []string{"subnet_id"}, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "ingress": { + Type: schema.TypeSet, + Required: false, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "from_port": { + Type: schema.TypeInt, + Required: true, + }, + "to_port": { + Type: schema.TypeInt, + Required: true, + }, + "rule_no": { + Type: schema.TypeInt, + Required: true, + }, + "action": { + Type: schema.TypeString, + Required: true, + }, + "protocol": { + Type: schema.TypeString, + Required: true, + }, + "cidr_block": { + Type: schema.TypeString, + Optional: true, + }, + "ipv6_cidr_block": { + Type: schema.TypeString, + Optional: true, + }, + "icmp_type": { + Type: schema.TypeInt, + Optional: true, + }, + "icmp_code": { + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + Set: resourceAwsNetworkAclEntryHash, + }, + "egress": { + Type: schema.TypeSet, + Required: false, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "from_port": { + Type: schema.TypeInt, + Required: true, + }, + "to_port": { + Type: schema.TypeInt, + Required: true, + }, + "rule_no": { + Type: schema.TypeInt, + Required: true, + }, + "action": { + Type: schema.TypeString, + Required: true, + }, + "protocol": { + Type: schema.TypeString, + Required: true, + }, + "cidr_block": { + Type: schema.TypeString, + Optional: true, + }, + "ipv6_cidr_block": { + Type: schema.TypeString, + Optional: true, + }, + "icmp_type": { + Type: schema.TypeInt, + Optional: true, + }, + "icmp_code": { + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + Set: resourceAwsNetworkAclEntryHash, + }, + "tags": tagsSchema(), + }, + } +} + +func resourceAwsNetworkAclCreate(d *schema.ResourceData, meta interface{}) error { + + conn := meta.(*AWSClient).ec2conn + + // Create the Network Acl + createOpts := &ec2.CreateNetworkAclInput{ + VpcId: aws.String(d.Get("vpc_id").(string)), + } + + log.Printf("[DEBUG] Network Acl create config: %#v", createOpts) + resp, err := conn.CreateNetworkAcl(createOpts) + if err != nil { + return fmt.Errorf("Error creating network acl: %s", err) + } + + // Get the ID and store it + networkAcl := resp.NetworkAcl + d.SetId(*networkAcl.NetworkAclId) + log.Printf("[INFO] Network Acl ID: %s", *networkAcl.NetworkAclId) + + // Update rules and subnet association once acl is created + return resourceAwsNetworkAclUpdate(d, meta) +} + +func resourceAwsNetworkAclRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{ + NetworkAclIds: []*string{aws.String(d.Id())}, + }) + + if err != nil { + if ec2err, ok := err.(awserr.Error); ok { + if ec2err.Code() == "InvalidNetworkAclID.NotFound" { + log.Printf("[DEBUG] Network ACL (%s) not found", d.Id()) + d.SetId("") + return nil + } + } + return err + } + if resp == nil { + return nil + } + + networkAcl := resp.NetworkAcls[0] + var ingressEntries []*ec2.NetworkAclEntry + var egressEntries []*ec2.NetworkAclEntry + + // separate the ingress and egress rules + for _, e := range networkAcl.Entries { + // Skip the default rules added by AWS. They can be neither + // configured or deleted by users. 
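+		// (The awsDefaultAclRuleNumber* constants are defined elsewhere in
+		// this package and hold the out-of-range rule numbers AWS reserves
+		// for its implicit catch-all entries.)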
+ if *e.RuleNumber == awsDefaultAclRuleNumberIpv4 || + *e.RuleNumber == awsDefaultAclRuleNumberIpv6 { + continue + } + + if *e.Egress == true { + egressEntries = append(egressEntries, e) + } else { + ingressEntries = append(ingressEntries, e) + } + } + + d.Set("vpc_id", networkAcl.VpcId) + d.Set("tags", tagsToMap(networkAcl.Tags)) + + var s []string + for _, a := range networkAcl.Associations { + s = append(s, *a.SubnetId) + } + sort.Strings(s) + if err := d.Set("subnet_ids", s); err != nil { + return err + } + + if err := d.Set("ingress", networkAclEntriesToMapList(ingressEntries)); err != nil { + return err + } + if err := d.Set("egress", networkAclEntriesToMapList(egressEntries)); err != nil { + return err + } + + return nil +} + +func resourceAwsNetworkAclUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + d.Partial(true) + + if d.HasChange("ingress") { + err := updateNetworkAclEntries(d, "ingress", conn) + if err != nil { + return err + } + } + + if d.HasChange("egress") { + err := updateNetworkAclEntries(d, "egress", conn) + if err != nil { + return err + } + } + + if d.HasChange("subnet_id") { + //associate new subnet with the acl. + _, n := d.GetChange("subnet_id") + newSubnet := n.(string) + association, err := findNetworkAclAssociation(newSubnet, conn) + if err != nil { + return fmt.Errorf("Failed to update acl %s with subnet %s: %s", d.Id(), newSubnet, err) + } + _, err = conn.ReplaceNetworkAclAssociation(&ec2.ReplaceNetworkAclAssociationInput{ + AssociationId: association.NetworkAclAssociationId, + NetworkAclId: aws.String(d.Id()), + }) + if err != nil { + return err + } + } + + if d.HasChange("subnet_ids") { + o, n := d.GetChange("subnet_ids") + if o == nil { + o = new(schema.Set) + } + if n == nil { + n = new(schema.Set) + } + + os := o.(*schema.Set) + ns := n.(*schema.Set) + + remove := os.Difference(ns).List() + add := ns.Difference(os).List() + + if len(remove) > 0 { + // A Network ACL is required for each subnet. In order to disassociate a + // subnet from this ACL, we must associate it with the default ACL. 
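+			// (In effect, a subnet removed from subnet_ids falls back to the
+			// VPC's default ACL rather than being left without one; AWS does
+			// not allow a subnet to have no network ACL at all.)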
+			defaultAcl, err := getDefaultNetworkAcl(d.Get("vpc_id").(string), conn)
+			if err != nil {
+				return fmt.Errorf("Failed to find Default ACL for VPC %s", d.Get("vpc_id").(string))
+			}
+			for _, r := range remove {
+				association, err := findNetworkAclAssociation(r.(string), conn)
+				if err != nil {
+					return fmt.Errorf("Failed to find acl association: acl %s with subnet %s: %s", d.Id(), r, err)
+				}
+				log.Printf("[DEBUG] Replacing Network Acl Association (%s) with Default Network ACL ID (%s)", *association.NetworkAclAssociationId, *defaultAcl.NetworkAclId)
+				_, err = conn.ReplaceNetworkAclAssociation(&ec2.ReplaceNetworkAclAssociationInput{
+					AssociationId: association.NetworkAclAssociationId,
+					NetworkAclId:  defaultAcl.NetworkAclId,
+				})
+				if err != nil {
+					return err
+				}
+			}
+		}
+
+		if len(add) > 0 {
+			for _, a := range add {
+				association, err := findNetworkAclAssociation(a.(string), conn)
+				if err != nil {
+					return fmt.Errorf("Failed to find acl association: acl %s with subnet %s: %s", d.Id(), a, err)
+				}
+				_, err = conn.ReplaceNetworkAclAssociation(&ec2.ReplaceNetworkAclAssociationInput{
+					AssociationId: association.NetworkAclAssociationId,
+					NetworkAclId:  aws.String(d.Id()),
+				})
+				if err != nil {
+					return err
+				}
+			}
+		}
+
+	}
+
+	if err := setTags(conn, d); err != nil {
+		return err
+	} else {
+		d.SetPartial("tags")
+	}
+
+	d.Partial(false)
+	return resourceAwsNetworkAclRead(d, meta)
+}
+
+func updateNetworkAclEntries(d *schema.ResourceData, entryType string, conn *ec2.EC2) error {
+	if d.HasChange(entryType) {
+		o, n := d.GetChange(entryType)
+
+		if o == nil {
+			o = new(schema.Set)
+		}
+		if n == nil {
+			n = new(schema.Set)
+		}
+
+		os := o.(*schema.Set)
+		ns := n.(*schema.Set)
+
+		toBeDeleted, err := expandNetworkAclEntries(os.Difference(ns).List(), entryType)
+		if err != nil {
+			return err
+		}
+		for _, remove := range toBeDeleted {
+			// AWS includes default rules with all network ACLs that can be
+			// neither modified nor destroyed. They have a custom rule
+			// number that is out of bounds for any other rule. If we
+			// encounter it, just continue. There's no work to be done.
+			if *remove.RuleNumber == awsDefaultAclRuleNumberIpv4 ||
+				*remove.RuleNumber == awsDefaultAclRuleNumberIpv6 {
+				continue
+			}
+
+			// Delete the old ACL entry
+			log.Printf("[DEBUG] Destroying Network ACL Entry number (%d)", int(*remove.RuleNumber))
+			_, err := conn.DeleteNetworkAclEntry(&ec2.DeleteNetworkAclEntryInput{
+				NetworkAclId: aws.String(d.Id()),
+				RuleNumber:   remove.RuleNumber,
+				Egress:       remove.Egress,
+			})
+			if err != nil {
+				return fmt.Errorf("Error deleting %s entry: %s", entryType, err)
+			}
+		}
+
+		toBeCreated, err := expandNetworkAclEntries(ns.Difference(os).List(), entryType)
+		if err != nil {
+			return err
+		}
+		for _, add := range toBeCreated {
+			// Protocol -1 rules don't store ports in AWS. Thus, they'll always
+			// hash differently when being read out of the API. Force the user
+			// to set from_port and to_port to 0 for these rules, to keep the
+			// hashing consistent.
+			if *add.Protocol == "-1" {
+				to := *add.PortRange.To
+				from := *add.PortRange.From
+				expected := &expectedPortPair{
+					to_port:   0,
+					from_port: 0,
+				}
+				if ok := validatePorts(to, from, *expected); !ok {
+					return fmt.Errorf(
+						"to_port (%d) and from_port (%d) must both be 0 to use the 'all' \"-1\" protocol!",
+						to, from)
+				}
+			}
+
+			if add.CidrBlock != nil && *add.CidrBlock != "" {
+				// AWS mutates the CIDR block into a network implied by the IP and
+				// mask provided.
+				// This results in hashing inconsistencies between the local
+				// config file and the state returned by the API. Error out if
+				// the user provides a CIDR block with an inappropriate mask.
+				if err := validateCIDRBlock(*add.CidrBlock); err != nil {
+					return err
+				}
+			}
+
+			createOpts := &ec2.CreateNetworkAclEntryInput{
+				NetworkAclId: aws.String(d.Id()),
+				Egress:       add.Egress,
+				PortRange:    add.PortRange,
+				Protocol:     add.Protocol,
+				RuleAction:   add.RuleAction,
+				RuleNumber:   add.RuleNumber,
+				IcmpTypeCode: add.IcmpTypeCode,
+			}
+
+			if add.CidrBlock != nil && *add.CidrBlock != "" {
+				createOpts.CidrBlock = add.CidrBlock
+			}
+
+			if add.Ipv6CidrBlock != nil && *add.Ipv6CidrBlock != "" {
+				createOpts.Ipv6CidrBlock = add.Ipv6CidrBlock
+			}
+
+			// Add the new ACL entry
+			_, connErr := conn.CreateNetworkAclEntry(createOpts)
+			if connErr != nil {
+				return fmt.Errorf("Error creating %s entry: %s", entryType, connErr)
+			}
+		}
+	}
+	return nil
+}
+
+func resourceAwsNetworkAclDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	log.Printf("[INFO] Deleting Network Acl: %s", d.Id())
+	retryErr := resource.Retry(5*time.Minute, func() *resource.RetryError {
+		_, err := conn.DeleteNetworkAcl(&ec2.DeleteNetworkAclInput{
+			NetworkAclId: aws.String(d.Id()),
+		})
+		if err != nil {
+			ec2err := err.(awserr.Error)
+			switch ec2err.Code() {
+			case "InvalidNetworkAclID.NotFound":
+				return nil
+			case "DependencyViolation":
+				// In case of a dependency violation, we remove the association
+				// between the subnet and the network ACL, which attaches the
+				// subnet to the default ACL of the VPC.
+				var associations []*ec2.NetworkAclAssociation
+				if v, ok := d.GetOk("subnet_id"); ok {
+					a, err := findNetworkAclAssociation(v.(string), conn)
+					if err != nil {
+						return resource.NonRetryableError(err)
+					}
+					associations = append(associations, a)
+				} else if v, ok := d.GetOk("subnet_ids"); ok {
+					ids := v.(*schema.Set).List()
+					for _, i := range ids {
+						a, err := findNetworkAclAssociation(i.(string), conn)
+						if err != nil {
+							return resource.NonRetryableError(err)
+						}
+						associations = append(associations, a)
+					}
+				}
+
+				log.Printf("[DEBUG] Replacing network associations for Network ACL (%s): %s", d.Id(), associations)
+				defaultAcl, err := getDefaultNetworkAcl(d.Get("vpc_id").(string), conn)
+				if err != nil {
+					return resource.NonRetryableError(err)
+				}
+
+				for _, a := range associations {
+					log.Printf("[DEBUG] Replacing Network Acl Association (%s) with Default Network ACL ID (%s)", *a.NetworkAclAssociationId, *defaultAcl.NetworkAclId)
+					_, replaceErr := conn.ReplaceNetworkAclAssociation(&ec2.ReplaceNetworkAclAssociationInput{
+						AssociationId: a.NetworkAclAssociationId,
+						NetworkAclId:  defaultAcl.NetworkAclId,
+					})
+					if replaceErr != nil {
+						if replaceEc2err, ok := replaceErr.(awserr.Error); ok {
+							// It's possible that during an attempt to replace this
+							// association, the Subnet in question has already been moved to
+							// another ACL. This can happen if you're destroying a network acl
+							// and simultaneously re-associating its subnet(s) with another
+							// ACL; Terraform may have already re-associated the subnet(s) by
+							// the time we attempt to destroy them, even between the time we
+							// list them and then try to destroy them. In this case, the
+							// association we're trying to replace will no longer exist and
+							// this call will fail. Here we trap that error and fail
+							// gracefully; the association we tried to replace is gone, so we
+							// trust that someone else has taken ownership.
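+							// (The `continue` below skips only this stale
+							// association; the remaining associations are still
+							// re-pointed at the default ACL before the delete is
+							// retried.)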
+ if replaceEc2err.Code() == "InvalidAssociationID.NotFound" { + log.Printf("[WARN] Network Association (%s) no longer found; Network Association likely updated or removed externally, removing from state", *a.NetworkAclAssociationId) + continue + } + } + log.Printf("[ERR] Non retry-able error in replacing associations for Network ACL (%s): %s", d.Id(), replaceErr) + return resource.NonRetryableError(replaceErr) + } + } + return resource.RetryableError(fmt.Errorf("Dependencies found and cleaned up, retrying")) + default: + // Any other error, we want to quit the retry loop immediately + return resource.NonRetryableError(err) + } + } + log.Printf("[Info] Deleted network ACL %s successfully", d.Id()) + return nil + }) + + if retryErr != nil { + return fmt.Errorf("[ERR] Error destroying Network ACL (%s): %s", d.Id(), retryErr) + } + return nil +} + +func resourceAwsNetworkAclEntryHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%d-", m["from_port"].(int))) + buf.WriteString(fmt.Sprintf("%d-", m["to_port"].(int))) + buf.WriteString(fmt.Sprintf("%d-", m["rule_no"].(int))) + buf.WriteString(fmt.Sprintf("%s-", m["action"].(string))) + + // The AWS network ACL API only speaks protocol numbers, and that's + // all we store. Never hash a protocol name. + protocol := m["protocol"].(string) + if _, err := strconv.Atoi(m["protocol"].(string)); err != nil { + // We're a protocol name. Look up the number. + buf.WriteString(fmt.Sprintf("%d-", protocolIntegers()[protocol])) + } else { + // We're a protocol number. Pass the value through. + buf.WriteString(fmt.Sprintf("%s-", protocol)) + } + + if v, ok := m["cidr_block"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + if v, ok := m["ipv6_cidr_block"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + if v, ok := m["ssl_certificate_id"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + if v, ok := m["icmp_type"]; ok { + buf.WriteString(fmt.Sprintf("%d-", v.(int))) + } + if v, ok := m["icmp_code"]; ok { + buf.WriteString(fmt.Sprintf("%d-", v.(int))) + } + + return hashcode.String(buf.String()) +} + +func getDefaultNetworkAcl(vpc_id string, conn *ec2.EC2) (defaultAcl *ec2.NetworkAcl, err error) { + resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("default"), + Values: []*string{aws.String("true")}, + }, + { + Name: aws.String("vpc-id"), + Values: []*string{aws.String(vpc_id)}, + }, + }, + }) + + if err != nil { + return nil, err + } + return resp.NetworkAcls[0], nil +} + +func findNetworkAclAssociation(subnetId string, conn *ec2.EC2) (networkAclAssociation *ec2.NetworkAclAssociation, err error) { + resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("association.subnet-id"), + Values: []*string{aws.String(subnetId)}, + }, + }, + }) + + if err != nil { + return nil, err + } + if resp.NetworkAcls != nil && len(resp.NetworkAcls) > 0 { + for _, association := range resp.NetworkAcls[0].Associations { + if *association.SubnetId == subnetId { + return association, nil + } + } + } + return nil, fmt.Errorf("could not find association for subnet: %s ", subnetId) +} + +// networkAclEntriesToMapList turns ingress/egress rules read from AWS into a list +// of maps. 
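+//
+// A sketch of the mapping for a single entry (illustrative values only):
+//
+//	&ec2.NetworkAclEntry{RuleNumber: aws.Int64(100), RuleAction: aws.String("allow"),
+//		Protocol: aws.String("6"), CidrBlock: aws.String("10.0.0.0/16"),
+//		PortRange: &ec2.PortRange{From: aws.Int64(443), To: aws.Int64(443)}}
+//
+// becomes
+//
+//	map[string]interface{}{"rule_no": 100, "action": "allow", "protocol": "6",
+//		"cidr_block": "10.0.0.0/16", "from_port": 443, "to_port": 443}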
+func networkAclEntriesToMapList(networkAcls []*ec2.NetworkAclEntry) []map[string]interface{} {
+	result := make([]map[string]interface{}, 0, len(networkAcls))
+	for _, entry := range networkAcls {
+		acl := make(map[string]interface{})
+		acl["rule_no"] = *entry.RuleNumber
+		acl["action"] = *entry.RuleAction
+		if entry.CidrBlock != nil {
+			acl["cidr_block"] = *entry.CidrBlock
+		}
+		if entry.Ipv6CidrBlock != nil {
+			acl["ipv6_cidr_block"] = *entry.Ipv6CidrBlock
+		}
+		// The AWS network ACL API only speaks protocol numbers, and
+		// that's all we record.
+		if _, err := strconv.Atoi(*entry.Protocol); err != nil {
+			// We're a protocol name. Look up the number.
+			acl["protocol"] = protocolIntegers()[*entry.Protocol]
+		} else {
+			// We're a protocol number. Pass through.
+			acl["protocol"] = *entry.Protocol
+		}
+
+		if entry.PortRange != nil {
+			acl["from_port"] = *entry.PortRange.From
+			acl["to_port"] = *entry.PortRange.To
+		}
+
+		if entry.IcmpTypeCode != nil {
+			acl["icmp_type"] = *entry.IcmpTypeCode.Type
+			acl["icmp_code"] = *entry.IcmpTypeCode.Code
+		}
+
+		result = append(result, acl)
+	}
+
+	return result
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_acl_rule.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_acl_rule.go
new file mode 100644
index 000000000..d3aa099fc
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_acl_rule.go
@@ -0,0 +1,308 @@
+package aws
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/hashicorp/terraform/helper/hashcode"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsNetworkAclRule() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsNetworkAclRuleCreate,
+		Read:   resourceAwsNetworkAclRuleRead,
+		Delete: resourceAwsNetworkAclRuleDelete,
+
+		Schema: map[string]*schema.Schema{
+			"network_acl_id": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"rule_number": {
+				Type:     schema.TypeInt,
+				Required: true,
+				ForceNew: true,
+			},
+			"egress": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				ForceNew: true,
+				Default:  false,
+			},
+			"protocol": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+				DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool {
+					if old == "all" && new == "-1" || old == "-1" && new == "all" {
+						return true
+					}
+					return false
+				},
+			},
+			"rule_action": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"cidr_block": {
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+			"ipv6_cidr_block": {
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+			"from_port": {
+				Type:     schema.TypeInt,
+				Optional: true,
+				ForceNew: true,
+			},
+			"to_port": {
+				Type:     schema.TypeInt,
+				Optional: true,
+				ForceNew: true,
+			},
+			"icmp_type": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				ForceNew:     true,
+				ValidateFunc: validateICMPArgumentValue,
+			},
+			"icmp_code": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				ForceNew:     true,
+				ValidateFunc: validateICMPArgumentValue,
+			},
+		},
+	}
+}
+
+func resourceAwsNetworkAclRuleCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	protocol := d.Get("protocol").(string)
+	p, protocolErr := strconv.Atoi(protocol)
+	if
protocolErr != nil { + var ok bool + p, ok = protocolIntegers()[protocol] + if !ok { + return fmt.Errorf("Invalid Protocol %s for rule %d", protocol, d.Get("rule_number").(int)) + } + } + log.Printf("[INFO] Transformed Protocol %s into %d", protocol, p) + + params := &ec2.CreateNetworkAclEntryInput{ + NetworkAclId: aws.String(d.Get("network_acl_id").(string)), + Egress: aws.Bool(d.Get("egress").(bool)), + RuleNumber: aws.Int64(int64(d.Get("rule_number").(int))), + Protocol: aws.String(strconv.Itoa(p)), + RuleAction: aws.String(d.Get("rule_action").(string)), + PortRange: &ec2.PortRange{ + From: aws.Int64(int64(d.Get("from_port").(int))), + To: aws.Int64(int64(d.Get("to_port").(int))), + }, + } + + cidr, hasCidr := d.GetOk("cidr_block") + ipv6Cidr, hasIpv6Cidr := d.GetOk("ipv6_cidr_block") + + if hasCidr == false && hasIpv6Cidr == false { + return fmt.Errorf("Either `cidr_block` or `ipv6_cidr_block` must be defined") + } + + if hasCidr { + params.CidrBlock = aws.String(cidr.(string)) + } + + if hasIpv6Cidr { + params.Ipv6CidrBlock = aws.String(ipv6Cidr.(string)) + } + + // Specify additional required fields for ICMP. For the list + // of ICMP codes and types, see: http://www.nthelp.com/icmp.html + if p == 1 { + params.IcmpTypeCode = &ec2.IcmpTypeCode{} + if v, ok := d.GetOk("icmp_type"); ok { + icmpType, err := strconv.Atoi(v.(string)) + if err != nil { + return fmt.Errorf("Unable to parse ICMP type %s for rule %d", v, d.Get("rule_number").(int)) + } + params.IcmpTypeCode.Type = aws.Int64(int64(icmpType)) + log.Printf("[DEBUG] Got ICMP type %d for rule %d", icmpType, d.Get("rule_number").(int)) + } + if v, ok := d.GetOk("icmp_code"); ok { + icmpCode, err := strconv.Atoi(v.(string)) + if err != nil { + return fmt.Errorf("Unable to parse ICMP code %s for rule %d", v, d.Get("rule_number").(int)) + } + params.IcmpTypeCode.Code = aws.Int64(int64(icmpCode)) + log.Printf("[DEBUG] Got ICMP code %d for rule %d", icmpCode, d.Get("rule_number").(int)) + } + } + + log.Printf("[INFO] Creating Network Acl Rule: %d (%t)", d.Get("rule_number").(int), d.Get("egress").(bool)) + _, err := conn.CreateNetworkAclEntry(params) + if err != nil { + return fmt.Errorf("Error Creating Network Acl Rule: %s", err.Error()) + } + d.SetId(networkAclIdRuleNumberEgressHash(d.Get("network_acl_id").(string), d.Get("rule_number").(int), d.Get("egress").(bool), d.Get("protocol").(string))) + + // It appears it might be a while until the newly created rule is visible via the + // API (see issue GH-4721). Retry the `findNetworkAclRule` function until it is + // visible (which in most cases is likely immediately). + err = resource.Retry(3*time.Minute, func() *resource.RetryError { + r, findErr := findNetworkAclRule(d, meta) + if findErr != nil { + return resource.RetryableError(findErr) + } + if r == nil { + err := fmt.Errorf("Network ACL rule (%s) not found", d.Id()) + return resource.RetryableError(err) + } + + return nil + }) + if err != nil { + return fmt.Errorf("Created Network ACL Rule was not visible in API within 3 minute period. 
Running 'terraform apply' again will resume infrastructure creation.") + } + + return resourceAwsNetworkAclRuleRead(d, meta) +} + +func resourceAwsNetworkAclRuleRead(d *schema.ResourceData, meta interface{}) error { + resp, err := findNetworkAclRule(d, meta) + if err != nil { + return err + } + if resp == nil { + log.Printf("[DEBUG] Network ACL rule (%s) not found", d.Id()) + d.SetId("") + return nil + } + + d.Set("rule_number", resp.RuleNumber) + d.Set("cidr_block", resp.CidrBlock) + d.Set("ipv6_cidr_block", resp.Ipv6CidrBlock) + d.Set("egress", resp.Egress) + if resp.IcmpTypeCode != nil { + d.Set("icmp_code", resp.IcmpTypeCode.Code) + d.Set("icmp_type", resp.IcmpTypeCode.Type) + } + if resp.PortRange != nil { + d.Set("from_port", resp.PortRange.From) + d.Set("to_port", resp.PortRange.To) + } + + d.Set("rule_action", resp.RuleAction) + + p, protocolErr := strconv.Atoi(*resp.Protocol) + log.Printf("[INFO] Converting the protocol %v", p) + if protocolErr == nil { + var ok bool + protocol, ok := protocolStrings(protocolIntegers())[p] + if !ok { + return fmt.Errorf("Invalid Protocol %s for rule %d", *resp.Protocol, d.Get("rule_number").(int)) + } + log.Printf("[INFO] Transformed Protocol %s back into %s", *resp.Protocol, protocol) + d.Set("protocol", protocol) + } + + return nil +} + +func resourceAwsNetworkAclRuleDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + params := &ec2.DeleteNetworkAclEntryInput{ + NetworkAclId: aws.String(d.Get("network_acl_id").(string)), + RuleNumber: aws.Int64(int64(d.Get("rule_number").(int))), + Egress: aws.Bool(d.Get("egress").(bool)), + } + + log.Printf("[INFO] Deleting Network Acl Rule: %s", d.Id()) + _, err := conn.DeleteNetworkAclEntry(params) + if err != nil { + return fmt.Errorf("Error Deleting Network Acl Rule: %s", err.Error()) + } + + return nil +} + +func findNetworkAclRule(d *schema.ResourceData, meta interface{}) (*ec2.NetworkAclEntry, error) { + conn := meta.(*AWSClient).ec2conn + + filters := make([]*ec2.Filter, 0, 2) + ruleNumberFilter := &ec2.Filter{ + Name: aws.String("entry.rule-number"), + Values: []*string{aws.String(fmt.Sprintf("%d", d.Get("rule_number").(int)))}, + } + filters = append(filters, ruleNumberFilter) + egressFilter := &ec2.Filter{ + Name: aws.String("entry.egress"), + Values: []*string{aws.String(fmt.Sprintf("%v", d.Get("egress").(bool)))}, + } + filters = append(filters, egressFilter) + params := &ec2.DescribeNetworkAclsInput{ + NetworkAclIds: []*string{aws.String(d.Get("network_acl_id").(string))}, + Filters: filters, + } + + log.Printf("[INFO] Describing Network Acl: %s", d.Get("network_acl_id").(string)) + log.Printf("[INFO] Describing Network Acl with the Filters %#v", params) + resp, err := conn.DescribeNetworkAcls(params) + if err != nil { + return nil, fmt.Errorf("Error Finding Network Acl Rule %d: %s", d.Get("rule_number").(int), err.Error()) + } + + if resp == nil || len(resp.NetworkAcls) == 0 || resp.NetworkAcls[0] == nil { + // Missing NACL rule. 
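+		// Returning (nil, nil) rather than an error lets callers distinguish
+		// "rule is gone" from an API failure: Read clears the resource ID and
+		// Create's retry loop keeps polling until the entry becomes visible.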
+ return nil, nil + } + if len(resp.NetworkAcls) > 1 { + return nil, fmt.Errorf( + "Expected to find one Network ACL, got: %#v", + resp.NetworkAcls) + } + networkAcl := resp.NetworkAcls[0] + if networkAcl.Entries != nil { + for _, i := range networkAcl.Entries { + if *i.RuleNumber == int64(d.Get("rule_number").(int)) && *i.Egress == d.Get("egress").(bool) { + return i, nil + } + } + } + return nil, fmt.Errorf( + "Expected the Network ACL to have Entries, got: %#v", + networkAcl) + +} + +func networkAclIdRuleNumberEgressHash(networkAclId string, ruleNumber int, egress bool, protocol string) string { + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("%s-", networkAclId)) + buf.WriteString(fmt.Sprintf("%d-", ruleNumber)) + buf.WriteString(fmt.Sprintf("%t-", egress)) + buf.WriteString(fmt.Sprintf("%s-", protocol)) + return fmt.Sprintf("nacl-%d", hashcode.String(buf.String())) +} + +func validateICMPArgumentValue(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + _, err := strconv.Atoi(value) + if len(value) == 0 || err != nil { + errors = append(errors, fmt.Errorf("%q must be an integer value: %q", k, value)) + } + return +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_interface.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_interface.go new file mode 100644 index 000000000..857237141 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_interface.go @@ -0,0 +1,429 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "math" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsNetworkInterface() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsNetworkInterfaceCreate, + Read: resourceAwsNetworkInterfaceRead, + Update: resourceAwsNetworkInterfaceUpdate, + Delete: resourceAwsNetworkInterfaceDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + + "subnet_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "private_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "private_ips": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "private_ips_count": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "security_groups": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "source_dest_check": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "attachment": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "device_index": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + "attachment_id": &schema.Schema{ + Type: schema.TypeString, + Computed: 
true, + }, + }, + }, + Set: resourceAwsEniAttachmentHash, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsNetworkInterfaceCreate(d *schema.ResourceData, meta interface{}) error { + + conn := meta.(*AWSClient).ec2conn + + request := &ec2.CreateNetworkInterfaceInput{ + SubnetId: aws.String(d.Get("subnet_id").(string)), + } + + security_groups := d.Get("security_groups").(*schema.Set).List() + if len(security_groups) != 0 { + request.Groups = expandStringList(security_groups) + } + + private_ips := d.Get("private_ips").(*schema.Set).List() + if len(private_ips) != 0 { + request.PrivateIpAddresses = expandPrivateIPAddresses(private_ips) + } + + if v, ok := d.GetOk("description"); ok { + request.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("private_ips_count"); ok { + request.SecondaryPrivateIpAddressCount = aws.Int64(int64(v.(int))) + } + + log.Printf("[DEBUG] Creating network interface") + resp, err := conn.CreateNetworkInterface(request) + if err != nil { + return fmt.Errorf("Error creating ENI: %s", err) + } + + d.SetId(*resp.NetworkInterface.NetworkInterfaceId) + log.Printf("[INFO] ENI ID: %s", d.Id()) + return resourceAwsNetworkInterfaceUpdate(d, meta) +} + +func resourceAwsNetworkInterfaceRead(d *schema.ResourceData, meta interface{}) error { + + conn := meta.(*AWSClient).ec2conn + describe_network_interfaces_request := &ec2.DescribeNetworkInterfacesInput{ + NetworkInterfaceIds: []*string{aws.String(d.Id())}, + } + describeResp, err := conn.DescribeNetworkInterfaces(describe_network_interfaces_request) + + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidNetworkInterfaceID.NotFound" { + // The ENI is gone now, so just remove it from the state + d.SetId("") + return nil + } + + return fmt.Errorf("Error retrieving ENI: %s", err) + } + if len(describeResp.NetworkInterfaces) != 1 { + return fmt.Errorf("Unable to find ENI: %#v", describeResp.NetworkInterfaces) + } + + eni := describeResp.NetworkInterfaces[0] + d.Set("subnet_id", eni.SubnetId) + d.Set("private_ip", eni.PrivateIpAddress) + d.Set("private_ips", flattenNetworkInterfacesPrivateIPAddresses(eni.PrivateIpAddresses)) + d.Set("security_groups", flattenGroupIdentifiers(eni.Groups)) + d.Set("source_dest_check", eni.SourceDestCheck) + + if eni.Description != nil { + d.Set("description", eni.Description) + } + + // Tags + d.Set("tags", tagsToMap(eni.TagSet)) + + if eni.Attachment != nil { + attachment := []map[string]interface{}{flattenAttachment(eni.Attachment)} + d.Set("attachment", attachment) + } else { + d.Set("attachment", nil) + } + + return nil +} + +func networkInterfaceAttachmentRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + describe_network_interfaces_request := &ec2.DescribeNetworkInterfacesInput{ + NetworkInterfaceIds: []*string{aws.String(id)}, + } + describeResp, err := conn.DescribeNetworkInterfaces(describe_network_interfaces_request) + + if err != nil { + log.Printf("[ERROR] Could not find network interface %s. 
%s", id, err) + return nil, "", err + } + + eni := describeResp.NetworkInterfaces[0] + hasAttachment := strconv.FormatBool(eni.Attachment != nil) + log.Printf("[DEBUG] ENI %s has attachment state %s", id, hasAttachment) + return eni, hasAttachment, nil + } +} + +func resourceAwsNetworkInterfaceDetach(oa *schema.Set, meta interface{}, eniId string) error { + // if there was an old attachment, remove it + if oa != nil && len(oa.List()) > 0 { + old_attachment := oa.List()[0].(map[string]interface{}) + detach_request := &ec2.DetachNetworkInterfaceInput{ + AttachmentId: aws.String(old_attachment["attachment_id"].(string)), + Force: aws.Bool(true), + } + conn := meta.(*AWSClient).ec2conn + _, detach_err := conn.DetachNetworkInterface(detach_request) + if detach_err != nil { + if awsErr, _ := detach_err.(awserr.Error); awsErr.Code() != "InvalidAttachmentID.NotFound" { + return fmt.Errorf("Error detaching ENI: %s", detach_err) + } + } + + log.Printf("[DEBUG] Waiting for ENI (%s) to become dettached", eniId) + stateConf := &resource.StateChangeConf{ + Pending: []string{"true"}, + Target: []string{"false"}, + Refresh: networkInterfaceAttachmentRefreshFunc(conn, eniId), + Timeout: 10 * time.Minute, + } + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf( + "Error waiting for ENI (%s) to become dettached: %s", eniId, err) + } + } + + return nil +} + +func resourceAwsNetworkInterfaceUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + d.Partial(true) + + if d.HasChange("attachment") { + oa, na := d.GetChange("attachment") + + detach_err := resourceAwsNetworkInterfaceDetach(oa.(*schema.Set), meta, d.Id()) + if detach_err != nil { + return detach_err + } + + // if there is a new attachment, attach it + if na != nil && len(na.(*schema.Set).List()) > 0 { + new_attachment := na.(*schema.Set).List()[0].(map[string]interface{}) + di := new_attachment["device_index"].(int) + attach_request := &ec2.AttachNetworkInterfaceInput{ + DeviceIndex: aws.Int64(int64(di)), + InstanceId: aws.String(new_attachment["instance"].(string)), + NetworkInterfaceId: aws.String(d.Id()), + } + _, attach_err := conn.AttachNetworkInterface(attach_request) + if attach_err != nil { + return fmt.Errorf("Error attaching ENI: %s", attach_err) + } + } + + d.SetPartial("attachment") + } + + if d.HasChange("private_ips") { + o, n := d.GetChange("private_ips") + if o == nil { + o = new(schema.Set) + } + if n == nil { + n = new(schema.Set) + } + + os := o.(*schema.Set) + ns := n.(*schema.Set) + + // Unassign old IP addresses + unassignIps := os.Difference(ns) + if unassignIps.Len() != 0 { + input := &ec2.UnassignPrivateIpAddressesInput{ + NetworkInterfaceId: aws.String(d.Id()), + PrivateIpAddresses: expandStringList(unassignIps.List()), + } + _, err := conn.UnassignPrivateIpAddresses(input) + if err != nil { + return fmt.Errorf("Failure to unassign Private IPs: %s", err) + } + } + + // Assign new IP addresses + assignIps := ns.Difference(os) + if assignIps.Len() != 0 { + input := &ec2.AssignPrivateIpAddressesInput{ + NetworkInterfaceId: aws.String(d.Id()), + PrivateIpAddresses: expandStringList(assignIps.List()), + } + _, err := conn.AssignPrivateIpAddresses(input) + if err != nil { + return fmt.Errorf("Failure to assign Private IPs: %s", err) + } + } + + d.SetPartial("private_ips") + } + + request := &ec2.ModifyNetworkInterfaceAttributeInput{ + NetworkInterfaceId: aws.String(d.Id()), + SourceDestCheck: &ec2.AttributeBooleanValue{Value: aws.Bool(d.Get("source_dest_check").(bool))}, 
+ } + + _, err := conn.ModifyNetworkInterfaceAttribute(request) + if err != nil { + return fmt.Errorf("Failure updating ENI: %s", err) + } + + d.SetPartial("source_dest_check") + + if d.HasChange("private_ips_count") { + o, n := d.GetChange("private_ips_count") + private_ips := d.Get("private_ips").(*schema.Set).List() + private_ips_filtered := private_ips[:0] + primary_ip := d.Get("private_ip") + + for _, ip := range private_ips { + if ip != primary_ip { + private_ips_filtered = append(private_ips_filtered, ip) + } + } + + if o != nil && o != 0 && n != nil && n != len(private_ips_filtered) { + + diff := n.(int) - o.(int) + + // Surplus of IPs, add the diff + if diff > 0 { + input := &ec2.AssignPrivateIpAddressesInput{ + NetworkInterfaceId: aws.String(d.Id()), + SecondaryPrivateIpAddressCount: aws.Int64(int64(diff)), + } + _, err := conn.AssignPrivateIpAddresses(input) + if err != nil { + return fmt.Errorf("Failure to assign Private IPs: %s", err) + } + } + + if diff < 0 { + input := &ec2.UnassignPrivateIpAddressesInput{ + NetworkInterfaceId: aws.String(d.Id()), + PrivateIpAddresses: expandStringList(private_ips_filtered[0:int(math.Abs(float64(diff)))]), + } + _, err := conn.UnassignPrivateIpAddresses(input) + if err != nil { + return fmt.Errorf("Failure to unassign Private IPs: %s", err) + } + } + + d.SetPartial("private_ips_count") + } + } + + if d.HasChange("security_groups") { + request := &ec2.ModifyNetworkInterfaceAttributeInput{ + NetworkInterfaceId: aws.String(d.Id()), + Groups: expandStringList(d.Get("security_groups").(*schema.Set).List()), + } + + _, err := conn.ModifyNetworkInterfaceAttribute(request) + if err != nil { + return fmt.Errorf("Failure updating ENI: %s", err) + } + + d.SetPartial("security_groups") + } + + if d.HasChange("description") { + request := &ec2.ModifyNetworkInterfaceAttributeInput{ + NetworkInterfaceId: aws.String(d.Id()), + Description: &ec2.AttributeValue{Value: aws.String(d.Get("description").(string))}, + } + + _, err := conn.ModifyNetworkInterfaceAttribute(request) + if err != nil { + return fmt.Errorf("Failure updating ENI: %s", err) + } + + d.SetPartial("description") + } + + if err := setTags(conn, d); err != nil { + return err + } else { + d.SetPartial("tags") + } + + d.Partial(false) + + return resourceAwsNetworkInterfaceRead(d, meta) +} + +func resourceAwsNetworkInterfaceDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + log.Printf("[INFO] Deleting ENI: %s", d.Id()) + + detach_err := resourceAwsNetworkInterfaceDetach(d.Get("attachment").(*schema.Set), meta, d.Id()) + if detach_err != nil { + return detach_err + } + + deleteEniOpts := ec2.DeleteNetworkInterfaceInput{ + NetworkInterfaceId: aws.String(d.Id()), + } + if _, err := conn.DeleteNetworkInterface(&deleteEniOpts); err != nil { + return fmt.Errorf("Error deleting ENI: %s", err) + } + + return nil +} + +func resourceAwsEniAttachmentHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["instance"].(string))) + buf.WriteString(fmt.Sprintf("%d-", m["device_index"].(int))) + return hashcode.String(buf.String()) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_interface_attachment.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_interface_attachment.go new file mode 100644 index 000000000..c37b0d18f --- /dev/null +++ 
b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_network_interface_attachment.go
@@ -0,0 +1,166 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsNetworkInterfaceAttachment() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsNetworkInterfaceAttachmentCreate,
+		Read:   resourceAwsNetworkInterfaceAttachmentRead,
+		Delete: resourceAwsNetworkInterfaceAttachmentDelete,
+
+		Schema: map[string]*schema.Schema{
+			"device_index": {
+				Type:     schema.TypeInt,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"instance_id": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"network_interface_id": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"attachment_id": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"status": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceAwsNetworkInterfaceAttachmentCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	device_index := d.Get("device_index").(int)
+	instance_id := d.Get("instance_id").(string)
+	network_interface_id := d.Get("network_interface_id").(string)
+
+	opts := &ec2.AttachNetworkInterfaceInput{
+		DeviceIndex:        aws.Int64(int64(device_index)),
+		InstanceId:         aws.String(instance_id),
+		NetworkInterfaceId: aws.String(network_interface_id),
+	}
+
+	log.Printf("[DEBUG] Attaching network interface (%s) to instance (%s)", network_interface_id, instance_id)
+	resp, err := conn.AttachNetworkInterface(opts)
+	if err != nil {
+		if awsErr, ok := err.(awserr.Error); ok {
+			return fmt.Errorf("Error attaching network interface (%s) to instance (%s), message: \"%s\", code: \"%s\"",
+				network_interface_id, instance_id, awsErr.Message(), awsErr.Code())
+		}
+		return err
+	}
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"false"},
+		Target:     []string{"true"},
+		Refresh:    networkInterfaceAttachmentRefreshFunc(conn, network_interface_id),
+		Timeout:    5 * time.Minute,
+		Delay:      10 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf(
+			"Error waiting for network interface (%s) to attach to instance (%s): %s", network_interface_id, instance_id, err)
+	}
+
+	d.SetId(*resp.AttachmentId)
+	return resourceAwsNetworkInterfaceAttachmentRead(d, meta)
+}
+
+func resourceAwsNetworkInterfaceAttachmentRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	interfaceId := d.Get("network_interface_id").(string)
+
+	req := &ec2.DescribeNetworkInterfacesInput{
+		NetworkInterfaceIds: []*string{aws.String(interfaceId)},
+	}
+
+	resp, err := conn.DescribeNetworkInterfaces(req)
+	if err != nil {
+		if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidNetworkInterfaceID.NotFound" {
+			// The ENI is gone now, so just remove the attachment from the state
+			d.SetId("")
+			return nil
+		}
+
+		return fmt.Errorf("Error retrieving ENI: %s", err)
+	}
+	if len(resp.NetworkInterfaces) != 1 {
+		return fmt.Errorf("Unable to find ENI (%s): %#v", interfaceId, resp.NetworkInterfaces)
+	}
+
+	eni := resp.NetworkInterfaces[0]
+
+	if eni.Attachment == nil {
+		// Interface is no longer attached, remove from state
+		d.SetId("")
+		return nil
+	}
+
+	d.Set("attachment_id", eni.Attachment.AttachmentId)
+	d.Set("device_index", eni.Attachment.DeviceIndex)
+	d.Set("instance_id", eni.Attachment.InstanceId)
+	d.Set("network_interface_id", eni.NetworkInterfaceId)
+	d.Set("status", eni.Attachment.Status)
+
+	return nil
+}
+
+func resourceAwsNetworkInterfaceAttachmentDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	interfaceId := d.Get("network_interface_id").(string)
+
+	detach_request := &ec2.DetachNetworkInterfaceInput{
+		AttachmentId: aws.String(d.Id()),
+		Force:        aws.Bool(true),
+	}
+
+	_, detach_err := conn.DetachNetworkInterface(detach_request)
+	if detach_err != nil {
+		if awsErr, ok := detach_err.(awserr.Error); !ok || awsErr.Code() != "InvalidAttachmentID.NotFound" {
+			return fmt.Errorf("Error detaching ENI: %s", detach_err)
+		}
+	}
+
+	log.Printf("[DEBUG] Waiting for ENI (%s) to become detached", interfaceId)
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"true"},
+		Target:  []string{"false"},
+		Refresh: networkInterfaceAttachmentRefreshFunc(conn, interfaceId),
+		Timeout: 10 * time.Minute,
+	}
+
+	if _, err := stateConf.WaitForState(); err != nil {
+		return fmt.Errorf(
+			"Error waiting for ENI (%s) to become detached: %s", interfaceId, err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_application.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_application.go
new file mode 100644
index 000000000..7333018e5
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_application.go
@@ -0,0 +1,633 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/opsworks"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsOpsworksApplication() *schema.Resource {
+	return &schema.Resource{
+
+		Create: resourceAwsOpsworksApplicationCreate,
+		Read:   resourceAwsOpsworksApplicationRead,
+		Update: resourceAwsOpsworksApplicationUpdate,
+		Delete: resourceAwsOpsworksApplicationDelete,
+		Schema: map[string]*schema.Schema{
+			"id": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"name": {
+				Type:     schema.TypeString,
+				Required: true,
+			},
+			"short_name": {
+				Type:     schema.TypeString,
+				Computed: true,
+				Optional: true,
+			},
+			// aws-flow-ruby | java | rails | php | nodejs | static | other
+			"type": {
+				Type:     schema.TypeString,
+				Required: true,
+				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
+					value := v.(string)
+
+					expected := [7]string{"aws-flow-ruby", "java", "rails", "php", "nodejs", "static", "other"}
+
+					found := false
+					for _, b := range expected {
+						if b == value {
+							found = true
+						}
+					}
+					if !found {
+						errors = append(errors, fmt.Errorf(
+							"%q has to be one of [aws-flow-ruby, java, rails, php, nodejs, static, other]", k))
+					}
+					return
+				},
+			},
+			"stack_id": {
+				Type:     schema.TypeString,
+				Required: true,
+			},
+			// TODO: the following 4 vals are really part of the Attributes array. We should validate that only ones relevant to the chosen type are set, perhaps. (what is the default type? how do they map?)
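+			// For reference, resourceAwsOpsworksApplicationAttributes below maps these
+			// to the OpsWorks attribute keys DocumentRoot, RailsEnv, AutoBundleOnDeploy
+			// and AwsFlowRubySettings, and resourceAwsOpsworksApplicationValidate
+			// enforces which of them each app type accepts: rails takes document_root,
+			// rails_env and auto_bundle_on_deploy; php, static and other take
+			// document_root; aws-flow-ruby takes aws_flow_ruby_settings; nodejs and
+			// java take none.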
+ "document_root": { + Type: schema.TypeString, + Optional: true, + //Default: "public", + }, + "rails_env": { + Type: schema.TypeString, + Optional: true, + //Default: "production", + }, + "auto_bundle_on_deploy": { + Type: schema.TypeString, + Optional: true, + //Default: true, + }, + "aws_flow_ruby_settings": { + Type: schema.TypeString, + Optional: true, + }, + "app_source": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + }, + + "url": { + Type: schema.TypeString, + Optional: true, + }, + + "username": { + Type: schema.TypeString, + Optional: true, + }, + + "password": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + }, + + "revision": { + Type: schema.TypeString, + Optional: true, + }, + + "ssh_key": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + // AutoSelectOpsworksMysqlInstance, OpsworksMysqlInstance, or RdsDbInstance. + // anything beside auto select will lead into failure in case the instance doesn't exist + // XXX: validation? + "data_source_type": { + Type: schema.TypeString, + Optional: true, + }, + "data_source_database_name": { + Type: schema.TypeString, + Optional: true, + }, + "data_source_arn": { + Type: schema.TypeString, + Optional: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "domains": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "environment": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + "secure": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + }, + }, + }, + "enable_ssl": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "ssl_configuration": { + Type: schema.TypeList, + Optional: true, + //Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "certificate": { + Type: schema.TypeString, + Required: true, + StateFunc: func(v interface{}) string { + switch v.(type) { + case string: + return strings.TrimSpace(v.(string)) + default: + return "" + } + }, + }, + "private_key": { + Type: schema.TypeString, + Required: true, + Sensitive: true, + StateFunc: func(v interface{}) string { + switch v.(type) { + case string: + return strings.TrimSpace(v.(string)) + default: + return "" + } + }, + }, + "chain": { + Type: schema.TypeString, + Optional: true, + StateFunc: func(v interface{}) string { + switch v.(type) { + case string: + return strings.TrimSpace(v.(string)) + default: + return "" + } + }, + }, + }, + }, + }, + }, + } +} + +func resourceAwsOpsworksApplicationValidate(d *schema.ResourceData) error { + appSourceCount := d.Get("app_source.#").(int) + if appSourceCount > 1 { + return fmt.Errorf("Only one app_source is permitted.") + } + + sslCount := d.Get("ssl_configuration.#").(int) + if sslCount > 1 { + return fmt.Errorf("Only one ssl_configuration is permitted.") + } + + if d.Get("type") == opsworks.AppTypeNodejs || d.Get("type") == opsworks.AppTypeJava { + // allowed attributes: none + if d.Get("document_root").(string) != "" || d.Get("rails_env").(string) != "" || d.Get("auto_bundle_on_deploy").(string) != "" || d.Get("aws_flow_ruby_settings").(string) != "" { + return fmt.Errorf("No additional attributes are allowed for app type '%s'.", 
d.Get("type").(string)) + } + } else if d.Get("type") == opsworks.AppTypeRails { + // allowed attributes: document_root, rails_env, auto_bundle_on_deploy + if d.Get("aws_flow_ruby_settings").(string) != "" { + return fmt.Errorf("Only 'document_root, rails_env, auto_bundle_on_deploy' are allowed for app type '%s'.", opsworks.AppTypeRails) + } + // rails_env is required + if _, ok := d.GetOk("rails_env"); !ok { + return fmt.Errorf("Set rails_env must be set if type is set to rails.") + } + } else if d.Get("type") == opsworks.AppTypePhp || d.Get("type") == opsworks.AppTypeStatic || d.Get("type") == opsworks.AppTypeOther { + log.Printf("[DEBUG] the app type is : %s", d.Get("type").(string)) + log.Printf("[DEBUG] the attributes are: document_root '%s', rails_env '%s', auto_bundle_on_deploy '%s', aws_flow_ruby_settings '%s'", d.Get("document_root").(string), d.Get("rails_env").(string), d.Get("auto_bundle_on_deploy").(string), d.Get("aws_flow_ruby_settings").(string)) + // allowed attributes: document_root + if d.Get("rails_env").(string) != "" || d.Get("auto_bundle_on_deploy").(string) != "" || d.Get("aws_flow_ruby_settings").(string) != "" { + return fmt.Errorf("Only 'document_root' is allowed for app type '%s'.", d.Get("type").(string)) + } + } else if d.Get("type") == opsworks.AppTypeAwsFlowRuby { + // allowed attributes: aws_flow_ruby_settings + if d.Get("document_root").(string) != "" || d.Get("rails_env").(string) != "" || d.Get("auto_bundle_on_deploy").(string) != "" { + return fmt.Errorf("Only 'aws_flow_ruby_settings' is allowed for app type '%s'.", d.Get("type").(string)) + } + } + + return nil +} + +func resourceAwsOpsworksApplicationRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + + req := &opsworks.DescribeAppsInput{ + AppIds: []*string{ + aws.String(d.Id()), + }, + } + + log.Printf("[DEBUG] Reading OpsWorks app: %s", d.Id()) + + resp, err := client.DescribeApps(req) + if err != nil { + if awserr, ok := err.(awserr.Error); ok { + if awserr.Code() == "ResourceNotFoundException" { + log.Printf("[INFO] App not found: %s", d.Id()) + d.SetId("") + return nil + } + } + return err + } + + app := resp.Apps[0] + + d.Set("name", app.Name) + d.Set("stack_id", app.StackId) + d.Set("type", app.Type) + d.Set("description", app.Description) + d.Set("domains", flattenStringList(app.Domains)) + d.Set("enable_ssl", app.EnableSsl) + resourceAwsOpsworksSetApplicationSsl(d, app.SslConfiguration) + resourceAwsOpsworksSetApplicationSource(d, app.AppSource) + resourceAwsOpsworksSetApplicationDataSources(d, app.DataSources) + resourceAwsOpsworksSetApplicationEnvironmentVariable(d, app.Environment) + resourceAwsOpsworksSetApplicationAttributes(d, app.Attributes) + return nil +} + +func resourceAwsOpsworksApplicationCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + + err := resourceAwsOpsworksApplicationValidate(d) + if err != nil { + return err + } + + req := &opsworks.CreateAppInput{ + Name: aws.String(d.Get("name").(string)), + Shortname: aws.String(d.Get("short_name").(string)), + StackId: aws.String(d.Get("stack_id").(string)), + Type: aws.String(d.Get("type").(string)), + Description: aws.String(d.Get("description").(string)), + Domains: expandStringList(d.Get("domains").([]interface{})), + EnableSsl: aws.Bool(d.Get("enable_ssl").(bool)), + SslConfiguration: resourceAwsOpsworksApplicationSsl(d), + AppSource: resourceAwsOpsworksApplicationSource(d), + DataSources: 
resourceAwsOpsworksApplicationDataSources(d), + Environment: resourceAwsOpsworksApplicationEnvironmentVariable(d), + Attributes: resourceAwsOpsworksApplicationAttributes(d), + } + + var resp *opsworks.CreateAppOutput + err = resource.Retry(2*time.Minute, func() *resource.RetryError { + var cerr error + resp, cerr = client.CreateApp(req) + if cerr != nil { + log.Printf("[INFO] client error") + if opserr, ok := cerr.(awserr.Error); ok { + // XXX: handle errors + log.Printf("[ERROR] OpsWorks error: %s message: %s", opserr.Code(), opserr.Message()) + return resource.RetryableError(cerr) + } + return resource.NonRetryableError(cerr) + } + return nil + }) + + if err != nil { + return err + } + + appID := *resp.AppId + d.SetId(appID) + d.Set("id", appID) + + return resourceAwsOpsworksApplicationRead(d, meta) +} + +func resourceAwsOpsworksApplicationUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + + err := resourceAwsOpsworksApplicationValidate(d) + if err != nil { + return err + } + + req := &opsworks.UpdateAppInput{ + AppId: aws.String(d.Id()), + Name: aws.String(d.Get("name").(string)), + Type: aws.String(d.Get("type").(string)), + Description: aws.String(d.Get("description").(string)), + Domains: expandStringList(d.Get("domains").([]interface{})), + EnableSsl: aws.Bool(d.Get("enable_ssl").(bool)), + SslConfiguration: resourceAwsOpsworksApplicationSsl(d), + AppSource: resourceAwsOpsworksApplicationSource(d), + DataSources: resourceAwsOpsworksApplicationDataSources(d), + Environment: resourceAwsOpsworksApplicationEnvironmentVariable(d), + Attributes: resourceAwsOpsworksApplicationAttributes(d), + } + + log.Printf("[DEBUG] Updating OpsWorks layer: %s", d.Id()) + + err = resource.Retry(2*time.Minute, func() *resource.RetryError { + _, cerr := client.UpdateApp(req) + if cerr != nil { + log.Printf("[INFO] client error") + if opserr, ok := cerr.(awserr.Error); ok { + // XXX: handle errors + log.Printf("[ERROR] OpsWorks error: %s message: %s", opserr.Code(), opserr.Message()) + return resource.NonRetryableError(cerr) + } + return resource.RetryableError(cerr) + } + return nil + }) + + if err != nil { + return err + } + return resourceAwsOpsworksApplicationRead(d, meta) +} + +func resourceAwsOpsworksApplicationDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + + req := &opsworks.DeleteAppInput{ + AppId: aws.String(d.Id()), + } + + log.Printf("[DEBUG] Deleting OpsWorks application: %s", d.Id()) + + _, err := client.DeleteApp(req) + return err +} + +func resourceAwsOpsworksSetApplicationEnvironmentVariable(d *schema.ResourceData, v []*opsworks.EnvironmentVariable) { + log.Printf("[DEBUG] envs: %s %d", v, len(v)) + if len(v) == 0 { + d.Set("environment", nil) + return + } + newValue := make([]*map[string]interface{}, len(v)) + + for i := 0; i < len(v); i++ { + config := v[i] + data := make(map[string]interface{}) + newValue[i] = &data + + if config.Key != nil { + data["key"] = *config.Key + } + if config.Value != nil { + data["value"] = *config.Value + } + if config.Secure != nil { + + if bool(*config.Secure) { + data["secure"] = &opsworksTrueString + } else { + data["secure"] = &opsworksFalseString + } + } + log.Printf("[DEBUG] v: %s", data) + } + + d.Set("environment", newValue) +} + +func resourceAwsOpsworksApplicationEnvironmentVariable(d *schema.ResourceData) []*opsworks.EnvironmentVariable { + environmentVariables := d.Get("environment").(*schema.Set).List() + result := 
make([]*opsworks.EnvironmentVariable, len(environmentVariables)) + + for i := 0; i < len(environmentVariables); i++ { + env := environmentVariables[i].(map[string]interface{}) + + result[i] = &opsworks.EnvironmentVariable{ + Key: aws.String(env["key"].(string)), + Value: aws.String(env["value"].(string)), + Secure: aws.Bool(env["secure"].(bool)), + } + } + return result +} + +func resourceAwsOpsworksApplicationSource(d *schema.ResourceData) *opsworks.Source { + count := d.Get("app_source.#").(int) + if count == 0 { + return nil + } + + return &opsworks.Source{ + Type: aws.String(d.Get("app_source.0.type").(string)), + Url: aws.String(d.Get("app_source.0.url").(string)), + Username: aws.String(d.Get("app_source.0.username").(string)), + Password: aws.String(d.Get("app_source.0.password").(string)), + Revision: aws.String(d.Get("app_source.0.revision").(string)), + SshKey: aws.String(d.Get("app_source.0.ssh_key").(string)), + } +} + +func resourceAwsOpsworksSetApplicationSource(d *schema.ResourceData, v *opsworks.Source) { + nv := make([]interface{}, 0, 1) + if v != nil { + m := make(map[string]interface{}) + if v.Type != nil { + m["type"] = *v.Type + } + if v.Url != nil { + m["url"] = *v.Url + } + if v.Username != nil { + m["username"] = *v.Username + } + if v.Password != nil { + m["password"] = *v.Password + } + if v.Revision != nil { + m["revision"] = *v.Revision + } + nv = append(nv, m) + } + + err := d.Set("app_source", nv) + if err != nil { + // should never happen + panic(err) + } +} + +func resourceAwsOpsworksApplicationDataSources(d *schema.ResourceData) []*opsworks.DataSource { + arn := d.Get("data_source_arn").(string) + databaseName := d.Get("data_source_database_name").(string) + databaseType := d.Get("data_source_type").(string) + + result := make([]*opsworks.DataSource, 1) + + if len(arn) > 0 || len(databaseName) > 0 || len(databaseType) > 0 { + result[0] = &opsworks.DataSource{ + Arn: aws.String(arn), + DatabaseName: aws.String(databaseName), + Type: aws.String(databaseType), + } + } + return result +} + +func resourceAwsOpsworksSetApplicationDataSources(d *schema.ResourceData, v []*opsworks.DataSource) { + d.Set("data_source_arn", nil) + d.Set("data_source_database_name", nil) + d.Set("data_source_type", nil) + + if len(v) == 0 { + return + } + + d.Set("data_source_arn", v[0].Arn) + d.Set("data_source_database_name", v[0].DatabaseName) + d.Set("data_source_type", v[0].Type) +} + +func resourceAwsOpsworksApplicationSsl(d *schema.ResourceData) *opsworks.SslConfiguration { + count := d.Get("ssl_configuration.#").(int) + if count == 0 { + return nil + } + + return &opsworks.SslConfiguration{ + PrivateKey: aws.String(d.Get("ssl_configuration.0.private_key").(string)), + Certificate: aws.String(d.Get("ssl_configuration.0.certificate").(string)), + Chain: aws.String(d.Get("ssl_configuration.0.chain").(string)), + } +} + +func resourceAwsOpsworksSetApplicationSsl(d *schema.ResourceData, v *opsworks.SslConfiguration) { + nv := make([]interface{}, 0, 1) + set := false + if v != nil { + m := make(map[string]interface{}) + if v.PrivateKey != nil { + m["private_key"] = *v.PrivateKey + set = true + } + if v.Certificate != nil { + m["certificate"] = *v.Certificate + set = true + } + if v.Chain != nil { + m["chain"] = *v.Chain + set = true + } + if set { + nv = append(nv, m) + } + } + + err := d.Set("ssl_configuration", nv) + if err != nil { + // should never happen + panic(err) + } +} + +func resourceAwsOpsworksApplicationAttributes(d *schema.ResourceData) map[string]*string { + attributes 
:= make(map[string]*string) + + if val := d.Get("document_root").(string); len(val) > 0 { + attributes[opsworks.AppAttributesKeysDocumentRoot] = aws.String(val) + } + if val := d.Get("aws_flow_ruby_settings").(string); len(val) > 0 { + attributes[opsworks.AppAttributesKeysAwsFlowRubySettings] = aws.String(val) + } + if val := d.Get("rails_env").(string); len(val) > 0 { + attributes[opsworks.AppAttributesKeysRailsEnv] = aws.String(val) + } + if val := d.Get("auto_bundle_on_deploy").(string); len(val) > 0 { + if val == "1" { + val = "true" + } else if val == "0" { + val = "false" + } + attributes[opsworks.AppAttributesKeysAutoBundleOnDeploy] = aws.String(val) + } + + return attributes +} + +func resourceAwsOpsworksSetApplicationAttributes(d *schema.ResourceData, v map[string]*string) { + d.Set("document_root", nil) + d.Set("rails_env", nil) + d.Set("aws_flow_ruby_settings", nil) + d.Set("auto_bundle_on_deploy", nil) + + if d.Get("type") == opsworks.AppTypeNodejs || d.Get("type") == opsworks.AppTypeJava { + return + } else if d.Get("type") == opsworks.AppTypeRails { + if val, ok := v[opsworks.AppAttributesKeysDocumentRoot]; ok { + d.Set("document_root", val) + } + if val, ok := v[opsworks.AppAttributesKeysRailsEnv]; ok { + d.Set("rails_env", val) + } + if val, ok := v[opsworks.AppAttributesKeysAutoBundleOnDeploy]; ok { + d.Set("auto_bundle_on_deploy", val) + } + return + } else if d.Get("type") == opsworks.AppTypePhp || d.Get("type") == opsworks.AppTypeStatic || d.Get("type") == opsworks.AppTypeOther { + if val, ok := v[opsworks.AppAttributesKeysDocumentRoot]; ok { + d.Set("document_root", val) + } + return + } else if d.Get("type") == opsworks.AppTypeAwsFlowRuby { + if val, ok := v[opsworks.AppAttributesKeysAwsFlowRubySettings]; ok { + d.Set("aws_flow_ruby_settings", val) + } + return + } + + return +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_custom_layer.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_custom_layer.go new file mode 100644 index 000000000..59de60db6 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_custom_layer.go @@ -0,0 +1,17 @@ +package aws + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsOpsworksCustomLayer() *schema.Resource { + layerType := &opsworksLayerType{ + TypeName: "custom", + CustomShortName: true, + + // The "custom" layer type has no additional attributes + Attributes: map[string]*opsworksLayerTypeAttribute{}, + } + + return layerType.SchemaResource() +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_ganglia_layer.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_ganglia_layer.go new file mode 100644 index 000000000..1aadefe5d --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_ganglia_layer.go @@ -0,0 +1,33 @@ +package aws + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsOpsworksGangliaLayer() *schema.Resource { + layerType := &opsworksLayerType{ + TypeName: "monitoring-master", + DefaultLayerName: "Ganglia", + + Attributes: map[string]*opsworksLayerTypeAttribute{ + "url": { + AttrName: "GangliaUrl", + Type: schema.TypeString, + Default: "/ganglia", + }, + "username": { + AttrName: "GangliaUser", + Type: schema.TypeString, + Default: "opsworks", + }, + "password": { + AttrName: 
"GangliaPassword", + Type: schema.TypeString, + Required: true, + WriteOnly: true, + }, + }, + } + + return layerType.SchemaResource() +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_haproxy_layer.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_haproxy_layer.go new file mode 100644 index 000000000..91e843257 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_haproxy_layer.go @@ -0,0 +1,48 @@ +package aws + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsOpsworksHaproxyLayer() *schema.Resource { + layerType := &opsworksLayerType{ + TypeName: "lb", + DefaultLayerName: "HAProxy", + + Attributes: map[string]*opsworksLayerTypeAttribute{ + "stats_enabled": { + AttrName: "EnableHaproxyStats", + Type: schema.TypeBool, + Default: true, + }, + "stats_url": { + AttrName: "HaproxyStatsUrl", + Type: schema.TypeString, + Default: "/haproxy?stats", + }, + "stats_user": { + AttrName: "HaproxyStatsUser", + Type: schema.TypeString, + Default: "opsworks", + }, + "stats_password": { + AttrName: "HaproxyStatsPassword", + Type: schema.TypeString, + WriteOnly: true, + Required: true, + }, + "healthcheck_url": { + AttrName: "HaproxyHealthCheckUrl", + Type: schema.TypeString, + Default: "/", + }, + "healthcheck_method": { + AttrName: "HaproxyHealthCheckMethod", + Type: schema.TypeString, + Default: "OPTIONS", + }, + }, + } + + return layerType.SchemaResource() +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_instance.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_instance.go new file mode 100644 index 000000000..ab7a7f471 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_instance.go @@ -0,0 +1,1058 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/opsworks" +) + +func resourceAwsOpsworksInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsOpsworksInstanceCreate, + Read: resourceAwsOpsworksInstanceRead, + Update: resourceAwsOpsworksInstanceUpdate, + Delete: resourceAwsOpsworksInstanceDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsOpsworksInstanceImport, + }, + + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + + "agent_version": { + Type: schema.TypeString, + Optional: true, + Default: "INHERIT", + }, + + "ami_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "architecture": { + Type: schema.TypeString, + Optional: true, + Default: "x86_64", + ValidateFunc: validateArchitecture, + }, + + "auto_scaling_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateAutoScalingType, + }, + + "availability_zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "created_at": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "delete_ebs": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + + "delete_eip": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + + 
"ebs_optimized": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + + "ec2_instance_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "ecs_cluster_arn": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "elastic_ip": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "hostname": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "infrastructure_class": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "install_updates_on_boot": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + + "instance_profile_arn": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "instance_type": { + Type: schema.TypeString, + Optional: true, + }, + + "last_service_error_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "layer_ids": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "os": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "platform": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "private_dns": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "private_ip": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "public_dns": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "public_ip": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "registered_by": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "reported_agent_version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "reported_os_family": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "reported_os_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "reported_os_version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "root_device_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + ValidateFunc: validateRootDeviceType, + }, + + "root_device_volume_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "security_group_ids": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "ssh_host_dsa_key_fingerprint": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "ssh_host_rsa_key_fingerprint": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "ssh_key_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "stack_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "state": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateState, + }, + + "status": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "subnet_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "tenancy": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateTenancy, + }, + + "virtualization_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateVirtualizationType, + }, + + "ebs_block_device": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + ForceNew: true, + Elem: 
&schema.Resource{ + Schema: map[string]*schema.Schema{ + "delete_on_termination": { + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + + "device_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "iops": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "snapshot_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "volume_size": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "volume_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["snapshot_id"].(string))) + return hashcode.String(buf.String()) + }, + }, + "ephemeral_block_device": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "device_name": { + Type: schema.TypeString, + Required: true, + }, + + "virtual_name": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["virtual_name"].(string))) + return hashcode.String(buf.String()) + }, + }, + + "root_block_device": { + // TODO: This is a set because we don't support singleton + // sub-resources today. We'll enforce that the set only ever has + // length zero or one below. When TF gains support for + // sub-resources this can be converted. + Type: schema.TypeSet, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Resource{ + // "You can only modify the volume size, volume type, and Delete on + // Termination flag on the block device mapping entry for the root + // device volume." 
- bit.ly/ec2bdmap
+					Schema: map[string]*schema.Schema{
+						"delete_on_termination": {
+							Type:     schema.TypeBool,
+							Optional: true,
+							Default:  true,
+							ForceNew: true,
+						},
+
+						"iops": {
+							Type:     schema.TypeInt,
+							Optional: true,
+							Computed: true,
+							ForceNew: true,
+						},
+
+						"volume_size": {
+							Type:     schema.TypeInt,
+							Optional: true,
+							Computed: true,
+							ForceNew: true,
+						},
+
+						"volume_type": {
+							Type:     schema.TypeString,
+							Optional: true,
+							Computed: true,
+							ForceNew: true,
+						},
+					},
+				},
+				Set: func(v interface{}) int {
+					// there can be only one root device; no need to hash anything
+					return 0
+				},
+			},
+		},
+	}
+}
+
+func validateArchitecture(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if value != "x86_64" && value != "i386" {
+		errors = append(errors, fmt.Errorf(
+			"%q must be one of \"x86_64\" or \"i386\"", k))
+	}
+	return
+}
+
+func validateTenancy(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if value != "dedicated" && value != "default" && value != "host" {
+		errors = append(errors, fmt.Errorf(
+			"%q must be one of \"dedicated\", \"default\" or \"host\"", k))
+	}
+	return
+}
+
+func validateAutoScalingType(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if value != "load" && value != "timer" {
+		errors = append(errors, fmt.Errorf(
+			"%q must be one of \"load\" or \"timer\"", k))
+	}
+	return
+}
+
+func validateRootDeviceType(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if value != "ebs" && value != "instance-store" {
+		errors = append(errors, fmt.Errorf(
+			"%q must be one of \"ebs\" or \"instance-store\"", k))
+	}
+	return
+}
+
+func validateState(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if value != "running" && value != "stopped" {
+		errors = append(errors, fmt.Errorf(
+			"%q must be one of \"running\" or \"stopped\"", k))
+	}
+	return
+}
+
+func validateVirtualizationType(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if value != "paravirtual" && value != "hvm" {
+		errors = append(errors, fmt.Errorf(
+			"%q must be one of \"paravirtual\" or \"hvm\"", k))
+	}
+	return
+}
+
+func resourceAwsOpsworksInstanceValidate(d *schema.ResourceData) error {
+	if d.HasChange("ami_id") {
+		if v, ok := d.GetOk("os"); ok {
+			if v.(string) != "Custom" {
+				return fmt.Errorf("OS must be \"Custom\" when using a custom ami_id")
+			}
+		}
+
+		if _, ok := d.GetOk("root_block_device"); ok {
+			return fmt.Errorf("Cannot specify root_block_device when using a custom ami_id.")
+		}
+
+		if _, ok := d.GetOk("ebs_block_device"); ok {
+			return fmt.Errorf("Cannot specify ebs_block_device when using a custom ami_id.")
+		}
+
+		if _, ok := d.GetOk("ephemeral_block_device"); ok {
+			return fmt.Errorf("Cannot specify ephemeral_block_device when using a custom ami_id.")
+		}
+	}
+	return nil
+}
+
+func resourceAwsOpsworksInstanceRead(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*AWSClient).opsworksconn
+
+	req := &opsworks.DescribeInstancesInput{
+		InstanceIds: []*string{
+			aws.String(d.Id()),
+		},
+	}
+
+	log.Printf("[DEBUG] Reading OpsWorks instance: %s", d.Id())
+
+	resp, err := client.DescribeInstances(req)
+	if err != nil {
+		if awserr, ok := err.(awserr.Error); ok {
+			if awserr.Code() == "ResourceNotFoundException" {
+				d.SetId("")
+				return nil
+			}
+		}
+		return err
+	}
+
+	// If nothing was found, then return no state
+	if len(resp.Instances) == 0 {
+		d.SetId("")
+		return nil
+	}
+
+
instance := resp.Instances[0] + + if instance.InstanceId == nil { + d.SetId("") + return nil + } + instanceId := *instance.InstanceId + + d.SetId(instanceId) + d.Set("agent_version", instance.AgentVersion) + d.Set("ami_id", instance.AmiId) + d.Set("architecture", instance.Architecture) + d.Set("auto_scaling_type", instance.AutoScalingType) + d.Set("availability_zone", instance.AvailabilityZone) + d.Set("created_at", instance.CreatedAt) + d.Set("ebs_optimized", instance.EbsOptimized) + d.Set("ec2_instance_id", instance.Ec2InstanceId) + d.Set("ecs_cluster_arn", instance.EcsClusterArn) + d.Set("elastic_ip", instance.ElasticIp) + d.Set("hostname", instance.Hostname) + d.Set("infrastructure_class", instance.InfrastructureClass) + d.Set("install_updates_on_boot", instance.InstallUpdatesOnBoot) + d.Set("id", instanceId) + d.Set("instance_profile_arn", instance.InstanceProfileArn) + d.Set("instance_type", instance.InstanceType) + d.Set("last_service_error_id", instance.LastServiceErrorId) + var layerIds []string + for _, v := range instance.LayerIds { + layerIds = append(layerIds, *v) + } + layerIds, err = sortListBasedonTFFile(layerIds, d, "layer_ids") + if err != nil { + return fmt.Errorf("[DEBUG] Error sorting layer_ids attribute: %#v", err) + } + if err := d.Set("layer_ids", layerIds); err != nil { + return fmt.Errorf("[DEBUG] Error setting layer_ids attribute: %#v, error: %#v", layerIds, err) + } + d.Set("os", instance.Os) + d.Set("platform", instance.Platform) + d.Set("private_dns", instance.PrivateDns) + d.Set("private_ip", instance.PrivateIp) + d.Set("public_dns", instance.PublicDns) + d.Set("public_ip", instance.PublicIp) + d.Set("registered_by", instance.RegisteredBy) + d.Set("reported_agent_version", instance.ReportedAgentVersion) + d.Set("reported_os_family", instance.ReportedOs.Family) + d.Set("reported_os_name", instance.ReportedOs.Name) + d.Set("reported_os_version", instance.ReportedOs.Version) + d.Set("root_device_type", instance.RootDeviceType) + d.Set("root_device_volume_id", instance.RootDeviceVolumeId) + d.Set("ssh_host_dsa_key_fingerprint", instance.SshHostDsaKeyFingerprint) + d.Set("ssh_host_rsa_key_fingerprint", instance.SshHostRsaKeyFingerprint) + d.Set("ssh_key_name", instance.SshKeyName) + d.Set("stack_id", instance.StackId) + d.Set("status", instance.Status) + d.Set("subnet_id", instance.SubnetId) + d.Set("tenancy", instance.Tenancy) + d.Set("virtualization_type", instance.VirtualizationType) + + // Read BlockDeviceMapping + ibds, err := readOpsworksBlockDevices(d, instance, meta) + if err != nil { + return err + } + + if err := d.Set("ebs_block_device", ibds["ebs"]); err != nil { + return err + } + if err := d.Set("ephemeral_block_device", ibds["ephemeral"]); err != nil { + return err + } + if ibds["root"] != nil { + if err := d.Set("root_block_device", []interface{}{ibds["root"]}); err != nil { + return err + } + } else { + d.Set("root_block_device", []interface{}{}) + } + + // Read Security Groups + sgs := make([]string, 0, len(instance.SecurityGroupIds)) + for _, sg := range instance.SecurityGroupIds { + sgs = append(sgs, *sg) + } + if err := d.Set("security_group_ids", sgs); err != nil { + return err + } + + return nil +} + +func resourceAwsOpsworksInstanceCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + + err := resourceAwsOpsworksInstanceValidate(d) + if err != nil { + return err + } + + req := &opsworks.CreateInstanceInput{ + AgentVersion: aws.String(d.Get("agent_version").(string)), + Architecture: 
aws.String(d.Get("architecture").(string)), + EbsOptimized: aws.Bool(d.Get("ebs_optimized").(bool)), + InstallUpdatesOnBoot: aws.Bool(d.Get("install_updates_on_boot").(bool)), + InstanceType: aws.String(d.Get("instance_type").(string)), + LayerIds: expandStringList(d.Get("layer_ids").([]interface{})), + StackId: aws.String(d.Get("stack_id").(string)), + } + + if v, ok := d.GetOk("ami_id"); ok { + req.AmiId = aws.String(v.(string)) + req.Os = aws.String("Custom") + } + + if v, ok := d.GetOk("auto_scaling_type"); ok { + req.AutoScalingType = aws.String(v.(string)) + } + + if v, ok := d.GetOk("availability_zone"); ok { + req.AvailabilityZone = aws.String(v.(string)) + } + + if v, ok := d.GetOk("hostname"); ok { + req.Hostname = aws.String(v.(string)) + } + + if v, ok := d.GetOk("os"); ok { + req.Os = aws.String(v.(string)) + } + + if v, ok := d.GetOk("root_device_type"); ok { + req.RootDeviceType = aws.String(v.(string)) + } + + if v, ok := d.GetOk("ssh_key_name"); ok { + req.SshKeyName = aws.String(v.(string)) + } + + if v, ok := d.GetOk("subnet_id"); ok { + req.SubnetId = aws.String(v.(string)) + } + + if v, ok := d.GetOk("tenancy"); ok { + req.Tenancy = aws.String(v.(string)) + } + + if v, ok := d.GetOk("virtualization_type"); ok { + req.VirtualizationType = aws.String(v.(string)) + } + + var blockDevices []*opsworks.BlockDeviceMapping + + if v, ok := d.GetOk("ebs_block_device"); ok { + vL := v.(*schema.Set).List() + for _, v := range vL { + bd := v.(map[string]interface{}) + ebs := &opsworks.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)), + } + + if v, ok := bd["snapshot_id"].(string); ok && v != "" { + ebs.SnapshotId = aws.String(v) + } + + if v, ok := bd["volume_size"].(int); ok && v != 0 { + ebs.VolumeSize = aws.Int64(int64(v)) + } + + if v, ok := bd["volume_type"].(string); ok && v != "" { + ebs.VolumeType = aws.String(v) + } + + if v, ok := bd["iops"].(int); ok && v > 0 { + ebs.Iops = aws.Int64(int64(v)) + } + + blockDevices = append(blockDevices, &opsworks.BlockDeviceMapping{ + DeviceName: aws.String(bd["device_name"].(string)), + Ebs: ebs, + }) + } + } + + if v, ok := d.GetOk("ephemeral_block_device"); ok { + vL := v.(*schema.Set).List() + for _, v := range vL { + bd := v.(map[string]interface{}) + blockDevices = append(blockDevices, &opsworks.BlockDeviceMapping{ + DeviceName: aws.String(bd["device_name"].(string)), + VirtualName: aws.String(bd["virtual_name"].(string)), + }) + } + } + + if v, ok := d.GetOk("root_block_device"); ok { + vL := v.(*schema.Set).List() + if len(vL) > 1 { + return fmt.Errorf("Cannot specify more than one root_block_device.") + } + for _, v := range vL { + bd := v.(map[string]interface{}) + ebs := &opsworks.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)), + } + + if v, ok := bd["volume_size"].(int); ok && v != 0 { + ebs.VolumeSize = aws.Int64(int64(v)) + } + + if v, ok := bd["volume_type"].(string); ok && v != "" { + ebs.VolumeType = aws.String(v) + } + + if v, ok := bd["iops"].(int); ok && v > 0 { + ebs.Iops = aws.Int64(int64(v)) + } + + blockDevices = append(blockDevices, &opsworks.BlockDeviceMapping{ + DeviceName: aws.String("ROOT_DEVICE"), + Ebs: ebs, + }) + } + } + + if len(blockDevices) > 0 { + req.BlockDeviceMappings = blockDevices + } + + log.Printf("[DEBUG] Creating OpsWorks instance") + + var resp *opsworks.CreateInstanceOutput + + resp, err = client.CreateInstance(req) + if err != nil { + return err + } + + if resp.InstanceId == nil { + return fmt.Errorf("Error launching 
instance: no instance returned in response") + } + + instanceId := *resp.InstanceId + d.SetId(instanceId) + d.Set("id", instanceId) + + if v, ok := d.GetOk("state"); ok && v.(string) == "running" { + err := startOpsworksInstance(d, meta, true) + if err != nil { + return err + } + } + + return resourceAwsOpsworksInstanceRead(d, meta) +} + +func resourceAwsOpsworksInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + + err := resourceAwsOpsworksInstanceValidate(d) + if err != nil { + return err + } + + req := &opsworks.UpdateInstanceInput{ + AgentVersion: aws.String(d.Get("agent_version").(string)), + Architecture: aws.String(d.Get("architecture").(string)), + InstanceId: aws.String(d.Get("id").(string)), + InstallUpdatesOnBoot: aws.Bool(d.Get("install_updates_on_boot").(bool)), + } + + if v, ok := d.GetOk("ami_id"); ok { + req.AmiId = aws.String(v.(string)) + req.Os = aws.String("Custom") + } + + if v, ok := d.GetOk("auto_scaling_type"); ok { + req.AutoScalingType = aws.String(v.(string)) + } + + if v, ok := d.GetOk("hostname"); ok { + req.Hostname = aws.String(v.(string)) + } + + if v, ok := d.GetOk("instance_type"); ok { + req.InstanceType = aws.String(v.(string)) + } + + if v, ok := d.GetOk("layer_ids"); ok { + req.LayerIds = expandStringList(v.([]interface{})) + } + + if v, ok := d.GetOk("os"); ok { + req.Os = aws.String(v.(string)) + } + + if v, ok := d.GetOk("ssh_key_name"); ok { + req.SshKeyName = aws.String(v.(string)) + } + + log.Printf("[DEBUG] Updating OpsWorks instance: %s", d.Id()) + + _, err = client.UpdateInstance(req) + if err != nil { + return err + } + + var status string + + if v, ok := d.GetOk("status"); ok { + status = v.(string) + } else { + status = "stopped" + } + + if v, ok := d.GetOk("state"); ok { + state := v.(string) + if state == "running" { + if status == "stopped" || status == "stopping" || status == "shutting_down" { + err := startOpsworksInstance(d, meta, false) + if err != nil { + return err + } + } + } else { + if status != "stopped" && status != "stopping" && status != "shutting_down" { + err := stopOpsworksInstance(d, meta, true) + if err != nil { + return err + } + } + } + } + + return resourceAwsOpsworksInstanceRead(d, meta) +} + +func resourceAwsOpsworksInstanceDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + + if v, ok := d.GetOk("status"); ok && v.(string) != "stopped" { + err := stopOpsworksInstance(d, meta, true) + if err != nil { + return err + } + } + + req := &opsworks.DeleteInstanceInput{ + InstanceId: aws.String(d.Id()), + DeleteElasticIp: aws.Bool(d.Get("delete_eip").(bool)), + DeleteVolumes: aws.Bool(d.Get("delete_ebs").(bool)), + } + + log.Printf("[DEBUG] Deleting OpsWorks instance: %s", d.Id()) + + _, err := client.DeleteInstance(req) + if err != nil { + return err + } + + d.SetId("") + return nil +} + +func resourceAwsOpsworksInstanceImport( + d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + // Neither delete_eip nor delete_ebs can be fetched + // from any API call, so we need to default to the values + // we set in the schema by default + d.Set("delete_ebs", true) + d.Set("delete_eip", true) + return []*schema.ResourceData{d}, nil +} + +func startOpsworksInstance(d *schema.ResourceData, meta interface{}, wait bool) error { + client := meta.(*AWSClient).opsworksconn + + instanceId := d.Get("id").(string) + + req := &opsworks.StartInstanceInput{ + InstanceId: aws.String(instanceId), + } + + 
log.Printf("[DEBUG] Starting OpsWorks instance: %s", instanceId) + + _, err := client.StartInstance(req) + + if err != nil { + return err + } + + if wait { + log.Printf("[DEBUG] Waiting for instance (%s) to become running", instanceId) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"requested", "pending", "booting", "running_setup"}, + Target: []string{"online"}, + Refresh: OpsworksInstanceStateRefreshFunc(client, instanceId), + Timeout: 10 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for instance (%s) to become stopped: %s", + instanceId, err) + } + } + + return nil +} + +func stopOpsworksInstance(d *schema.ResourceData, meta interface{}, wait bool) error { + client := meta.(*AWSClient).opsworksconn + + instanceId := d.Get("id").(string) + + req := &opsworks.StopInstanceInput{ + InstanceId: aws.String(instanceId), + } + + log.Printf("[DEBUG] Stopping OpsWorks instance: %s", instanceId) + + _, err := client.StopInstance(req) + + if err != nil { + return err + } + + if wait { + log.Printf("[DEBUG] Waiting for instance (%s) to become stopped", instanceId) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"stopping", "terminating", "shutting_down", "terminated"}, + Target: []string{"stopped"}, + Refresh: OpsworksInstanceStateRefreshFunc(client, instanceId), + Timeout: 10 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for instance (%s) to become stopped: %s", + instanceId, err) + } + } + + return nil +} + +func readOpsworksBlockDevices(d *schema.ResourceData, instance *opsworks.Instance, meta interface{}) ( + map[string]interface{}, error) { + + blockDevices := make(map[string]interface{}) + blockDevices["ebs"] = make([]map[string]interface{}, 0) + blockDevices["ephemeral"] = make([]map[string]interface{}, 0) + blockDevices["root"] = nil + + if len(instance.BlockDeviceMappings) == 0 { + return nil, nil + } + + for _, bdm := range instance.BlockDeviceMappings { + bd := make(map[string]interface{}) + if bdm.Ebs != nil && bdm.Ebs.DeleteOnTermination != nil { + bd["delete_on_termination"] = *bdm.Ebs.DeleteOnTermination + } + if bdm.Ebs != nil && bdm.Ebs.VolumeSize != nil { + bd["volume_size"] = *bdm.Ebs.VolumeSize + } + if bdm.Ebs != nil && bdm.Ebs.VolumeType != nil { + bd["volume_type"] = *bdm.Ebs.VolumeType + } + if bdm.Ebs != nil && bdm.Ebs.Iops != nil { + bd["iops"] = *bdm.Ebs.Iops + } + if bdm.DeviceName != nil && *bdm.DeviceName == "ROOT_DEVICE" { + blockDevices["root"] = bd + } else { + if bdm.DeviceName != nil { + bd["device_name"] = *bdm.DeviceName + } + if bdm.VirtualName != nil { + bd["virtual_name"] = *bdm.VirtualName + blockDevices["ephemeral"] = append(blockDevices["ephemeral"].([]map[string]interface{}), bd) + } else { + if bdm.Ebs != nil && bdm.Ebs.SnapshotId != nil { + bd["snapshot_id"] = *bdm.Ebs.SnapshotId + } + blockDevices["ebs"] = append(blockDevices["ebs"].([]map[string]interface{}), bd) + } + } + } + return blockDevices, nil +} + +func OpsworksInstanceStateRefreshFunc(conn *opsworks.OpsWorks, instanceID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := conn.DescribeInstances(&opsworks.DescribeInstancesInput{ + InstanceIds: []*string{aws.String(instanceID)}, + }) + if err != nil { + if awserr, ok := err.(awserr.Error); ok && awserr.Code() == 
"ResourceNotFoundException" { + // Set this to nil as if we didn't find anything. + resp = nil + } else { + log.Printf("Error on OpsworksInstanceStateRefresh: %s", err) + return nil, "", err + } + } + + if resp == nil || len(resp.Instances) == 0 { + // Sometimes AWS just has consistency issues and doesn't see + // our instance yet. Return an empty state. + return nil, "", nil + } + + i := resp.Instances[0] + return i, *i.Status, nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_java_app_layer.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_java_app_layer.go new file mode 100644 index 000000000..14679658f --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_java_app_layer.go @@ -0,0 +1,42 @@ +package aws + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsOpsworksJavaAppLayer() *schema.Resource { + layerType := &opsworksLayerType{ + TypeName: "java-app", + DefaultLayerName: "Java App Server", + + Attributes: map[string]*opsworksLayerTypeAttribute{ + "jvm_type": { + AttrName: "Jvm", + Type: schema.TypeString, + Default: "openjdk", + }, + "jvm_version": { + AttrName: "JvmVersion", + Type: schema.TypeString, + Default: "7", + }, + "jvm_options": { + AttrName: "JvmOptions", + Type: schema.TypeString, + Default: "", + }, + "app_server": { + AttrName: "JavaAppServer", + Type: schema.TypeString, + Default: "tomcat", + }, + "app_server_version": { + AttrName: "JavaAppServerVersion", + Type: schema.TypeString, + Default: "7", + }, + }, + } + + return layerType.SchemaResource() +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_memcached_layer.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_memcached_layer.go new file mode 100644 index 000000000..301d73924 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_memcached_layer.go @@ -0,0 +1,22 @@ +package aws + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsOpsworksMemcachedLayer() *schema.Resource { + layerType := &opsworksLayerType{ + TypeName: "memcached", + DefaultLayerName: "Memcached", + + Attributes: map[string]*opsworksLayerTypeAttribute{ + "allocated_memory": { + AttrName: "MemcachedMemory", + Type: schema.TypeInt, + Default: 512, + }, + }, + } + + return layerType.SchemaResource() +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_mysql_layer.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_mysql_layer.go new file mode 100644 index 000000000..560641a4e --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_mysql_layer.go @@ -0,0 +1,27 @@ +package aws + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsOpsworksMysqlLayer() *schema.Resource { + layerType := &opsworksLayerType{ + TypeName: "db-master", + DefaultLayerName: "MySQL", + + Attributes: map[string]*opsworksLayerTypeAttribute{ + "root_password": { + AttrName: "MysqlRootPassword", + Type: schema.TypeString, + WriteOnly: true, + }, + "root_password_on_all_instances": { + AttrName: "MysqlRootPasswordUbiquitous", + Type: schema.TypeBool, + Default: true, + }, + }, + } + + return layerType.SchemaResource() +} diff --git 
a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_nodejs_app_layer.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_nodejs_app_layer.go new file mode 100644 index 000000000..d11261b63 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_nodejs_app_layer.go @@ -0,0 +1,22 @@ +package aws + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsOpsworksNodejsAppLayer() *schema.Resource { + layerType := &opsworksLayerType{ + TypeName: "nodejs-app", + DefaultLayerName: "Node.js App Server", + + Attributes: map[string]*opsworksLayerTypeAttribute{ + "nodejs_version": { + AttrName: "NodejsVersion", + Type: schema.TypeString, + Default: "0.10.38", + }, + }, + } + + return layerType.SchemaResource() +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_permission.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_permission.go new file mode 100644 index 000000000..6e4d5f2d1 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_permission.go @@ -0,0 +1,156 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/opsworks" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsOpsworksPermission() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsOpsworksSetPermission, + Update: resourceAwsOpsworksSetPermission, + Delete: resourceAwsOpsworksPermissionDelete, + Read: resourceAwsOpsworksPermissionRead, + + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "allow_ssh": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + }, + "allow_sudo": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + }, + "user_arn": { + Type: schema.TypeString, + Required: true, + }, + // one of deny, show, deploy, manage, iam_only + "level": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + expected := [5]string{"deny", "show", "deploy", "manage", "iam_only"} + + found := false + for _, b := range expected { + if b == value { + found = true + } + } + if !found { + errors = append(errors, fmt.Errorf( + "%q has to be one of [deny, show, deploy, manage, iam_only]", k)) + } + return + }, + }, + "stack_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + }, + } +} + +func resourceAwsOpsworksPermissionDelete(d *schema.ResourceData, meta interface{}) error { + return nil +} + +func resourceAwsOpsworksPermissionRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + + req := &opsworks.DescribePermissionsInput{ + IamUserArn: aws.String(d.Get("user_arn").(string)), + StackId: aws.String(d.Get("stack_id").(string)), + } + + log.Printf("[DEBUG] Reading OpsWorks permissions for: %s on stack: %s", d.Get("user_arn"), d.Get("stack_id")) + + resp, err := client.DescribePermissions(req) + if err != nil { + if awserr, ok := err.(awserr.Error); ok { + if awserr.Code() == "ResourceNotFoundException" { + log.Printf("[INFO] Permission not found") + d.SetId("") + return nil + } + } + return err + } + + found
:= false + id := "" + for _, permission := range resp.Permissions { + id = *permission.IamUserArn + *permission.StackId + + if d.Get("user_arn").(string)+d.Get("stack_id").(string) == id { + found = true + d.SetId(id) + d.Set("id", id) + d.Set("allow_ssh", permission.AllowSsh) + d.Set("allow_sudo", permission.AllowSudo) + d.Set("user_arn", permission.IamUserArn) + d.Set("stack_id", permission.StackId) + d.Set("level", permission.Level) + } + + } + + if !found { + d.SetId("") + log.Printf("[INFO] The correct permission could not be found for: %s on stack: %s", d.Get("user_arn"), d.Get("stack_id")) + } + + return nil +} + +func resourceAwsOpsworksSetPermission(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + + req := &opsworks.SetPermissionInput{ + AllowSudo: aws.Bool(d.Get("allow_sudo").(bool)), + AllowSsh: aws.Bool(d.Get("allow_ssh").(bool)), + Level: aws.String(d.Get("level").(string)), + IamUserArn: aws.String(d.Get("user_arn").(string)), + StackId: aws.String(d.Get("stack_id").(string)), + } + + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + var cerr error + _, cerr = client.SetPermission(req) + if cerr != nil { + log.Printf("[INFO] client error") + if opserr, ok := cerr.(awserr.Error); ok { + // XXX: handle errors + log.Printf("[ERROR] OpsWorks error: %s message: %s", opserr.Code(), opserr.Message()) + return resource.RetryableError(cerr) + } + return resource.NonRetryableError(cerr) + } + return nil + }) + + if err != nil { + return err + } + + return resourceAwsOpsworksPermissionRead(d, meta) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_php_app_layer.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_php_app_layer.go new file mode 100644 index 000000000..c3176af5b --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_php_app_layer.go @@ -0,0 +1,16 @@ +package aws + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsOpsworksPhpAppLayer() *schema.Resource { + layerType := &opsworksLayerType{ + TypeName: "php-app", + DefaultLayerName: "PHP App Server", + + Attributes: map[string]*opsworksLayerTypeAttribute{}, + } + + return layerType.SchemaResource() +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_rails_app_layer.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_rails_app_layer.go new file mode 100644 index 000000000..55f869c6d --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_rails_app_layer.go @@ -0,0 +1,47 @@ +package aws + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsOpsworksRailsAppLayer() *schema.Resource { + layerType := &opsworksLayerType{ + TypeName: "rails-app", + DefaultLayerName: "Rails App Server", + + Attributes: map[string]*opsworksLayerTypeAttribute{ + "ruby_version": { + AttrName: "RubyVersion", + Type: schema.TypeString, + Default: "2.0.0", + }, + "app_server": { + AttrName: "RailsStack", + Type: schema.TypeString, + Default: "apache_passenger", + }, + "passenger_version": { + AttrName: "PassengerVersion", + Type: schema.TypeString, + Default: "4.0.46", + }, + "rubygems_version": { + AttrName: "RubygemsVersion", + Type: schema.TypeString, + Default: "2.2.2", + }, + "manage_bundler": { + AttrName: "ManageBundler", + Type:
schema.TypeBool, + Default: true, + }, + "bundler_version": { + AttrName: "BundlerVersion", + Type: schema.TypeString, + Default: "1.5.3", + }, + }, + } + + return layerType.SchemaResource() +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_rds_db_instance.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_rds_db_instance.go new file mode 100644 index 000000000..d1aee9030 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_rds_db_instance.go @@ -0,0 +1,202 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/opsworks" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsOpsworksRdsDbInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsOpsworksRdsDbInstanceRegister, + Update: resourceAwsOpsworksRdsDbInstanceUpdate, + Delete: resourceAwsOpsworksRdsDbInstanceDeregister, + Read: resourceAwsOpsworksRdsDbInstanceRead, + + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "stack_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "rds_db_instance_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "db_password": { + Type: schema.TypeString, + Required: true, + Sensitive: true, + }, + "db_user": { + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceAwsOpsworksRdsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + + d.Partial(true) + + d.SetPartial("rds_db_instance_arn") + req := &opsworks.UpdateRdsDbInstanceInput{ + RdsDbInstanceArn: aws.String(d.Get("rds_db_instance_arn").(string)), + } + + requestUpdate := false + if d.HasChange("db_user") { + d.SetPartial("db_user") + req.DbUser = aws.String(d.Get("db_user").(string)) + requestUpdate = true + } + if d.HasChange("db_password") { + d.SetPartial("db_password") + req.DbPassword = aws.String(d.Get("db_password").(string)) + requestUpdate = true + } + + if requestUpdate { + log.Printf("[DEBUG] OpsWorks RDS DB Instance Modification request: %s", req) + + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + var cerr error + _, cerr = client.UpdateRdsDbInstance(req) + if cerr != nil { + log.Printf("[INFO] client error") + if opserr, ok := cerr.(awserr.Error); ok { + log.Printf("[ERROR] OpsWorks error: %s message: %s", opserr.Code(), opserr.Message()) + } + return resource.NonRetryableError(cerr) + } + return nil + }) + + if err != nil { + return err + } + + } + + d.Partial(false) + + return resourceAwsOpsworksRdsDbInstanceRead(d, meta) +} + +func resourceAwsOpsworksRdsDbInstanceDeregister(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + + req := &opsworks.DeregisterRdsDbInstanceInput{ + RdsDbInstanceArn: aws.String(d.Get("rds_db_instance_arn").(string)), + } + + log.Printf("[DEBUG] Deregistering rds db instance '%s' from stack: %s", d.Get("rds_db_instance_arn"), d.Get("stack_id")) + + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + var cerr error + _, cerr = client.DeregisterRdsDbInstance(req) + if cerr != nil { + log.Printf("[INFO] client error") + if opserr, ok := cerr.(awserr.Error); ok { + if opserr.Code() ==
"ResourceNotFoundException" { + log.Printf("[INFO] The db instance could not be found. Remove it from state.") + d.SetId("") + + return nil + } + log.Printf("[ERROR] OpsWorks error: %s message: %s", opserr.Code(), opserr.Message()) + } + return resource.NonRetryableError(cerr) + } + + return nil + }) + + if err != nil { + return err + } + + return nil +} + +func resourceAwsOpsworksRdsDbInstanceRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + + req := &opsworks.DescribeRdsDbInstancesInput{ + StackId: aws.String(d.Get("stack_id").(string)), + } + + log.Printf("[DEBUG] Reading OpsWorks registerd rds db instances for stack: %s", d.Get("stack_id")) + + resp, err := client.DescribeRdsDbInstances(req) + if err != nil { + return err + } + + found := false + id := "" + for _, instance := range resp.RdsDbInstances { + id = fmt.Sprintf("%s%s", *instance.RdsDbInstanceArn, *instance.StackId) + + if fmt.Sprintf("%s%s", d.Get("rds_db_instance_arn").(string), d.Get("stack_id").(string)) == id { + found = true + d.SetId(id) + d.Set("id", id) + d.Set("stack_id", instance.StackId) + d.Set("rds_db_instance_arn", instance.RdsDbInstanceArn) + d.Set("db_user", instance.DbUser) + } + + } + + if false == found { + d.SetId("") + log.Printf("[INFO] The rds instance '%s' could not be found for stack: '%s'", d.Get("rds_db_instance_arn"), d.Get("stack_id")) + } + + return nil +} + +func resourceAwsOpsworksRdsDbInstanceRegister(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + + req := &opsworks.RegisterRdsDbInstanceInput{ + StackId: aws.String(d.Get("stack_id").(string)), + RdsDbInstanceArn: aws.String(d.Get("rds_db_instance_arn").(string)), + DbUser: aws.String(d.Get("db_user").(string)), + DbPassword: aws.String(d.Get("db_password").(string)), + } + + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + var cerr error + _, cerr = client.RegisterRdsDbInstance(req) + if cerr != nil { + log.Printf("[INFO] client error") + if opserr, ok := cerr.(awserr.Error); ok { + log.Printf("[ERROR] OpsWorks error: %s message: %s", opserr.Code(), opserr.Message()) + } + return resource.NonRetryableError(cerr) + } + + return nil + }) + + if err != nil { + return err + } + + return resourceAwsOpsworksRdsDbInstanceRead(d, meta) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_stack.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_stack.go new file mode 100644 index 000000000..496670506 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_stack.go @@ -0,0 +1,591 @@ +package aws + +import ( + "fmt" + "log" + "os" + "strings" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/opsworks" +) + +func resourceAwsOpsworksStack() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsOpsworksStackCreate, + Read: resourceAwsOpsworksStackRead, + Update: resourceAwsOpsworksStackUpdate, + Delete: resourceAwsOpsworksStackDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "agent_version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + 
+ "id": { + Type: schema.TypeString, + Computed: true, + }, + + "name": { + Type: schema.TypeString, + Required: true, + }, + + "region": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + + "service_role_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "default_instance_profile_arn": { + Type: schema.TypeString, + Required: true, + }, + + "color": { + Type: schema.TypeString, + Optional: true, + }, + + "configuration_manager_name": { + Type: schema.TypeString, + Optional: true, + Default: "Chef", + }, + + "configuration_manager_version": { + Type: schema.TypeString, + Optional: true, + Default: "11.10", + }, + + "manage_berkshelf": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "berkshelf_version": { + Type: schema.TypeString, + Optional: true, + Default: "3.2.0", + }, + + "custom_cookbooks_source": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + }, + + "url": { + Type: schema.TypeString, + Required: true, + }, + + "username": { + Type: schema.TypeString, + Optional: true, + }, + + "password": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + }, + + "revision": { + Type: schema.TypeString, + Optional: true, + }, + + "ssh_key": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + "custom_json": { + Type: schema.TypeString, + Optional: true, + }, + + "default_availability_zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "default_os": { + Type: schema.TypeString, + Optional: true, + Default: "Ubuntu 12.04 LTS", + }, + + "default_root_device_type": { + Type: schema.TypeString, + Optional: true, + Default: "instance-store", + }, + + "default_ssh_key_name": { + Type: schema.TypeString, + Optional: true, + }, + + "default_subnet_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "hostname_theme": { + Type: schema.TypeString, + Optional: true, + Default: "Layer_Dependent", + }, + + "use_custom_cookbooks": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "use_opsworks_security_groups": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + + "vpc_id": { + Type: schema.TypeString, + ForceNew: true, + Computed: true, + Optional: true, + }, + + "stack_endpoint": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsOpsworksStackValidate(d *schema.ResourceData) error { + cookbooksSourceCount := d.Get("custom_cookbooks_source.#").(int) + if cookbooksSourceCount > 1 { + return fmt.Errorf("Only one custom_cookbooks_source is permitted") + } + + vpcId := d.Get("vpc_id").(string) + if vpcId != "" { + if d.Get("default_subnet_id").(string) == "" { + return fmt.Errorf("default_subnet_id must be set if vpc_id is set") + } + } else { + if d.Get("default_availability_zone").(string) == "" { + return fmt.Errorf("either vpc_id or default_availability_zone must be set") + } + } + + return nil +} + +func resourceAwsOpsworksStackCustomCookbooksSource(d *schema.ResourceData) *opsworks.Source { + count := d.Get("custom_cookbooks_source.#").(int) + if count == 0 { + return nil + } + + return &opsworks.Source{ + Type: aws.String(d.Get("custom_cookbooks_source.0.type").(string)), + Url: aws.String(d.Get("custom_cookbooks_source.0.url").(string)), + Username: aws.String(d.Get("custom_cookbooks_source.0.username").(string)), + Password: 
aws.String(d.Get("custom_cookbooks_source.0.password").(string)), + Revision: aws.String(d.Get("custom_cookbooks_source.0.revision").(string)), + SshKey: aws.String(d.Get("custom_cookbooks_source.0.ssh_key").(string)), + } +} + +func resourceAwsOpsworksSetStackCustomCookbooksSource(d *schema.ResourceData, v *opsworks.Source) { + nv := make([]interface{}, 0, 1) + if v != nil && v.Type != nil && *v.Type != "" { + m := make(map[string]interface{}) + if v.Type != nil { + m["type"] = *v.Type + } + if v.Url != nil { + m["url"] = *v.Url + } + if v.Username != nil { + m["username"] = *v.Username + } + if v.Revision != nil { + m["revision"] = *v.Revision + } + // v.Password will, on read, contain the placeholder string + // "*****FILTERED*****", so we ignore it on read and let persist + // the value already in the state. + nv = append(nv, m) + } + + err := d.Set("custom_cookbooks_source", nv) + if err != nil { + // should never happen + panic(err) + } +} + +func resourceAwsOpsworksStackRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + var conErr error + if v := d.Get("stack_endpoint").(string); v != "" { + client, conErr = opsworksConnForRegion(v, meta) + if conErr != nil { + return conErr + } + } + + req := &opsworks.DescribeStacksInput{ + StackIds: []*string{ + aws.String(d.Id()), + }, + } + + log.Printf("[DEBUG] Reading OpsWorks stack: %s", d.Id()) + + // notFound represents the number of times we've called DescribeStacks looking + // for this Stack. If it's not found in the the default region we're in, we + // check us-east-1 in the event this stack was created with Terraform before + // version 0.9 + // See https://github.com/hashicorp/terraform/issues/12842 + var notFound int + var resp *opsworks.DescribeStacksOutput + var dErr error + + for { + resp, dErr = client.DescribeStacks(req) + if dErr != nil { + if awserr, ok := dErr.(awserr.Error); ok { + if awserr.Code() == "ResourceNotFoundException" { + if notFound < 1 { + // If we haven't already, try us-east-1, legacy connection + notFound++ + var connErr error + client, connErr = opsworksConnForRegion("us-east-1", meta) + if connErr != nil { + return connErr + } + // start again from the top of the FOR loop, but with a client + // configured to talk to us-east-1 + continue + } + + // We've tried both the original and us-east-1 endpoint, and the stack + // is still not found + log.Printf("[DEBUG] OpsWorks stack (%s) not found", d.Id()) + d.SetId("") + return nil + } + // not ResoureNotFoundException, fall through to returning error + } + return dErr + } + // If the stack was found, set the stack_endpoint + if client.Config.Region != nil && *client.Config.Region != "" { + log.Printf("[DEBUG] Setting stack_endpoint for (%s) to (%s)", d.Id(), *client.Config.Region) + if err := d.Set("stack_endpoint", *client.Config.Region); err != nil { + log.Printf("[WARN] Error setting stack_endpoint: %s", err) + } + } + log.Printf("[DEBUG] Breaking stack endpoint search, found stack for (%s)", d.Id()) + // Break the FOR loop + break + } + + stack := resp.Stacks[0] + d.Set("agent_version", stack.AgentVersion) + d.Set("name", stack.Name) + d.Set("region", stack.Region) + d.Set("default_instance_profile_arn", stack.DefaultInstanceProfileArn) + d.Set("service_role_arn", stack.ServiceRoleArn) + d.Set("default_availability_zone", stack.DefaultAvailabilityZone) + d.Set("default_os", stack.DefaultOs) + d.Set("default_root_device_type", stack.DefaultRootDeviceType) + d.Set("default_ssh_key_name", 
stack.DefaultSshKeyName) + d.Set("default_subnet_id", stack.DefaultSubnetId) + d.Set("hostname_theme", stack.HostnameTheme) + d.Set("use_custom_cookbooks", stack.UseCustomCookbooks) + if stack.CustomJson != nil { + d.Set("custom_json", stack.CustomJson) + } + d.Set("use_opsworks_security_groups", stack.UseOpsworksSecurityGroups) + d.Set("vpc_id", stack.VpcId) + if color, ok := stack.Attributes["Color"]; ok { + d.Set("color", color) + } + if stack.ConfigurationManager != nil { + d.Set("configuration_manager_name", stack.ConfigurationManager.Name) + d.Set("configuration_manager_version", stack.ConfigurationManager.Version) + } + if stack.ChefConfiguration != nil { + d.Set("berkshelf_version", stack.ChefConfiguration.BerkshelfVersion) + d.Set("manage_berkshelf", stack.ChefConfiguration.ManageBerkshelf) + } + resourceAwsOpsworksSetStackCustomCookbooksSource(d, stack.CustomCookbooksSource) + + return nil +} + +// opsworksConn will return a connection for the stack_endpoint in the +// configuration. Stacks can only be accessed or managed within the endpoint +// in which they are created, so we allow users to specify an original endpoint +// for Stacks created before multiple endpoints were offered (Terraform v0.9.0). +// See: +// - https://github.com/hashicorp/terraform/pull/12688 +// - https://github.com/hashicorp/terraform/issues/12842 +func opsworksConnForRegion(region string, meta interface{}) (*opsworks.OpsWorks, error) { + originalConn := meta.(*AWSClient).opsworksconn + + // Regions are the same, no need to reconfigure + if originalConn.Config.Region != nil && *originalConn.Config.Region == region { + return originalConn, nil + } + + // Set up base session + sess, err := session.NewSession(&originalConn.Config) + if err != nil { + return nil, errwrap.Wrapf("Error creating AWS session: {{err}}", err) + } + + sess.Handlers.Build.PushBackNamed(addTerraformVersionToUserAgent) + + if extraDebug := os.Getenv("TERRAFORM_AWS_AUTHFAILURE_DEBUG"); extraDebug != "" { + sess.Handlers.UnmarshalError.PushFrontNamed(debugAuthFailure) + } + + newSession := sess.Copy(&aws.Config{Region: aws.String(region)}) + newOpsworksconn := opsworks.New(newSession) + + log.Printf("[DEBUG] Returning new OpsWorks client") + return newOpsworksconn, nil +} + +func resourceAwsOpsworksStackCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + + err := resourceAwsOpsworksStackValidate(d) + if err != nil { + return err + } + + req := &opsworks.CreateStackInput{ + DefaultInstanceProfileArn: aws.String(d.Get("default_instance_profile_arn").(string)), + Name: aws.String(d.Get("name").(string)), + Region: aws.String(d.Get("region").(string)), + ServiceRoleArn: aws.String(d.Get("service_role_arn").(string)), + DefaultOs: aws.String(d.Get("default_os").(string)), + UseOpsworksSecurityGroups: aws.Bool(d.Get("use_opsworks_security_groups").(bool)), + } + req.ConfigurationManager = &opsworks.StackConfigurationManager{ + Name: aws.String(d.Get("configuration_manager_name").(string)), + Version: aws.String(d.Get("configuration_manager_version").(string)), + } + inVpc := false + if vpcId, ok := d.GetOk("vpc_id"); ok { + req.VpcId = aws.String(vpcId.(string)) + inVpc = true + } + if defaultSubnetId, ok := d.GetOk("default_subnet_id"); ok { + req.DefaultSubnetId = aws.String(defaultSubnetId.(string)) + } + if defaultAvailabilityZone, ok := d.GetOk("default_availability_zone"); ok { + req.DefaultAvailabilityZone = aws.String(defaultAvailabilityZone.(string)) + } + if defaultRootDeviceType, ok 
:= d.GetOk("default_root_device_type"); ok { + req.DefaultRootDeviceType = aws.String(defaultRootDeviceType.(string)) + } + + log.Printf("[DEBUG] Creating OpsWorks stack: %s", req) + + var resp *opsworks.CreateStackOutput + err = resource.Retry(20*time.Minute, func() *resource.RetryError { + var cerr error + resp, cerr = client.CreateStack(req) + if cerr != nil { + if opserr, ok := cerr.(awserr.Error); ok { + // If Terraform is also managing the service IAM role, + // it may have just been created and not yet be + // propagated. + // AWS doesn't provide a machine-readable code for this + // specific error, so we're forced to do fragile message + // matching. + // The full error we're looking for looks something like + // the following: + // Service Role Arn: [...] is not yet propagated, please try again in a couple of minutes + propErr := "not yet propagated" + trustErr := "not the necessary trust relationship" + validateErr := "validate IAM role permission" + if opserr.Code() == "ValidationException" && (strings.Contains(opserr.Message(), trustErr) || strings.Contains(opserr.Message(), propErr) || strings.Contains(opserr.Message(), validateErr)) { + log.Printf("[INFO] Waiting for service IAM role to propagate") + return resource.RetryableError(cerr) + } + } + return resource.NonRetryableError(cerr) + } + return nil + }) + if err != nil { + return err + } + + stackId := *resp.StackId + d.SetId(stackId) + d.Set("id", stackId) + + if inVpc && *req.UseOpsworksSecurityGroups { + // For VPC-based stacks, OpsWorks asynchronously creates some default + // security groups which must exist before layers can be created. + // Unfortunately it doesn't tell us what the ids of these are, so + // we can't actually check for them. Instead, we just wait a nominal + // amount of time for their creation to complete. 
+ log.Print("[INFO] Waiting for OpsWorks built-in security groups to be created") + time.Sleep(30 * time.Second) + } + + return resourceAwsOpsworksStackUpdate(d, meta) +} + +func resourceAwsOpsworksStackUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + var conErr error + if v := d.Get("stack_endpoint").(string); v != "" { + client, conErr = opsworksConnForRegion(v, meta) + if conErr != nil { + return conErr + } + } + + err := resourceAwsOpsworksStackValidate(d) + if err != nil { + return err + } + + req := &opsworks.UpdateStackInput{ + CustomJson: aws.String(d.Get("custom_json").(string)), + DefaultInstanceProfileArn: aws.String(d.Get("default_instance_profile_arn").(string)), + DefaultRootDeviceType: aws.String(d.Get("default_root_device_type").(string)), + DefaultSshKeyName: aws.String(d.Get("default_ssh_key_name").(string)), + Name: aws.String(d.Get("name").(string)), + ServiceRoleArn: aws.String(d.Get("service_role_arn").(string)), + StackId: aws.String(d.Id()), + UseCustomCookbooks: aws.Bool(d.Get("use_custom_cookbooks").(bool)), + UseOpsworksSecurityGroups: aws.Bool(d.Get("use_opsworks_security_groups").(bool)), + Attributes: make(map[string]*string), + CustomCookbooksSource: resourceAwsOpsworksStackCustomCookbooksSource(d), + } + if v, ok := d.GetOk("agent_version"); ok { + req.AgentVersion = aws.String(v.(string)) + } + if v, ok := d.GetOk("default_os"); ok { + req.DefaultOs = aws.String(v.(string)) + } + if v, ok := d.GetOk("default_subnet_id"); ok { + req.DefaultSubnetId = aws.String(v.(string)) + } + if v, ok := d.GetOk("default_availability_zone"); ok { + req.DefaultAvailabilityZone = aws.String(v.(string)) + } + if v, ok := d.GetOk("hostname_theme"); ok { + req.HostnameTheme = aws.String(v.(string)) + } + if v, ok := d.GetOk("color"); ok { + req.Attributes["Color"] = aws.String(v.(string)) + } + + req.ChefConfiguration = &opsworks.ChefConfiguration{ + BerkshelfVersion: aws.String(d.Get("berkshelf_version").(string)), + ManageBerkshelf: aws.Bool(d.Get("manage_berkshelf").(bool)), + } + + req.ConfigurationManager = &opsworks.StackConfigurationManager{ + Name: aws.String(d.Get("configuration_manager_name").(string)), + Version: aws.String(d.Get("configuration_manager_version").(string)), + } + + log.Printf("[DEBUG] Updating OpsWorks stack: %s", req) + + _, err = client.UpdateStack(req) + if err != nil { + return err + } + + return resourceAwsOpsworksStackRead(d, meta) +} + +func resourceAwsOpsworksStackDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + var conErr error + if v := d.Get("stack_endpoint").(string); v != "" { + client, conErr = opsworksConnForRegion(v, meta) + if conErr != nil { + return conErr + } + } + + req := &opsworks.DeleteStackInput{ + StackId: aws.String(d.Id()), + } + + log.Printf("[DEBUG] Deleting OpsWorks stack: %s", d.Id()) + + _, err := client.DeleteStack(req) + if err != nil { + return err + } + + // For a stack in a VPC, OpsWorks has created some default security groups + // in the VPC, which it will now delete. + // Unfortunately, the security groups are deleted asynchronously and there + // is no robust way for us to determine when it is done. The VPC itself + // isn't deletable until the security groups are cleaned up, so this could + // make 'terraform destroy' fail if the VPC is also managed and we don't + // wait for the security groups to be deleted. 
+ // There is no robust way to check for this, so we'll just wait a + // nominal amount of time. + _, inVpc := d.GetOk("vpc_id") + _, useOpsworksDefaultSg := d.GetOk("use_opsworks_security_groups") + + if inVpc && useOpsworksDefaultSg { + log.Print("[INFO] Waiting for OpsWorks built-in security groups to be deleted") + time.Sleep(30 * time.Second) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_static_web_layer.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_static_web_layer.go new file mode 100644 index 000000000..df91b1b1b --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_static_web_layer.go @@ -0,0 +1,16 @@ +package aws + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsOpsworksStaticWebLayer() *schema.Resource { + layerType := &opsworksLayerType{ + TypeName: "web", + DefaultLayerName: "Static Web Server", + + Attributes: map[string]*opsworksLayerTypeAttribute{}, + } + + return layerType.SchemaResource() +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_user_profile.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_user_profile.go new file mode 100644 index 000000000..39670b295 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_user_profile.go @@ -0,0 +1,137 @@ +package aws + +import ( + "log" + + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/opsworks" +) + +func resourceAwsOpsworksUserProfile() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsOpsworksUserProfileCreate, + Read: resourceAwsOpsworksUserProfileRead, + Update: resourceAwsOpsworksUserProfileUpdate, + Delete: resourceAwsOpsworksUserProfileDelete, + + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + + "user_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "allow_self_management": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "ssh_username": { + Type: schema.TypeString, + Required: true, + }, + + "ssh_public_key": { + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func resourceAwsOpsworksUserProfileRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + + req := &opsworks.DescribeUserProfilesInput{ + IamUserArns: []*string{ + aws.String(d.Id()), + }, + } + + log.Printf("[DEBUG] Reading OpsWorks user profile: %s", d.Id()) + + resp, err := client.DescribeUserProfiles(req) + if err != nil { + if awserr, ok := err.(awserr.Error); ok { + if awserr.Code() == "ResourceNotFoundException" { + log.Printf("[DEBUG] OpsWorks user profile (%s) not found", d.Id()) + d.SetId("") + return nil + } + } + return err + } + + for _, profile := range resp.UserProfiles { + d.Set("allow_self_management", profile.AllowSelfManagement) + d.Set("user_arn", profile.IamUserArn) + d.Set("ssh_public_key", profile.SshPublicKey) + d.Set("ssh_username", profile.SshUsername) + break + } + + return nil +} + +func resourceAwsOpsworksUserProfileCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + + req := &opsworks.CreateUserProfileInput{ + AllowSelfManagement:
aws.Bool(d.Get("allow_self_management").(bool)), + IamUserArn: aws.String(d.Get("user_arn").(string)), + SshPublicKey: aws.String(d.Get("ssh_public_key").(string)), + SshUsername: aws.String(d.Get("ssh_username").(string)), + } + + resp, err := client.CreateUserProfile(req) + if err != nil { + return err + } + + d.SetId(*resp.IamUserArn) + + return resourceAwsOpsworksUserProfileUpdate(d, meta) +} + +func resourceAwsOpsworksUserProfileUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + + req := &opsworks.UpdateUserProfileInput{ + AllowSelfManagement: aws.Bool(d.Get("allow_self_management").(bool)), + IamUserArn: aws.String(d.Get("user_arn").(string)), + SshPublicKey: aws.String(d.Get("ssh_public_key").(string)), + SshUsername: aws.String(d.Get("ssh_username").(string)), + } + + log.Printf("[DEBUG] Updating OpsWorks user profile: %s", req) + + _, err := client.UpdateUserProfile(req) + if err != nil { + return err + } + + return resourceAwsOpsworksUserProfileRead(d, meta) +} + +func resourceAwsOpsworksUserProfileDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + + req := &opsworks.DeleteUserProfileInput{ + IamUserArn: aws.String(d.Id()), + } + + log.Printf("[DEBUG] Deleting OpsWorks user profile: %s", d.Id()) + + _, err := client.DeleteUserProfile(req) + + return err +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_placement_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_placement_group.go new file mode 100644 index 000000000..e5da78c9e --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_placement_group.go @@ -0,0 +1,153 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsPlacementGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsPlacementGroupCreate, + Read: resourceAwsPlacementGroupRead, + Delete: resourceAwsPlacementGroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "strategy": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsPlacementGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + name := d.Get("name").(string) + input := ec2.CreatePlacementGroupInput{ + GroupName: aws.String(name), + Strategy: aws.String(d.Get("strategy").(string)), + } + log.Printf("[DEBUG] Creating EC2 Placement group: %s", input) + _, err := conn.CreatePlacementGroup(&input) + if err != nil { + return err + } + + wait := resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"available"}, + Timeout: 5 * time.Minute, + MinTimeout: 1 * time.Second, + Refresh: func() (interface{}, string, error) { + out, err := conn.DescribePlacementGroups(&ec2.DescribePlacementGroupsInput{ + GroupNames: []*string{aws.String(name)}, + }) + + if err != nil { + return out, "", err + } + + if len(out.PlacementGroups) == 0 { + return out, "", fmt.Errorf("Placement group not found (%q)", name) + } + pg := out.PlacementGroups[0] + + 
return out, *pg.State, nil + }, + } + + _, err = wait.WaitForState() + if err != nil { + return err + } + + log.Printf("[DEBUG] EC2 Placement group created: %q", name) + + d.SetId(name) + + return resourceAwsPlacementGroupRead(d, meta) +} + +func resourceAwsPlacementGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + input := ec2.DescribePlacementGroupsInput{ + GroupNames: []*string{aws.String(d.Id())}, + } + out, err := conn.DescribePlacementGroups(&input) + if err != nil { + return err + } + pg := out.PlacementGroups[0] + + log.Printf("[DEBUG] Received EC2 Placement Group: %s", pg) + + d.Set("name", pg.GroupName) + d.Set("strategy", pg.Strategy) + + return nil +} + +func resourceAwsPlacementGroupDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + log.Printf("[DEBUG] Deleting EC2 Placement Group %q", d.Id()) + _, err := conn.DeletePlacementGroup(&ec2.DeletePlacementGroupInput{ + GroupName: aws.String(d.Id()), + }) + if err != nil { + return err + } + + wait := resource.StateChangeConf{ + Pending: []string{"deleting"}, + Target: []string{"deleted"}, + Timeout: 5 * time.Minute, + MinTimeout: 1 * time.Second, + Refresh: func() (interface{}, string, error) { + out, err := conn.DescribePlacementGroups(&ec2.DescribePlacementGroupsInput{ + GroupNames: []*string{aws.String(d.Id())}, + }) + + if err != nil { + awsErr := err.(awserr.Error) + if awsErr.Code() == "InvalidPlacementGroup.Unknown" { + return out, "deleted", nil + } + return out, "", awsErr + } + + if len(out.PlacementGroups) == 0 { + return out, "deleted", nil + } + + pg := out.PlacementGroups[0] + + return out, *pg.State, nil + }, + } + + _, err = wait.WaitForState() + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_proxy_protocol_policy.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_proxy_protocol_policy.go new file mode 100644 index 000000000..ae7d61dc9 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_proxy_protocol_policy.go @@ -0,0 +1,267 @@ +package aws + +import ( + "fmt" + "log" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/elb" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsProxyProtocolPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsProxyProtocolPolicyCreate, + Read: resourceAwsProxyProtocolPolicyRead, + Update: resourceAwsProxyProtocolPolicyUpdate, + Delete: resourceAwsProxyProtocolPolicyDelete, + + Schema: map[string]*schema.Schema{ + "load_balancer": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "instance_ports": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Required: true, + Set: schema.HashString, + }, + }, + } +} + +func resourceAwsProxyProtocolPolicyCreate(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + elbname := aws.String(d.Get("load_balancer").(string)) + + input := &elb.CreateLoadBalancerPolicyInput{ + LoadBalancerName: elbname, + PolicyAttributes: []*elb.PolicyAttribute{ + &elb.PolicyAttribute{ + AttributeName: aws.String("ProxyProtocol"), + AttributeValue: aws.String("True"), + }, + }, + PolicyName: aws.String("TFEnableProxyProtocol"), + PolicyTypeName: aws.String("ProxyProtocolPolicyType"), + } + + // Create a policy + 
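+ // The ProxyProtocol policy is created once per load balancer here; + // attaching it to individual instance ports happens afterwards, in the + // Update function, via SetLoadBalancerPoliciesForBackendServer.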
log.Printf("[DEBUG] ELB create a policy %s from policy type %s", + *input.PolicyName, *input.PolicyTypeName) + + if _, err := elbconn.CreateLoadBalancerPolicy(input); err != nil { + return fmt.Errorf("Error creating a policy %s: %s", + *input.PolicyName, err) + } + + // Assign the policy name for use later + d.Partial(true) + d.SetId(fmt.Sprintf("%s:%s", *elbname, *input.PolicyName)) + d.SetPartial("load_balancer") + log.Printf("[INFO] ELB PolicyName: %s", *input.PolicyName) + + return resourceAwsProxyProtocolPolicyUpdate(d, meta) +} + +func resourceAwsProxyProtocolPolicyRead(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + elbname := aws.String(d.Get("load_balancer").(string)) + + // Retrieve the current ELB policies for updating the state + req := &elb.DescribeLoadBalancersInput{ + LoadBalancerNames: []*string{elbname}, + } + resp, err := elbconn.DescribeLoadBalancers(req) + if err != nil { + if isLoadBalancerNotFound(err) { + // The ELB is gone now, so just remove it from the state + d.SetId("") + return nil + } + return fmt.Errorf("Error retrieving ELB attributes: %s", err) + } + + backends := flattenBackendPolicies(resp.LoadBalancerDescriptions[0].BackendServerDescriptions) + + ports := []*string{} + for ip := range backends { + ipstr := strconv.Itoa(int(ip)) + ports = append(ports, &ipstr) + } + d.Set("instance_ports", ports) + d.Set("load_balancer", *elbname) + return nil +} + +func resourceAwsProxyProtocolPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + elbname := aws.String(d.Get("load_balancer").(string)) + + // Retrieve the current ELB policies for updating the state + req := &elb.DescribeLoadBalancersInput{ + LoadBalancerNames: []*string{elbname}, + } + resp, err := elbconn.DescribeLoadBalancers(req) + if err != nil { + if isLoadBalancerNotFound(err) { + // The ELB is gone now, so just remove it from the state + d.SetId("") + return nil + } + return fmt.Errorf("Error retrieving ELB attributes: %s", err) + } + + backends := flattenBackendPolicies(resp.LoadBalancerDescriptions[0].BackendServerDescriptions) + _, policyName := resourceAwsProxyProtocolPolicyParseId(d.Id()) + + d.Partial(true) + if d.HasChange("instance_ports") { + o, n := d.GetChange("instance_ports") + os := o.(*schema.Set) + ns := n.(*schema.Set) + remove := os.Difference(ns).List() + add := ns.Difference(os).List() + + inputs := []*elb.SetLoadBalancerPoliciesForBackendServerInput{} + + i, err := resourceAwsProxyProtocolPolicyRemove(policyName, remove, backends) + if err != nil { + return err + } + inputs = append(inputs, i...) + + i, err = resourceAwsProxyProtocolPolicyAdd(policyName, add, backends) + if err != nil { + return err + } + inputs = append(inputs, i...) 
+ + for _, input := range inputs { + input.LoadBalancerName = elbname + if _, err := elbconn.SetLoadBalancerPoliciesForBackendServer(input); err != nil { + return fmt.Errorf("Error setting policy for backend: %s", err) + } + } + + d.SetPartial("instance_ports") + } + + return resourceAwsProxyProtocolPolicyRead(d, meta) +} + +func resourceAwsProxyProtocolPolicyDelete(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + elbname := aws.String(d.Get("load_balancer").(string)) + + // Retrieve the current ELB policies for updating the state + req := &elb.DescribeLoadBalancersInput{ + LoadBalancerNames: []*string{elbname}, + } + var err error + resp, err := elbconn.DescribeLoadBalancers(req) + if err != nil { + if isLoadBalancerNotFound(err) { + // The ELB is gone now, so just remove it from the state + d.SetId("") + return nil + } + return fmt.Errorf("Error retrieving ELB attributes: %s", err) + } + + backends := flattenBackendPolicies(resp.LoadBalancerDescriptions[0].BackendServerDescriptions) + ports := d.Get("instance_ports").(*schema.Set).List() + _, policyName := resourceAwsProxyProtocolPolicyParseId(d.Id()) + + inputs, err := resourceAwsProxyProtocolPolicyRemove(policyName, ports, backends) + if err != nil { + return fmt.Errorf("Error detaching a policy from backend: %s", err) + } + for _, input := range inputs { + input.LoadBalancerName = elbname + if _, err := elbconn.SetLoadBalancerPoliciesForBackendServer(input); err != nil { + return fmt.Errorf("Error setting policy for backend: %s", err) + } + } + + pOpt := &elb.DeleteLoadBalancerPolicyInput{ + LoadBalancerName: elbname, + PolicyName: aws.String(policyName), + } + if _, err := elbconn.DeleteLoadBalancerPolicy(pOpt); err != nil { + return fmt.Errorf("Error removing a policy from load balancer: %s", err) + } + + return nil +} + +func resourceAwsProxyProtocolPolicyRemove(policyName string, ports []interface{}, backends map[int64][]string) ([]*elb.SetLoadBalancerPoliciesForBackendServerInput, error) { + inputs := make([]*elb.SetLoadBalancerPoliciesForBackendServerInput, 0, len(ports)) + for _, p := range ports { + ip, err := strconv.ParseInt(p.(string), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error detaching the policy: %s", err) + } + + newPolicies := []*string{} + curPolicies, found := backends[ip] + if !found { + // No policy for this instance port found, just skip it. + continue + } + + for _, policy := range curPolicies { + if policy == policyName { + // remove the policy + continue + } + newPolicies = append(newPolicies, aws.String(policy)) + } + + inputs = append(inputs, &elb.SetLoadBalancerPoliciesForBackendServerInput{ + InstancePort: &ip, + PolicyNames: newPolicies, + }) + } + return inputs, nil +} + +func resourceAwsProxyProtocolPolicyAdd(policyName string, ports []interface{}, backends map[int64][]string) ([]*elb.SetLoadBalancerPoliciesForBackendServerInput, error) { + inputs := make([]*elb.SetLoadBalancerPoliciesForBackendServerInput, 0, len(ports)) + for _, p := range ports { + ip, err := strconv.ParseInt(p.(string), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error attaching the policy: %s", err) + } + + newPolicies := []*string{} + curPolicies := backends[ip] + for _, p := range curPolicies { + if p == policyName { + // Just remove it for now. It will be back later.
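+ // (The policy is re-appended after this loop, so it ends up in + // newPolicies exactly once.)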
+ continue + } + newPolicies = append(newPolicies, aws.String(p)) + } + newPolicies = append(newPolicies, aws.String(policyName)) + + inputs = append(inputs, &elb.SetLoadBalancerPoliciesForBackendServerInput{ + InstancePort: &ip, + PolicyNames: newPolicies, + }) + } + return inputs, nil +} + +// resourceAwsProxyProtocolPolicyParseId takes an ID and parses it into +// its constituent parts. You need two axes (LB name, policy name) +// to create or identify a proxy protocol policy in AWS's API. +func resourceAwsProxyProtocolPolicyParseId(id string) (string, string) { + parts := strings.SplitN(id, ":", 2) + return parts[0], parts[1] +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster.go new file mode 100644 index 000000000..8fc72ce5b --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster.go @@ -0,0 +1,740 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/rds" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsRDSCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsRDSClusterCreate, + Read: resourceAwsRDSClusterRead, + Update: resourceAwsRDSClusterUpdate, + Delete: resourceAwsRDSClusterDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsRdsClusterImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(120 * time.Minute), + Update: schema.DefaultTimeout(120 * time.Minute), + Delete: schema.DefaultTimeout(120 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + "availability_zones": { + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + ForceNew: true, + Computed: true, + Set: schema.HashString, + }, + + "cluster_identifier": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"cluster_identifier_prefix"}, + ValidateFunc: validateRdsIdentifier, + }, + "cluster_identifier_prefix": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateRdsIdentifierPrefix, + }, + + "cluster_members": { + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Computed: true, + Set: schema.HashString, + }, + + "database_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "db_subnet_group_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "db_cluster_parameter_group_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "endpoint": { + Type: schema.TypeString, + Computed: true, + }, + + "reader_endpoint": { + Type: schema.TypeString, + Computed: true, + }, + + "engine": { + Type: schema.TypeString, + Computed: true, + }, + + "storage_encrypted": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + + "final_snapshot_identifier": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + value := v.(string) + if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) { + es = append(es, fmt.Errorf( + "only alphanumeric characters and
hyphens allowed in %q", k)) + } + if regexp.MustCompile(`--`).MatchString(value) { + es = append(es, fmt.Errorf("%q cannot contain two consecutive hyphens", k)) + } + if regexp.MustCompile(`-$`).MatchString(value) { + es = append(es, fmt.Errorf("%q cannot end in a hyphen", k)) + } + return + }, + }, + + "skip_final_snapshot": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "master_username": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + + "master_password": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + }, + + "snapshot_identifier": { + Type: schema.TypeString, + Computed: false, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "port": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + // apply_immediately is used to determine when the update modifications + // take place. + // See http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html + "apply_immediately": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "vpc_security_group_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "preferred_backup_window": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateOnceADayWindowFormat, + }, + + "preferred_maintenance_window": { + Type: schema.TypeString, + Optional: true, + Computed: true, + StateFunc: func(val interface{}) string { + if val == nil { + return "" + } + return strings.ToLower(val.(string)) + }, + ValidateFunc: validateOnceAWeekWindowFormat, + }, + + "backup_retention_period": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + value := v.(int) + if value > 35 { + es = append(es, fmt.Errorf( + "backup retention period cannot be more than 35 days")) + } + return + }, + }, + + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + + "replication_source_identifier": { + Type: schema.TypeString, + Optional: true, + }, + + "iam_database_authentication_enabled": { + Type: schema.TypeBool, + Optional: true, + }, + + "cluster_resource_id": { + Type: schema.TypeString, + Computed: true, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsRdsClusterImport( + d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + // Neither skip_final_snapshot nor final_snapshot_identifier can be fetched + // from any API call, so we need to default skip_final_snapshot to true so + // that final_snapshot_identifier is not required + d.Set("skip_final_snapshot", true) + return []*schema.ResourceData{d}, nil +} + +func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) + + var identifier string + if v, ok := d.GetOk("cluster_identifier"); ok { + identifier = v.(string) + } else { + if v, ok := d.GetOk("cluster_identifier_prefix"); ok { + identifier = resource.PrefixedUniqueId(v.(string)) + } else { + identifier = resource.PrefixedUniqueId("tf-") + } + + d.Set("cluster_identifier", identifier) + } + + if _, ok := d.GetOk("snapshot_identifier"); ok { + opts := rds.RestoreDBClusterFromSnapshotInput{ + DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)), + 
SnapshotIdentifier: aws.String(d.Get("snapshot_identifier").(string)), + Engine: aws.String("aurora"), + Tags: tags, + } + + if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 { + opts.AvailabilityZones = expandStringList(attr.List()) + } + + if attr, ok := d.GetOk("db_subnet_group_name"); ok { + opts.DBSubnetGroupName = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("database_name"); ok { + opts.DatabaseName = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("option_group_name"); ok { + opts.OptionGroupName = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("port"); ok { + opts.Port = aws.Int64(int64(attr.(int))) + } + + var sgUpdate bool + if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { + sgUpdate = true + opts.VpcSecurityGroupIds = expandStringList(attr.List()) + } + + log.Printf("[DEBUG] RDS Cluster restore from snapshot configuration: %s", opts) + _, err := conn.RestoreDBClusterFromSnapshot(&opts) + if err != nil { + return fmt.Errorf("Error creating RDS Cluster: %s", err) + } + + if sgUpdate { + log.Printf("[INFO] RDS Cluster is restoring from snapshot with default security, but custom security should be set, will now update after snapshot is restored!") + + d.SetId(d.Get("cluster_identifier").(string)) + + log.Printf("[INFO] RDS Cluster Instance ID: %s", d.Id()) + + log.Println("[INFO] Waiting for RDS Cluster to be available") + + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating", "backing-up", "modifying", "preparing-data-migration", "migrating"}, + Target: []string{"available"}, + Refresh: resourceAwsRDSClusterStateRefreshFunc(d, meta), + Timeout: d.Timeout(schema.TimeoutCreate), + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + // Wait, catching any errors + _, err := stateConf.WaitForState() + if err != nil { + return err + } + + err = resourceAwsRDSClusterInstanceUpdate(d, meta) + if err != nil { + return err + } + } + } else if _, ok := d.GetOk("replication_source_identifier"); ok { + createOpts := &rds.CreateDBClusterInput{ + DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)), + Engine: aws.String("aurora"), + StorageEncrypted: aws.Bool(d.Get("storage_encrypted").(bool)), + ReplicationSourceIdentifier: aws.String(d.Get("replication_source_identifier").(string)), + Tags: tags, + } + + if attr, ok := d.GetOk("port"); ok { + createOpts.Port = aws.Int64(int64(attr.(int))) + } + + if attr, ok := d.GetOk("db_subnet_group_name"); ok { + createOpts.DBSubnetGroupName = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("db_cluster_parameter_group_name"); ok { + createOpts.DBClusterParameterGroupName = aws.String(attr.(string)) + } + + if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { + createOpts.VpcSecurityGroupIds = expandStringList(attr.List()) + } + + if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 { + createOpts.AvailabilityZones = expandStringList(attr.List()) + } + + if v, ok := d.GetOk("backup_retention_period"); ok { + createOpts.BackupRetentionPeriod = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("preferred_backup_window"); ok { + createOpts.PreferredBackupWindow = aws.String(v.(string)) + } + + if v, ok := d.GetOk("preferred_maintenance_window"); ok { + createOpts.PreferredMaintenanceWindow = aws.String(v.(string)) + } + + if attr, ok := d.GetOk("kms_key_id"); ok { + createOpts.KmsKeyId = aws.String(attr.(string)) + } + + log.Printf("[DEBUG] Create RDS Cluster as read replica: %s", 
createOpts) + resp, err := conn.CreateDBCluster(createOpts) + if err != nil { + log.Printf("[ERROR] Error creating RDS Cluster: %s", err) + return err + } + + log.Printf("[DEBUG]: RDS Cluster create response: %s", resp) + + } else { + if _, ok := d.GetOk("master_password"); !ok { + return fmt.Errorf(`provider.aws: aws_rds_cluster: %s: "master_password": required field is not set`, d.Get("database_name").(string)) + } + + if _, ok := d.GetOk("master_username"); !ok { + return fmt.Errorf(`provider.aws: aws_rds_cluster: %s: "master_username": required field is not set`, d.Get("database_name").(string)) + } + + createOpts := &rds.CreateDBClusterInput{ + DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)), + Engine: aws.String("aurora"), + MasterUserPassword: aws.String(d.Get("master_password").(string)), + MasterUsername: aws.String(d.Get("master_username").(string)), + StorageEncrypted: aws.Bool(d.Get("storage_encrypted").(bool)), + Tags: tags, + } + + if v := d.Get("database_name"); v.(string) != "" { + createOpts.DatabaseName = aws.String(v.(string)) + } + + if attr, ok := d.GetOk("port"); ok { + createOpts.Port = aws.Int64(int64(attr.(int))) + } + + if attr, ok := d.GetOk("db_subnet_group_name"); ok { + createOpts.DBSubnetGroupName = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("db_cluster_parameter_group_name"); ok { + createOpts.DBClusterParameterGroupName = aws.String(attr.(string)) + } + + if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { + createOpts.VpcSecurityGroupIds = expandStringList(attr.List()) + } + + if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 { + createOpts.AvailabilityZones = expandStringList(attr.List()) + } + + if v, ok := d.GetOk("backup_retention_period"); ok { + createOpts.BackupRetentionPeriod = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("preferred_backup_window"); ok { + createOpts.PreferredBackupWindow = aws.String(v.(string)) + } + + if v, ok := d.GetOk("preferred_maintenance_window"); ok { + createOpts.PreferredMaintenanceWindow = aws.String(v.(string)) + } + + if attr, ok := d.GetOk("kms_key_id"); ok { + createOpts.KmsKeyId = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("iam_database_authentication_enabled"); ok { + createOpts.EnableIAMDatabaseAuthentication = aws.Bool(attr.(bool)) + } + + log.Printf("[DEBUG] RDS Cluster create options: %s", createOpts) + resp, err := conn.CreateDBCluster(createOpts) + if err != nil { + log.Printf("[ERROR] Error creating RDS Cluster: %s", err) + return err + } + + log.Printf("[DEBUG]: RDS Cluster create response: %s", resp) + } + + d.SetId(d.Get("cluster_identifier").(string)) + + log.Printf("[INFO] RDS Cluster ID: %s", d.Id()) + + log.Println( + "[INFO] Waiting for RDS Cluster to be available") + + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating", "backing-up", "modifying"}, + Target: []string{"available"}, + Refresh: resourceAwsRDSClusterStateRefreshFunc(d, meta), + Timeout: d.Timeout(schema.TimeoutCreate), + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + // Wait, catching any errors + _, err := stateConf.WaitForState() + if err != nil { + return fmt.Errorf("[WARN] Error waiting for RDS Cluster state to be \"available\": %s", err) + } + + return resourceAwsRDSClusterRead(d, meta) +} + +func resourceAwsRDSClusterRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + + resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{ + 
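+ // The request below is filtered server-side by identifier, so at most
+ // one cluster comes back; the loop that follows is just a defensive
+ // match on the ID.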
DBClusterIdentifier: aws.String(d.Id()), + }) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if "DBClusterNotFoundFault" == awsErr.Code() { + d.SetId("") + log.Printf("[DEBUG] RDS Cluster (%s) not found", d.Id()) + return nil + } + } + log.Printf("[DEBUG] Error describing RDS Cluster (%s)", d.Id()) + return err + } + + var dbc *rds.DBCluster + for _, c := range resp.DBClusters { + if *c.DBClusterIdentifier == d.Id() { + dbc = c + } + } + + if dbc == nil { + log.Printf("[WARN] RDS Cluster (%s) not found", d.Id()) + d.SetId("") + return nil + } + + if err := d.Set("availability_zones", aws.StringValueSlice(dbc.AvailabilityZones)); err != nil { + return fmt.Errorf("[DEBUG] Error saving AvailabilityZones to state for RDS Cluster (%s): %s", d.Id(), err) + } + + // Only set the DatabaseName if it is not nil. There is a known API bug where + // RDS accepts a DatabaseName but does not return it, causing a perpetual + // diff. + // See https://github.com/hashicorp/terraform/issues/4671 for backstory + if dbc.DatabaseName != nil { + d.Set("database_name", dbc.DatabaseName) + } + + d.Set("cluster_identifier", dbc.DBClusterIdentifier) + d.Set("cluster_resource_id", dbc.DbClusterResourceId) + d.Set("db_subnet_group_name", dbc.DBSubnetGroup) + d.Set("db_cluster_parameter_group_name", dbc.DBClusterParameterGroup) + d.Set("endpoint", dbc.Endpoint) + d.Set("engine", dbc.Engine) + d.Set("master_username", dbc.MasterUsername) + d.Set("port", dbc.Port) + d.Set("storage_encrypted", dbc.StorageEncrypted) + d.Set("backup_retention_period", dbc.BackupRetentionPeriod) + d.Set("preferred_backup_window", dbc.PreferredBackupWindow) + d.Set("preferred_maintenance_window", dbc.PreferredMaintenanceWindow) + d.Set("kms_key_id", dbc.KmsKeyId) + d.Set("reader_endpoint", dbc.ReaderEndpoint) + d.Set("replication_source_identifier", dbc.ReplicationSourceIdentifier) + d.Set("iam_database_authentication_enabled", dbc.IAMDatabaseAuthenticationEnabled) + + var vpcg []string + for _, g := range dbc.VpcSecurityGroups { + vpcg = append(vpcg, *g.VpcSecurityGroupId) + } + if err := d.Set("vpc_security_group_ids", vpcg); err != nil { + return fmt.Errorf("[DEBUG] Error saving VPC Security Group IDs to state for RDS Cluster (%s): %s", d.Id(), err) + } + + var cm []string + for _, m := range dbc.DBClusterMembers { + cm = append(cm, *m.DBInstanceIdentifier) + } + if err := d.Set("cluster_members", cm); err != nil { + return fmt.Errorf("[DEBUG] Error saving RDS Cluster Members to state for RDS Cluster (%s): %s", d.Id(), err) + } + + // Fetch and save tags + arn, err := buildRDSClusterARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region) + if err != nil { + log.Printf("[DEBUG] Error building ARN for RDS Cluster (%s), not setting Tags", *dbc.DBClusterIdentifier) + } else { + if err := saveTagsRDS(conn, d, arn); err != nil { + log.Printf("[WARN] Failed to save tags for RDS Cluster (%s): %s", *dbc.DBClusterIdentifier, err) + } + } + + return nil +} + +func resourceAwsRDSClusterUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + requestUpdate := false + + req := &rds.ModifyDBClusterInput{ + ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)), + DBClusterIdentifier: aws.String(d.Id()), + } + + if d.HasChange("master_password") { + req.MasterUserPassword = aws.String(d.Get("master_password").(string)) + requestUpdate = true + } + + if d.HasChange("vpc_security_group_ids") { + if attr := d.Get("vpc_security_group_ids").(*schema.Set); 
attr.Len() > 0 {
+ req.VpcSecurityGroupIds = expandStringList(attr.List())
+ } else {
+ req.VpcSecurityGroupIds = []*string{}
+ }
+ requestUpdate = true
+ }
+
+ if d.HasChange("preferred_backup_window") {
+ req.PreferredBackupWindow = aws.String(d.Get("preferred_backup_window").(string))
+ requestUpdate = true
+ }
+
+ if d.HasChange("preferred_maintenance_window") {
+ req.PreferredMaintenanceWindow = aws.String(d.Get("preferred_maintenance_window").(string))
+ requestUpdate = true
+ }
+
+ if d.HasChange("backup_retention_period") {
+ req.BackupRetentionPeriod = aws.Int64(int64(d.Get("backup_retention_period").(int)))
+ requestUpdate = true
+ }
+
+ if d.HasChange("db_cluster_parameter_group_name") {
+ d.SetPartial("db_cluster_parameter_group_name")
+ req.DBClusterParameterGroupName = aws.String(d.Get("db_cluster_parameter_group_name").(string))
+ requestUpdate = true
+ }
+
+ if d.HasChange("iam_database_authentication_enabled") {
+ req.EnableIAMDatabaseAuthentication = aws.Bool(d.Get("iam_database_authentication_enabled").(bool))
+ requestUpdate = true
+ }
+
+ if requestUpdate {
+ _, err := conn.ModifyDBCluster(req)
+ if err != nil {
+ return fmt.Errorf("[WARN] Error modifying RDS Cluster (%s): %s", d.Id(), err)
+ }
+ }
+
+ if arn, err := buildRDSClusterARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil {
+ if err := setTagsRDS(conn, d, arn); err != nil {
+ return err
+ } else {
+ d.SetPartial("tags")
+ }
+ }
+
+ return resourceAwsRDSClusterRead(d, meta)
+}
+
+func resourceAwsRDSClusterDelete(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).rdsconn
+ log.Printf("[DEBUG] Destroying RDS Cluster (%s)", d.Id())
+
+ deleteOpts := rds.DeleteDBClusterInput{
+ DBClusterIdentifier: aws.String(d.Id()),
+ }
+
+ skipFinalSnapshot := d.Get("skip_final_snapshot").(bool)
+ deleteOpts.SkipFinalSnapshot = aws.Bool(skipFinalSnapshot)
+
+ if !skipFinalSnapshot {
+ if name, present := d.GetOk("final_snapshot_identifier"); present {
+ deleteOpts.FinalDBSnapshotIdentifier = aws.String(name.(string))
+ } else {
+ return fmt.Errorf("RDS Cluster FinalSnapshotIdentifier is required when a final snapshot is required")
+ }
+ }
+
+ log.Printf("[DEBUG] RDS Cluster delete options: %s", deleteOpts)
+ _, err := conn.DeleteDBCluster(&deleteOpts)
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "InvalidDBClusterStateFault" {
+ return fmt.Errorf("RDS Cluster cannot be deleted: %s", awsErr.Message())
+ }
+ // Any other delete error is fatal; return it rather than waiting on
+ // a cluster that was never deleted.
+ return err
+ }
+
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"available", "deleting", "backing-up", "modifying"},
+ Target: []string{"destroyed"},
+ Refresh: resourceAwsRDSClusterStateRefreshFunc(d, meta),
+ Timeout: d.Timeout(schema.TimeoutDelete),
+ MinTimeout: 10 * time.Second,
+ Delay: 30 * time.Second,
+ }
+
+ // Wait, catching any errors
+ _, err = stateConf.WaitForState()
+ if err != nil {
+ return fmt.Errorf("[WARN] Error deleting RDS Cluster (%s): %s", d.Id(), err)
+ }
+
+ return nil
+}
+
+func resourceAwsRDSClusterStateRefreshFunc(
+ d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc {
+ return func() (interface{}, string, error) {
+ conn := meta.(*AWSClient).rdsconn
+
+ resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{
+ DBClusterIdentifier: aws.String(d.Id()),
+ })
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ if "DBClusterNotFoundFault" == awsErr.Code() {
+ return 42, "destroyed", nil
+ }
+ }
+ log.Printf("[WARN] Error on 
retrieving DB Cluster (%s) when waiting: %s", d.Id(), err) + return nil, "", err + } + + var dbc *rds.DBCluster + + for _, c := range resp.DBClusters { + if *c.DBClusterIdentifier == d.Id() { + dbc = c + } + } + + if dbc == nil { + return 42, "destroyed", nil + } + + if dbc.Status != nil { + log.Printf("[DEBUG] DB Cluster status (%s): %s", d.Id(), *dbc.Status) + } + + return dbc, *dbc.Status, nil + } +} + +func buildRDSClusterARN(identifier, partition, accountid, region string) (string, error) { + if partition == "" { + return "", fmt.Errorf("Unable to construct RDS Cluster ARN because of missing AWS partition") + } + if accountid == "" { + return "", fmt.Errorf("Unable to construct RDS Cluster ARN because of missing AWS Account ID") + } + + arn := fmt.Sprintf("arn:%s:rds:%s:%s:cluster:%s", partition, region, accountid, identifier) + return arn, nil + +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster_instance.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster_instance.go new file mode 100644 index 000000000..41bf2d03e --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster_instance.go @@ -0,0 +1,438 @@ +package aws + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/rds" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsRDSClusterInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsRDSClusterInstanceCreate, + Read: resourceAwsRDSClusterInstanceRead, + Update: resourceAwsRDSClusterInstanceUpdate, + Delete: resourceAwsRDSClusterInstanceDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(90 * time.Minute), + Update: schema.DefaultTimeout(90 * time.Minute), + Delete: schema.DefaultTimeout(90 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "identifier": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"identifier_prefix"}, + ValidateFunc: validateRdsIdentifier, + }, + "identifier_prefix": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateRdsIdentifierPrefix, + }, + + "db_subnet_group_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "writer": { + Type: schema.TypeBool, + Computed: true, + }, + + "cluster_identifier": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "endpoint": { + Type: schema.TypeString, + Computed: true, + }, + + "port": { + Type: schema.TypeInt, + Computed: true, + }, + + "publicly_accessible": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "instance_class": { + Type: schema.TypeString, + Required: true, + }, + + "db_parameter_group_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + // apply_immediately is used to determine when the update modifications + // take place. 
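+ // When apply_immediately is false, most modifications are deferred to
+ // the next maintenance window instead of being applied right away.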
+ // See http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html + "apply_immediately": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "kms_key_id": { + Type: schema.TypeString, + Computed: true, + }, + + "storage_encrypted": { + Type: schema.TypeBool, + Computed: true, + }, + + "auto_minor_version_upgrade": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + + "monitoring_role_arn": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "preferred_maintenance_window": { + Type: schema.TypeString, + Optional: true, + Computed: true, + StateFunc: func(v interface{}) string { + if v != nil { + value := v.(string) + return strings.ToLower(value) + } + return "" + }, + ValidateFunc: validateOnceAWeekWindowFormat, + }, + + "preferred_backup_window": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateOnceADayWindowFormat, + }, + + "monitoring_interval": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + }, + + "promotion_tier": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsRDSClusterInstanceCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) + + createOpts := &rds.CreateDBInstanceInput{ + DBInstanceClass: aws.String(d.Get("instance_class").(string)), + DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)), + Engine: aws.String("aurora"), + PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)), + PromotionTier: aws.Int64(int64(d.Get("promotion_tier").(int))), + AutoMinorVersionUpgrade: aws.Bool(d.Get("auto_minor_version_upgrade").(bool)), + Tags: tags, + } + + if attr, ok := d.GetOk("db_parameter_group_name"); ok { + createOpts.DBParameterGroupName = aws.String(attr.(string)) + } + + if v, ok := d.GetOk("identifier"); ok { + createOpts.DBInstanceIdentifier = aws.String(v.(string)) + } else { + if v, ok := d.GetOk("identifier_prefix"); ok { + createOpts.DBInstanceIdentifier = aws.String(resource.PrefixedUniqueId(v.(string))) + } else { + createOpts.DBInstanceIdentifier = aws.String(resource.PrefixedUniqueId("tf-")) + } + } + + if attr, ok := d.GetOk("db_subnet_group_name"); ok { + createOpts.DBSubnetGroupName = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("monitoring_role_arn"); ok { + createOpts.MonitoringRoleArn = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("preferred_backup_window"); ok { + createOpts.PreferredBackupWindow = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("preferred_maintenance_window"); ok { + createOpts.PreferredMaintenanceWindow = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("monitoring_interval"); ok { + createOpts.MonitoringInterval = aws.Int64(int64(attr.(int))) + } + + log.Printf("[DEBUG] Creating RDS DB Instance opts: %s", createOpts) + resp, err := conn.CreateDBInstance(createOpts) + if err != nil { + return err + } + + d.SetId(*resp.DBInstance.DBInstanceIdentifier) + + // reuse db_instance refresh func + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating", "backing-up", "modifying"}, + Target: []string{"available"}, + Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta), + Timeout: d.Timeout(schema.TimeoutCreate), + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + // Wait, catching any errors + _, err = stateConf.WaitForState() + if err != nil 
{
+ return err
+ }
+
+ return resourceAwsRDSClusterInstanceRead(d, meta)
+}
+
+func resourceAwsRDSClusterInstanceRead(d *schema.ResourceData, meta interface{}) error {
+ db, err := resourceAwsDbInstanceRetrieve(d, meta)
+ // Errors from this helper are always reportable
+ if err != nil {
+ return fmt.Errorf("[WARN] Error on retrieving RDS Cluster Instance (%s): %s", d.Id(), err)
+ }
+ // A nil response means "not found"
+ if db == nil {
+ log.Printf("[WARN] RDS Cluster Instance (%s): not found, removing from state.", d.Id())
+ d.SetId("")
+ return nil
+ }
+
+ // Retrieve DB Cluster information, to determine if this Instance is a writer
+ conn := meta.(*AWSClient).rdsconn
+ resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{
+ DBClusterIdentifier: db.DBClusterIdentifier,
+ })
+ // Surface any API error before inspecting the (possibly nil) response
+ if err != nil {
+ return fmt.Errorf("Error describing RDS Cluster (%s): %s", *db.DBClusterIdentifier, err)
+ }
+
+ var dbc *rds.DBCluster
+ for _, c := range resp.DBClusters {
+ if *c.DBClusterIdentifier == *db.DBClusterIdentifier {
+ dbc = c
+ }
+ }
+
+ if dbc == nil {
+ return fmt.Errorf("[WARN] Unable to find RDS Cluster (%s) for Cluster Instance (%s)",
+ *db.DBClusterIdentifier, *db.DBInstanceIdentifier)
+ }
+
+ for _, m := range dbc.DBClusterMembers {
+ if *db.DBInstanceIdentifier == *m.DBInstanceIdentifier {
+ d.Set("writer", *m.IsClusterWriter)
+ }
+ }
+
+ if db.Endpoint != nil {
+ d.Set("endpoint", db.Endpoint.Address)
+ d.Set("port", db.Endpoint.Port)
+ }
+
+ d.Set("publicly_accessible", db.PubliclyAccessible)
+ d.Set("cluster_identifier", db.DBClusterIdentifier)
+ d.Set("instance_class", db.DBInstanceClass)
+ d.Set("identifier", db.DBInstanceIdentifier)
+ d.Set("storage_encrypted", db.StorageEncrypted)
+ d.Set("kms_key_id", db.KmsKeyId)
+ d.Set("auto_minor_version_upgrade", db.AutoMinorVersionUpgrade)
+ d.Set("promotion_tier", db.PromotionTier)
+ d.Set("preferred_backup_window", db.PreferredBackupWindow)
+ d.Set("preferred_maintenance_window", db.PreferredMaintenanceWindow)
+
+ if db.MonitoringInterval != nil {
+ d.Set("monitoring_interval", db.MonitoringInterval)
+ }
+
+ if db.MonitoringRoleArn != nil {
+ d.Set("monitoring_role_arn", db.MonitoringRoleArn)
+ }
+
+ if len(db.DBParameterGroups) > 0 {
+ d.Set("db_parameter_group_name", db.DBParameterGroups[0].DBParameterGroupName)
+ }
+
+ // Fetch and save tags
+ arn, err := buildRDSARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region)
+ if err != nil {
+ log.Printf("[DEBUG] Error building ARN for RDS Cluster Instance (%s), not setting Tags", *db.DBInstanceIdentifier)
+ } else {
+ if err := saveTagsRDS(conn, d, arn); err != nil {
+ log.Printf("[WARN] Failed to save tags for RDS Cluster Instance (%s): %s", *db.DBInstanceIdentifier, err)
+ }
+ }
+
+ return nil
+}
+
+func resourceAwsRDSClusterInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).rdsconn
+ requestUpdate := false
+
+ req := &rds.ModifyDBInstanceInput{
+ ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)),
+ DBInstanceIdentifier: aws.String(d.Id()),
+ }
+
+ if d.HasChange("db_parameter_group_name") {
+ req.DBParameterGroupName = aws.String(d.Get("db_parameter_group_name").(string))
+ requestUpdate = true
+ }
+
+ if d.HasChange("instance_class") {
+ req.DBInstanceClass = aws.String(d.Get("instance_class").(string))
+ requestUpdate = true
+ }
+
+ if d.HasChange("monitoring_role_arn") {
+ d.SetPartial("monitoring_role_arn")
+ req.MonitoringRoleArn = aws.String(d.Get("monitoring_role_arn").(string))
+ requestUpdate = true
+ }
+
+ if 
d.HasChange("preferred_backup_window") { + d.SetPartial("preferred_backup_window") + req.PreferredBackupWindow = aws.String(d.Get("preferred_backup_window").(string)) + requestUpdate = true + } + + if d.HasChange("preferred_maintenance_window") { + d.SetPartial("preferred_maintenance_window") + req.PreferredMaintenanceWindow = aws.String(d.Get("preferred_maintenance_window").(string)) + requestUpdate = true + } + + if d.HasChange("monitoring_interval") { + d.SetPartial("monitoring_interval") + req.MonitoringInterval = aws.Int64(int64(d.Get("monitoring_interval").(int))) + requestUpdate = true + } + + if d.HasChange("auto_minor_version_upgrade") { + d.SetPartial("auto_minor_version_upgrade") + req.AutoMinorVersionUpgrade = aws.Bool(d.Get("auto_minor_version_upgrade").(bool)) + requestUpdate = true + } + + if d.HasChange("promotion_tier") { + d.SetPartial("promotion_tier") + req.PromotionTier = aws.Int64(int64(d.Get("promotion_tier").(int))) + requestUpdate = true + } + + log.Printf("[DEBUG] Send DB Instance Modification request: %#v", requestUpdate) + if requestUpdate { + log.Printf("[DEBUG] DB Instance Modification request: %#v", req) + _, err := conn.ModifyDBInstance(req) + if err != nil { + return fmt.Errorf("Error modifying DB Instance %s: %s", d.Id(), err) + } + + // reuse db_instance refresh func + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating", "backing-up", "modifying"}, + Target: []string{"available"}, + Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta), + Timeout: d.Timeout(schema.TimeoutUpdate), + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, // Wait 30 secs before starting + } + + // Wait, catching any errors + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + } + + if arn, err := buildRDSARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil { + if err := setTagsRDS(conn, d, arn); err != nil { + return err + } + } + + return resourceAwsRDSClusterInstanceRead(d, meta) +} + +func resourceAwsRDSClusterInstanceDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + + log.Printf("[DEBUG] RDS Cluster Instance destroy: %v", d.Id()) + + opts := rds.DeleteDBInstanceInput{DBInstanceIdentifier: aws.String(d.Id())} + + log.Printf("[DEBUG] RDS Cluster Instance destroy configuration: %s", opts) + if _, err := conn.DeleteDBInstance(&opts); err != nil { + return err + } + + // re-uses db_instance refresh func + log.Println("[INFO] Waiting for RDS Cluster Instance to be destroyed") + stateConf := &resource.StateChangeConf{ + Pending: []string{"modifying", "deleting"}, + Target: []string{}, + Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta), + Timeout: d.Timeout(schema.TimeoutDelete), + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, // Wait 30 secs before starting + } + + if _, err := stateConf.WaitForState(); err != nil { + return err + } + + return nil + +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster_parameter_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster_parameter_group.go new file mode 100644 index 000000000..61cb20f01 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_rds_cluster_parameter_group.go @@ -0,0 +1,293 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + 
"github.com/aws/aws-sdk-go/service/rds" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsRDSClusterParameterGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsRDSClusterParameterGroupCreate, + Read: resourceAwsRDSClusterParameterGroupRead, + Update: resourceAwsRDSClusterParameterGroupUpdate, + Delete: resourceAwsRDSClusterParameterGroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: validateDbParamGroupName, + }, + "name_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateDbParamGroupNamePrefix, + }, + "family": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "Managed by Terraform", + }, + "parameter": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: false, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "apply_method": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "immediate", + // this parameter is not actually state, but a + // meta-parameter describing how the RDS API call + // to modify the parameter group should be made. + // Future reads of the resource from AWS don't tell + // us what we used for apply_method previously, so + // by squashing state to an empty string we avoid + // needing to do an update for every future run. 
+ StateFunc: func(interface{}) string { return "" },
+ },
+ },
+ },
+ Set: resourceAwsDbParameterHash,
+ },
+
+ "tags": tagsSchema(),
+ },
+ }
+}
+
+func resourceAwsRDSClusterParameterGroupCreate(d *schema.ResourceData, meta interface{}) error {
+ rdsconn := meta.(*AWSClient).rdsconn
+ tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{}))
+
+ var groupName string
+ if v, ok := d.GetOk("name"); ok {
+ groupName = v.(string)
+ } else if v, ok := d.GetOk("name_prefix"); ok {
+ groupName = resource.PrefixedUniqueId(v.(string))
+ } else {
+ groupName = resource.UniqueId()
+ }
+
+ createOpts := rds.CreateDBClusterParameterGroupInput{
+ DBClusterParameterGroupName: aws.String(groupName),
+ DBParameterGroupFamily: aws.String(d.Get("family").(string)),
+ Description: aws.String(d.Get("description").(string)),
+ Tags: tags,
+ }
+
+ log.Printf("[DEBUG] Create DB Cluster Parameter Group: %#v", createOpts)
+ _, err := rdsconn.CreateDBClusterParameterGroup(&createOpts)
+ if err != nil {
+ return fmt.Errorf("Error creating DB Cluster Parameter Group: %s", err)
+ }
+
+ d.SetId(*createOpts.DBClusterParameterGroupName)
+ log.Printf("[INFO] DB Cluster Parameter Group ID: %s", d.Id())
+
+ return resourceAwsRDSClusterParameterGroupUpdate(d, meta)
+}
+
+func resourceAwsRDSClusterParameterGroupRead(d *schema.ResourceData, meta interface{}) error {
+ rdsconn := meta.(*AWSClient).rdsconn
+
+ describeOpts := rds.DescribeDBClusterParameterGroupsInput{
+ DBClusterParameterGroupName: aws.String(d.Id()),
+ }
+
+ describeResp, err := rdsconn.DescribeDBClusterParameterGroups(&describeOpts)
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "DBParameterGroupNotFound" {
+ log.Printf("[WARN] DB Cluster Parameter Group (%s) not found, removing from state", d.Id())
+ d.SetId("")
+ return nil
+ }
+
+ return err
+ }
+
+ if len(describeResp.DBClusterParameterGroups) != 1 ||
+ *describeResp.DBClusterParameterGroups[0].DBClusterParameterGroupName != d.Id() {
+ return fmt.Errorf("Unable to find Cluster Parameter Group: %#v", describeResp.DBClusterParameterGroups)
+ }
+
+ d.Set("name", describeResp.DBClusterParameterGroups[0].DBClusterParameterGroupName)
+ d.Set("family", describeResp.DBClusterParameterGroups[0].DBParameterGroupFamily)
+ d.Set("description", describeResp.DBClusterParameterGroups[0].Description)
+
+ // Only include user customized parameters as there's hundreds of system/default ones
+ describeParametersOpts := rds.DescribeDBClusterParametersInput{
+ DBClusterParameterGroupName: aws.String(d.Id()),
+ Source: aws.String("user"),
+ }
+
+ describeParametersResp, err := rdsconn.DescribeDBClusterParameters(&describeParametersOpts)
+ if err != nil {
+ return err
+ }
+
+ d.Set("parameter", flattenParameters(describeParametersResp.Parameters))
+
+ paramGroup := describeResp.DBClusterParameterGroups[0]
+ arn, err := buildRDSCPGARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region)
+ if err != nil {
+ name := ""
+ if paramGroup.DBClusterParameterGroupName != nil && *paramGroup.DBClusterParameterGroupName != "" {
+ name = *paramGroup.DBClusterParameterGroupName
+ }
+ log.Printf("[DEBUG] Error building ARN for DB Cluster Parameter Group, not setting Tags for Cluster Param Group %s", name)
+ } else {
+ d.Set("arn", arn)
+ resp, err := rdsconn.ListTagsForResource(&rds.ListTagsForResourceInput{
+ ResourceName: aws.String(arn),
+ })
+
+ if err != nil {
+ log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn)
+ }
+
+ var dt []*rds.Tag
+ // Only inspect the tag list when the call above succeeded; resp may
+ // be nil on error.
+ if err == nil && 
len(resp.TagList) > 0 { + dt = resp.TagList + } + d.Set("tags", tagsToMapRDS(dt)) + } + + return nil +} + +func resourceAwsRDSClusterParameterGroupUpdate(d *schema.ResourceData, meta interface{}) error { + rdsconn := meta.(*AWSClient).rdsconn + + d.Partial(true) + + if d.HasChange("parameter") { + o, n := d.GetChange("parameter") + if o == nil { + o = new(schema.Set) + } + if n == nil { + n = new(schema.Set) + } + + os := o.(*schema.Set) + ns := n.(*schema.Set) + + // Expand the "parameter" set to aws-sdk-go compat []rds.Parameter + parameters, err := expandParameters(ns.Difference(os).List()) + if err != nil { + return err + } + + if len(parameters) > 0 { + modifyOpts := rds.ModifyDBClusterParameterGroupInput{ + DBClusterParameterGroupName: aws.String(d.Get("name").(string)), + Parameters: parameters, + } + + log.Printf("[DEBUG] Modify DB Cluster Parameter Group: %s", modifyOpts) + _, err = rdsconn.ModifyDBClusterParameterGroup(&modifyOpts) + if err != nil { + return fmt.Errorf("Error modifying DB Cluster Parameter Group: %s", err) + } + } + d.SetPartial("parameter") + } + + if arn, err := buildRDSCPGARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil { + if err := setTagsRDS(rdsconn, d, arn); err != nil { + return err + } else { + d.SetPartial("tags") + } + } + + d.Partial(false) + + return resourceAwsRDSClusterParameterGroupRead(d, meta) +} + +func resourceAwsRDSClusterParameterGroupDelete(d *schema.ResourceData, meta interface{}) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"destroyed"}, + Refresh: resourceAwsRDSClusterParameterGroupDeleteRefreshFunc(d, meta), + Timeout: 3 * time.Minute, + MinTimeout: 1 * time.Second, + } + _, err := stateConf.WaitForState() + return err +} + +func resourceAwsRDSClusterParameterGroupDeleteRefreshFunc( + d *schema.ResourceData, + meta interface{}) resource.StateRefreshFunc { + rdsconn := meta.(*AWSClient).rdsconn + + return func() (interface{}, string, error) { + + deleteOpts := rds.DeleteDBClusterParameterGroupInput{ + DBClusterParameterGroupName: aws.String(d.Id()), + } + + if _, err := rdsconn.DeleteDBClusterParameterGroup(&deleteOpts); err != nil { + rdserr, ok := err.(awserr.Error) + if !ok { + return d, "error", err + } + + if rdserr.Code() != "DBParameterGroupNotFound" { + return d, "error", err + } + } + + return d, "destroyed", nil + } +} + +func buildRDSCPGARN(identifier, partition, accountid, region string) (string, error) { + if partition == "" { + return "", fmt.Errorf("Unable to construct RDS Cluster ARN because of missing AWS partition") + } + if accountid == "" { + return "", fmt.Errorf("Unable to construct RDS Cluster ARN because of missing AWS Account ID") + } + arn := fmt.Sprintf("arn:%s:rds:%s:%s:cluster-pg:%s", partition, region, accountid, identifier) + return arn, nil + +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_cluster.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_cluster.go new file mode 100644 index 000000000..9ab4675c3 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_cluster.go @@ -0,0 +1,963 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/redshift" + "github.com/hashicorp/terraform/helper/resource" + 
"github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsRedshiftCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsRedshiftClusterCreate, + Read: resourceAwsRedshiftClusterRead, + Update: resourceAwsRedshiftClusterUpdate, + Delete: resourceAwsRedshiftClusterDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsRedshiftClusterImport, + }, + + Schema: map[string]*schema.Schema{ + "database_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateRedshiftClusterDbName, + }, + + "cluster_identifier": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateRedshiftClusterIdentifier, + }, + "cluster_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "node_type": { + Type: schema.TypeString, + Required: true, + }, + + "master_username": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateRedshiftClusterMasterUsername, + }, + + "master_password": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + ValidateFunc: validateRedshiftClusterMasterPassword, + }, + + "cluster_security_groups": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "vpc_security_group_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "cluster_subnet_group_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "availability_zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "preferred_maintenance_window": { + Type: schema.TypeString, + Optional: true, + Computed: true, + StateFunc: func(val interface{}) string { + if val == nil { + return "" + } + return strings.ToLower(val.(string)) + }, + ValidateFunc: validateOnceAWeekWindowFormat, + }, + + "cluster_parameter_group_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "automated_snapshot_retention_period": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + value := v.(int) + if value > 35 { + es = append(es, fmt.Errorf( + "backup retention period cannot be more than 35 days")) + } + return + }, + }, + + "port": { + Type: schema.TypeInt, + Optional: true, + Default: 5439, + }, + + "cluster_version": { + Type: schema.TypeString, + Optional: true, + Default: "1.0", + }, + + "allow_version_upgrade": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + + "number_of_nodes": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + }, + + "publicly_accessible": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + + "encrypted": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "enhanced_vpc_routing": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + + "elastic_ip": { + Type: schema.TypeString, + Optional: true, + }, + + "final_snapshot_identifier": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateRedshiftClusterFinalSnapshotIdentifier, + }, + + "skip_final_snapshot": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "endpoint": { + Type: schema.TypeString, + Optional: true, 
+ Computed: true, + }, + + "cluster_public_key": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "cluster_revision_number": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "iam_roles": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "enable_logging": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "bucket_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "s3_key_prefix": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "snapshot_identifier": { + Type: schema.TypeString, + Optional: true, + }, + + "snapshot_cluster_identifier": { + Type: schema.TypeString, + Optional: true, + }, + + "owner_account": { + Type: schema.TypeString, + Optional: true, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsRedshiftClusterImport( + d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + // Neither skip_final_snapshot nor final_snapshot_identifier can be fetched + // from any API call, so we need to default skip_final_snapshot to true so + // that final_snapshot_identifier is not required + d.Set("skip_final_snapshot", true) + return []*schema.ResourceData{d}, nil +} + +func resourceAwsRedshiftClusterCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).redshiftconn + tags := tagsFromMapRedshift(d.Get("tags").(map[string]interface{})) + + if v, ok := d.GetOk("snapshot_identifier"); ok { + restoreOpts := &redshift.RestoreFromClusterSnapshotInput{ + ClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)), + SnapshotIdentifier: aws.String(v.(string)), + Port: aws.Int64(int64(d.Get("port").(int))), + AllowVersionUpgrade: aws.Bool(d.Get("allow_version_upgrade").(bool)), + NodeType: aws.String(d.Get("node_type").(string)), + PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)), + AutomatedSnapshotRetentionPeriod: aws.Int64(int64(d.Get("automated_snapshot_retention_period").(int))), + } + + if v, ok := d.GetOk("owner_account"); ok { + restoreOpts.OwnerAccount = aws.String(v.(string)) + } + + if v, ok := d.GetOk("snapshot_cluster_identifier"); ok { + restoreOpts.SnapshotClusterIdentifier = aws.String(v.(string)) + } + + if v, ok := d.GetOk("availability_zone"); ok { + restoreOpts.AvailabilityZone = aws.String(v.(string)) + } + + if v, ok := d.GetOk("cluster_subnet_group_name"); ok { + restoreOpts.ClusterSubnetGroupName = aws.String(v.(string)) + } + + if v, ok := d.GetOk("cluster_parameter_group_name"); ok { + restoreOpts.ClusterParameterGroupName = aws.String(v.(string)) + } + + if v := d.Get("cluster_security_groups").(*schema.Set); v.Len() > 0 { + restoreOpts.ClusterSecurityGroups = expandStringList(v.List()) + } + + if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 { + restoreOpts.VpcSecurityGroupIds = expandStringList(v.List()) + } + + if v, ok := d.GetOk("preferred_maintenance_window"); ok { + restoreOpts.PreferredMaintenanceWindow = aws.String(v.(string)) + } + + if v, ok := d.GetOk("kms_key_id"); ok { + restoreOpts.KmsKeyId = aws.String(v.(string)) + } + + if v, ok := d.GetOk("elastic_ip"); ok { + restoreOpts.ElasticIp = aws.String(v.(string)) + } + + if v, ok := d.GetOk("enhanced_vpc_routing"); ok { + restoreOpts.EnhancedVpcRouting = aws.Bool(v.(bool)) + } + + if v, ok := d.GetOk("iam_roles"); ok { + restoreOpts.IamRoles = expandStringList(v.(*schema.Set).List()) + } + + 
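+ // Settings that RestoreFromClusterSnapshotInput does not accept, such
+ // as the master username and password, are inherited from the snapshot
+ // rather than read from configuration.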
log.Printf("[DEBUG] Redshift Cluster restore cluster options: %s", restoreOpts) + + resp, err := conn.RestoreFromClusterSnapshot(restoreOpts) + if err != nil { + log.Printf("[ERROR] Error Restoring Redshift Cluster from Snapshot: %s", err) + return err + } + + d.SetId(*resp.Cluster.ClusterIdentifier) + + } else { + if _, ok := d.GetOk("master_password"); !ok { + return fmt.Errorf(`provider.aws: aws_redshift_cluster: %s: "master_password": required field is not set`, d.Get("cluster_identifier").(string)) + } + + if _, ok := d.GetOk("master_username"); !ok { + return fmt.Errorf(`provider.aws: aws_redshift_cluster: %s: "master_username": required field is not set`, d.Get("cluster_identifier").(string)) + } + + createOpts := &redshift.CreateClusterInput{ + ClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)), + Port: aws.Int64(int64(d.Get("port").(int))), + MasterUserPassword: aws.String(d.Get("master_password").(string)), + MasterUsername: aws.String(d.Get("master_username").(string)), + ClusterVersion: aws.String(d.Get("cluster_version").(string)), + NodeType: aws.String(d.Get("node_type").(string)), + DBName: aws.String(d.Get("database_name").(string)), + AllowVersionUpgrade: aws.Bool(d.Get("allow_version_upgrade").(bool)), + PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)), + AutomatedSnapshotRetentionPeriod: aws.Int64(int64(d.Get("automated_snapshot_retention_period").(int))), + Tags: tags, + } + + if v := d.Get("number_of_nodes").(int); v > 1 { + createOpts.ClusterType = aws.String("multi-node") + createOpts.NumberOfNodes = aws.Int64(int64(d.Get("number_of_nodes").(int))) + } else { + createOpts.ClusterType = aws.String("single-node") + } + + if v := d.Get("cluster_security_groups").(*schema.Set); v.Len() > 0 { + createOpts.ClusterSecurityGroups = expandStringList(v.List()) + } + + if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 { + createOpts.VpcSecurityGroupIds = expandStringList(v.List()) + } + + if v, ok := d.GetOk("cluster_subnet_group_name"); ok { + createOpts.ClusterSubnetGroupName = aws.String(v.(string)) + } + + if v, ok := d.GetOk("availability_zone"); ok { + createOpts.AvailabilityZone = aws.String(v.(string)) + } + + if v, ok := d.GetOk("preferred_maintenance_window"); ok { + createOpts.PreferredMaintenanceWindow = aws.String(v.(string)) + } + + if v, ok := d.GetOk("cluster_parameter_group_name"); ok { + createOpts.ClusterParameterGroupName = aws.String(v.(string)) + } + + if v, ok := d.GetOk("encrypted"); ok { + createOpts.Encrypted = aws.Bool(v.(bool)) + } + + if v, ok := d.GetOk("enhanced_vpc_routing"); ok { + createOpts.EnhancedVpcRouting = aws.Bool(v.(bool)) + } + + if v, ok := d.GetOk("kms_key_id"); ok { + createOpts.KmsKeyId = aws.String(v.(string)) + } + + if v, ok := d.GetOk("elastic_ip"); ok { + createOpts.ElasticIp = aws.String(v.(string)) + } + + if v, ok := d.GetOk("iam_roles"); ok { + createOpts.IamRoles = expandStringList(v.(*schema.Set).List()) + } + + log.Printf("[DEBUG] Redshift Cluster create options: %s", createOpts) + resp, err := conn.CreateCluster(createOpts) + if err != nil { + log.Printf("[ERROR] Error creating Redshift Cluster: %s", err) + return err + } + + log.Printf("[DEBUG]: Cluster create response: %s", resp) + d.SetId(*resp.Cluster.ClusterIdentifier) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating", "backing-up", "modifying", "restoring"}, + Target: []string{"available"}, + Refresh: resourceAwsRedshiftClusterStateRefreshFunc(d, meta), + Timeout: 75 * time.Minute, + 
MinTimeout: 10 * time.Second,
+ }
+
+ _, err := stateConf.WaitForState()
+ if err != nil {
+ return fmt.Errorf("[WARN] Error waiting for Redshift Cluster state to be \"available\": %s", err)
+ }
+
+ if _, ok := d.GetOk("enable_logging"); ok {
+
+ loggingErr := enableRedshiftClusterLogging(d, conn)
+ if loggingErr != nil {
+ log.Printf("[ERROR] Error Enabling Logging on Redshift Cluster: %s", loggingErr)
+ return loggingErr
+ }
+
+ }
+
+ return resourceAwsRedshiftClusterRead(d, meta)
+}
+
+func resourceAwsRedshiftClusterRead(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).redshiftconn
+
+ log.Printf("[INFO] Reading Redshift Cluster Information: %s", d.Id())
+ resp, err := conn.DescribeClusters(&redshift.DescribeClustersInput{
+ ClusterIdentifier: aws.String(d.Id()),
+ })
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ if "ClusterNotFound" == awsErr.Code() {
+ d.SetId("")
+ log.Printf("[DEBUG] Redshift Cluster (%s) not found", d.Id())
+ return nil
+ }
+ }
+ log.Printf("[DEBUG] Error describing Redshift Cluster (%s)", d.Id())
+ return err
+ }
+
+ var rsc *redshift.Cluster
+ for _, c := range resp.Clusters {
+ if *c.ClusterIdentifier == d.Id() {
+ rsc = c
+ }
+ }
+
+ if rsc == nil {
+ log.Printf("[WARN] Redshift Cluster (%s) not found", d.Id())
+ d.SetId("")
+ return nil
+ }
+
+ log.Printf("[INFO] Reading Redshift Cluster Logging Status: %s", d.Id())
+ loggingStatus, loggingErr := conn.DescribeLoggingStatus(&redshift.DescribeLoggingStatusInput{
+ ClusterIdentifier: aws.String(d.Id()),
+ })
+
+ if loggingErr != nil {
+ return loggingErr
+ }
+
+ d.Set("master_username", rsc.MasterUsername)
+ d.Set("node_type", rsc.NodeType)
+ d.Set("allow_version_upgrade", rsc.AllowVersionUpgrade)
+ d.Set("database_name", rsc.DBName)
+ d.Set("cluster_identifier", rsc.ClusterIdentifier)
+ d.Set("cluster_version", rsc.ClusterVersion)
+
+ d.Set("cluster_subnet_group_name", rsc.ClusterSubnetGroupName)
+ d.Set("availability_zone", rsc.AvailabilityZone)
+ d.Set("encrypted", rsc.Encrypted)
+ d.Set("enhanced_vpc_routing", rsc.EnhancedVpcRouting)
+ d.Set("kms_key_id", rsc.KmsKeyId)
+ d.Set("automated_snapshot_retention_period", rsc.AutomatedSnapshotRetentionPeriod)
+ d.Set("preferred_maintenance_window", rsc.PreferredMaintenanceWindow)
+ if rsc.Endpoint != nil && rsc.Endpoint.Address != nil {
+ endpoint := *rsc.Endpoint.Address
+ if rsc.Endpoint.Port != nil {
+ endpoint = fmt.Sprintf("%s:%d", endpoint, *rsc.Endpoint.Port)
+ }
+ d.Set("port", rsc.Endpoint.Port)
+ d.Set("endpoint", endpoint)
+ }
+ // Guard the index: a cluster may report no parameter groups
+ if len(rsc.ClusterParameterGroups) > 0 {
+ d.Set("cluster_parameter_group_name", rsc.ClusterParameterGroups[0].ParameterGroupName)
+ }
+ if len(rsc.ClusterNodes) > 1 {
+ d.Set("cluster_type", "multi-node")
+ } else {
+ d.Set("cluster_type", "single-node")
+ }
+ d.Set("number_of_nodes", rsc.NumberOfNodes)
+ d.Set("publicly_accessible", rsc.PubliclyAccessible)
+
+ var vpcg []string
+ for _, g := range rsc.VpcSecurityGroups {
+ vpcg = append(vpcg, *g.VpcSecurityGroupId)
+ }
+ if err := d.Set("vpc_security_group_ids", vpcg); err != nil {
+ return fmt.Errorf("[DEBUG] Error saving VPC Security Group IDs to state for Redshift Cluster (%s): %s", d.Id(), err)
+ }
+
+ var csg []string
+ for _, g := range rsc.ClusterSecurityGroups {
+ csg = append(csg, *g.ClusterSecurityGroupName)
+ }
+ if err := d.Set("cluster_security_groups", csg); err != nil {
+ return fmt.Errorf("[DEBUG] Error saving Cluster Security Group Names to state for Redshift Cluster (%s): %s", d.Id(), err)
+ }
+
+ var iamRoles []string
+ for _, i := range rsc.IamRoles {
+ iamRoles = 
append(iamRoles, *i.IamRoleArn) + } + if err := d.Set("iam_roles", iamRoles); err != nil { + return fmt.Errorf("[DEBUG] Error saving IAM Roles to state for Redshift Cluster (%s): %s", d.Id(), err) + } + + d.Set("cluster_public_key", rsc.ClusterPublicKey) + d.Set("cluster_revision_number", rsc.ClusterRevisionNumber) + d.Set("tags", tagsToMapRedshift(rsc.Tags)) + + d.Set("bucket_name", loggingStatus.BucketName) + d.Set("enable_logging", loggingStatus.LoggingEnabled) + d.Set("s3_key_prefix", loggingStatus.S3KeyPrefix) + + return nil +} + +func resourceAwsRedshiftClusterUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).redshiftconn + d.Partial(true) + + arn, tagErr := buildRedshiftARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region) + if tagErr != nil { + return fmt.Errorf("Error building ARN for Redshift Cluster, not updating Tags for cluster %s", d.Id()) + } else { + if tagErr := setTagsRedshift(conn, d, arn); tagErr != nil { + return tagErr + } else { + d.SetPartial("tags") + } + } + + requestUpdate := false + log.Printf("[INFO] Building Redshift Modify Cluster Options") + req := &redshift.ModifyClusterInput{ + ClusterIdentifier: aws.String(d.Id()), + } + + if d.HasChange("cluster_type") { + req.ClusterType = aws.String(d.Get("cluster_type").(string)) + requestUpdate = true + } + + if d.HasChange("node_type") { + req.NodeType = aws.String(d.Get("node_type").(string)) + requestUpdate = true + } + + if d.HasChange("number_of_nodes") { + if v := d.Get("number_of_nodes").(int); v > 1 { + req.ClusterType = aws.String("multi-node") + req.NumberOfNodes = aws.Int64(int64(d.Get("number_of_nodes").(int))) + } else { + req.ClusterType = aws.String("single-node") + } + + req.NodeType = aws.String(d.Get("node_type").(string)) + requestUpdate = true + } + + if d.HasChange("cluster_security_groups") { + req.ClusterSecurityGroups = expandStringList(d.Get("cluster_security_groups").(*schema.Set).List()) + requestUpdate = true + } + + if d.HasChange("vpc_security_group_ids") { + req.VpcSecurityGroupIds = expandStringList(d.Get("vpc_security_group_ids").(*schema.Set).List()) + requestUpdate = true + } + + if d.HasChange("master_password") { + req.MasterUserPassword = aws.String(d.Get("master_password").(string)) + requestUpdate = true + } + + if d.HasChange("cluster_parameter_group_name") { + req.ClusterParameterGroupName = aws.String(d.Get("cluster_parameter_group_name").(string)) + requestUpdate = true + } + + if d.HasChange("automated_snapshot_retention_period") { + req.AutomatedSnapshotRetentionPeriod = aws.Int64(int64(d.Get("automated_snapshot_retention_period").(int))) + requestUpdate = true + } + + if d.HasChange("preferred_maintenance_window") { + req.PreferredMaintenanceWindow = aws.String(d.Get("preferred_maintenance_window").(string)) + requestUpdate = true + } + + if d.HasChange("cluster_version") { + req.ClusterVersion = aws.String(d.Get("cluster_version").(string)) + requestUpdate = true + } + + if d.HasChange("allow_version_upgrade") { + req.AllowVersionUpgrade = aws.Bool(d.Get("allow_version_upgrade").(bool)) + requestUpdate = true + } + + if d.HasChange("publicly_accessible") { + req.PubliclyAccessible = aws.Bool(d.Get("publicly_accessible").(bool)) + requestUpdate = true + } + + if d.HasChange("enhanced_vpc_routing") { + req.EnhancedVpcRouting = aws.Bool(d.Get("enhanced_vpc_routing").(bool)) + requestUpdate = true + } + + if requestUpdate { + log.Printf("[INFO] Modifying Redshift Cluster: %s", d.Id()) + 
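+ // Note: unlike RDS, Redshift's ModifyCluster API exposes no
+ // ApplyImmediately flag; every changed attribute gathered above is
+ // submitted in this single call.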
log.Printf("[DEBUG] Redshift Cluster Modify options: %s", req) + _, err := conn.ModifyCluster(req) + if err != nil { + return fmt.Errorf("[WARN] Error modifying Redshift Cluster (%s): %s", d.Id(), err) + } + } + + if d.HasChange("iam_roles") { + o, n := d.GetChange("iam_roles") + if o == nil { + o = new(schema.Set) + } + if n == nil { + n = new(schema.Set) + } + + os := o.(*schema.Set) + ns := n.(*schema.Set) + + removeIams := os.Difference(ns).List() + addIams := ns.Difference(os).List() + + log.Printf("[INFO] Building Redshift Modify Cluster IAM Role Options") + req := &redshift.ModifyClusterIamRolesInput{ + ClusterIdentifier: aws.String(d.Id()), + AddIamRoles: expandStringList(addIams), + RemoveIamRoles: expandStringList(removeIams), + } + + log.Printf("[INFO] Modifying Redshift Cluster IAM Roles: %s", d.Id()) + log.Printf("[DEBUG] Redshift Cluster Modify IAM Role options: %s", req) + _, err := conn.ModifyClusterIamRoles(req) + if err != nil { + return fmt.Errorf("[WARN] Error modifying Redshift Cluster IAM Roles (%s): %s", d.Id(), err) + } + + d.SetPartial("iam_roles") + } + + if requestUpdate || d.HasChange("iam_roles") { + + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating", "deleting", "rebooting", "resizing", "renaming", "modifying"}, + Target: []string{"available"}, + Refresh: resourceAwsRedshiftClusterStateRefreshFunc(d, meta), + Timeout: 40 * time.Minute, + MinTimeout: 10 * time.Second, + } + + // Wait, catching any errors + _, err := stateConf.WaitForState() + if err != nil { + return fmt.Errorf("[WARN] Error Modifying Redshift Cluster (%s): %s", d.Id(), err) + } + } + + if d.HasChange("enable_logging") || d.HasChange("bucket_name") || d.HasChange("s3_key_prefix") { + var loggingErr error + if _, ok := d.GetOk("enable_logging"); ok { + + log.Printf("[INFO] Enabling Logging for Redshift Cluster %q", d.Id()) + loggingErr = enableRedshiftClusterLogging(d, conn) + if loggingErr != nil { + return loggingErr + } + } else { + + log.Printf("[INFO] Disabling Logging for Redshift Cluster %q", d.Id()) + _, loggingErr = conn.DisableLogging(&redshift.DisableLoggingInput{ + ClusterIdentifier: aws.String(d.Id()), + }) + if loggingErr != nil { + return loggingErr + } + } + + d.SetPartial("enable_logging") + } + + d.Partial(false) + + return resourceAwsRedshiftClusterRead(d, meta) +} + +func enableRedshiftClusterLogging(d *schema.ResourceData, conn *redshift.Redshift) error { + if _, ok := d.GetOk("bucket_name"); !ok { + return fmt.Errorf("bucket_name must be set when enabling logging for Redshift Clusters") + } + + params := &redshift.EnableLoggingInput{ + ClusterIdentifier: aws.String(d.Id()), + BucketName: aws.String(d.Get("bucket_name").(string)), + } + + if v, ok := d.GetOk("s3_key_prefix"); ok { + params.S3KeyPrefix = aws.String(v.(string)) + } + + _, loggingErr := conn.EnableLogging(params) + if loggingErr != nil { + log.Printf("[ERROR] Error Enabling Logging on Redshift Cluster: %s", loggingErr) + return loggingErr + } + return nil +} + +func resourceAwsRedshiftClusterDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).redshiftconn + log.Printf("[DEBUG] Destroying Redshift Cluster (%s)", d.Id()) + + deleteOpts := redshift.DeleteClusterInput{ + ClusterIdentifier: aws.String(d.Id()), + } + + skipFinalSnapshot := d.Get("skip_final_snapshot").(bool) + deleteOpts.SkipFinalClusterSnapshot = aws.Bool(skipFinalSnapshot) + + if skipFinalSnapshot == false { + if name, present := d.GetOk("final_snapshot_identifier"); present { + 
deleteOpts.FinalClusterSnapshotIdentifier = aws.String(name.(string)) + } else { + return fmt.Errorf("Redshift Cluster Instance FinalSnapshotIdentifier is required when a final snapshot is required") + } + } + + log.Printf("[DEBUG] Redshift Cluster delete options: %s", deleteOpts) + err := resource.Retry(15*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteCluster(&deleteOpts) + awsErr, ok := err.(awserr.Error) + if ok && awsErr.Code() == "InvalidClusterState" { + return resource.RetryableError(err) + } + + return resource.NonRetryableError(err) + }) + + if err != nil { + return fmt.Errorf("[ERROR] Error deleting Redshift Cluster (%s): %s", d.Id(), err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"available", "creating", "deleting", "rebooting", "resizing", "renaming", "final-snapshot"}, + Target: []string{"destroyed"}, + Refresh: resourceAwsRedshiftClusterStateRefreshFunc(d, meta), + Timeout: 40 * time.Minute, + MinTimeout: 5 * time.Second, + } + + // Wait, catching any errors + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("[ERROR] Error deleting Redshift Cluster (%s): %s", d.Id(), err) + } + + log.Printf("[INFO] Redshift Cluster %s successfully deleted", d.Id()) + + return nil +} + +func resourceAwsRedshiftClusterStateRefreshFunc(d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + conn := meta.(*AWSClient).redshiftconn + + log.Printf("[INFO] Reading Redshift Cluster Information: %s", d.Id()) + resp, err := conn.DescribeClusters(&redshift.DescribeClustersInput{ + ClusterIdentifier: aws.String(d.Id()), + }) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if "ClusterNotFound" == awsErr.Code() { + return 42, "destroyed", nil + } + } + log.Printf("[WARN] Error on retrieving Redshift Cluster (%s) when waiting: %s", d.Id(), err) + return nil, "", err + } + + var rsc *redshift.Cluster + + for _, c := range resp.Clusters { + if *c.ClusterIdentifier == d.Id() { + rsc = c + } + } + + if rsc == nil { + return 42, "destroyed", nil + } + + if rsc.ClusterStatus != nil { + log.Printf("[DEBUG] Redshift Cluster status (%s): %s", d.Id(), *rsc.ClusterStatus) + } + + return rsc, *rsc.ClusterStatus, nil + } +} + +func validateRedshiftClusterIdentifier(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only lowercase alphanumeric characters and hyphens allowed in %q", k)) + } + if !regexp.MustCompile(`^[a-z]`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "first character of %q must be a letter", k)) + } + if regexp.MustCompile(`--`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q cannot contain two consecutive hyphens", k)) + } + if regexp.MustCompile(`-$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q cannot end with a hyphen", k)) + } + return +} + +func validateRedshiftClusterDbName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[0-9a-z_$]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only lowercase alphanumeric characters, underscores, and dollar signs are allowed in %q", k)) + } + if !regexp.MustCompile(`^[a-zA-Z_]`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "first character of %q must be a letter or underscore", k)) + } + if len(value) > 64 { + errors = append(errors, fmt.Errorf( 
+ "%q cannot be longer than 64 characters: %q", k, value)) + } + if value == "" { + errors = append(errors, fmt.Errorf( + "%q cannot be an empty string", k)) + } + + return +} + +func validateRedshiftClusterFinalSnapshotIdentifier(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only alphanumeric characters and hyphens allowed in %q", k)) + } + if regexp.MustCompile(`--`).MatchString(value) { + errors = append(errors, fmt.Errorf("%q cannot contain two consecutive hyphens", k)) + } + if regexp.MustCompile(`-$`).MatchString(value) { + errors = append(errors, fmt.Errorf("%q cannot end in a hyphen", k)) + } + if len(value) > 255 { + errors = append(errors, fmt.Errorf("%q cannot be more than 255 characters", k)) + } + return +} + +func validateRedshiftClusterMasterUsername(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^\w+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only alphanumeric characters in %q", k)) + } + if !regexp.MustCompile(`^[A-Za-z]`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "first character of %q must be a letter", k)) + } + if len(value) > 128 { + errors = append(errors, fmt.Errorf("%q cannot be more than 128 characters", k)) + } + return +} + +func validateRedshiftClusterMasterPassword(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^.*[a-z].*`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must contain at least one lowercase letter", k)) + } + if !regexp.MustCompile(`^.*[A-Z].*`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must contain at least one uppercase letter", k)) + } + if !regexp.MustCompile(`^.*[0-9].*`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must contain at least one number", k)) + } + if !regexp.MustCompile(`^[^\@\/'" ]*$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q cannot contain [/@\"' ]", k)) + } + if len(value) < 8 { + errors = append(errors, fmt.Errorf("%q must be at least 8 characters", k)) + } + return +} + +func buildRedshiftARN(identifier, partition, accountid, region string) (string, error) { + if partition == "" { + return "", fmt.Errorf("Unable to construct cluster ARN because of missing AWS partition") + } + if accountid == "" { + return "", fmt.Errorf("Unable to construct cluster ARN because of missing AWS Account ID") + } + arn := fmt.Sprintf("arn:%s:redshift:%s:%s:cluster:%s", partition, region, accountid, identifier) + return arn, nil + +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_parameter_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_parameter_group.go new file mode 100644 index 000000000..e94ab8d78 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_parameter_group.go @@ -0,0 +1,242 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "regexp" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/redshift" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsRedshiftParameterGroup() *schema.Resource { + return 
&schema.Resource{ + Create: resourceAwsRedshiftParameterGroupCreate, + Read: resourceAwsRedshiftParameterGroupRead, + Update: resourceAwsRedshiftParameterGroupUpdate, + Delete: resourceAwsRedshiftParameterGroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Required: true, + ValidateFunc: validateRedshiftParamGroupName, + }, + + "family": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "Managed by Terraform", + }, + + "parameter": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: false, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + Set: resourceAwsRedshiftParameterHash, + }, + }, + } +} + +func resourceAwsRedshiftParameterGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).redshiftconn + + createOpts := redshift.CreateClusterParameterGroupInput{ + ParameterGroupName: aws.String(d.Get("name").(string)), + ParameterGroupFamily: aws.String(d.Get("family").(string)), + Description: aws.String(d.Get("description").(string)), + } + + log.Printf("[DEBUG] Create Redshift Parameter Group: %#v", createOpts) + _, err := conn.CreateClusterParameterGroup(&createOpts) + if err != nil { + return fmt.Errorf("Error creating Redshift Parameter Group: %s", err) + } + + d.SetId(*createOpts.ParameterGroupName) + log.Printf("[INFO] Redshift Parameter Group ID: %s", d.Id()) + + return resourceAwsRedshiftParameterGroupUpdate(d, meta) +} + +func resourceAwsRedshiftParameterGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).redshiftconn + + describeOpts := redshift.DescribeClusterParameterGroupsInput{ + ParameterGroupName: aws.String(d.Id()), + } + + describeResp, err := conn.DescribeClusterParameterGroups(&describeOpts) + if err != nil { + return err + } + + if len(describeResp.ParameterGroups) != 1 || + *describeResp.ParameterGroups[0].ParameterGroupName != d.Id() { + d.SetId("") + return fmt.Errorf("Unable to find Parameter Group: %#v", describeResp.ParameterGroups) + } + + d.Set("name", describeResp.ParameterGroups[0].ParameterGroupName) + d.Set("family", describeResp.ParameterGroups[0].ParameterGroupFamily) + d.Set("description", describeResp.ParameterGroups[0].Description) + + describeParametersOpts := redshift.DescribeClusterParametersInput{ + ParameterGroupName: aws.String(d.Id()), + Source: aws.String("user"), + } + + describeParametersResp, err := conn.DescribeClusterParameters(&describeParametersOpts) + if err != nil { + return err + } + + d.Set("parameter", flattenRedshiftParameters(describeParametersResp.Parameters)) + return nil +} + +func resourceAwsRedshiftParameterGroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).redshiftconn + + d.Partial(true) + + if d.HasChange("parameter") { + o, n := d.GetChange("parameter") + if o == nil { + o = new(schema.Set) + } + if n == nil { + n = new(schema.Set) + } + + os := o.(*schema.Set) + ns := n.(*schema.Set) + + // Expand the "parameter" set to aws-sdk-go compat []redshift.Parameter + parameters, err := expandRedshiftParameters(ns.Difference(os).List()) + 
if err != nil { + return err + } + + if len(parameters) > 0 { + modifyOpts := redshift.ModifyClusterParameterGroupInput{ + ParameterGroupName: aws.String(d.Get("name").(string)), + Parameters: parameters, + } + + log.Printf("[DEBUG] Modify Redshift Parameter Group: %s", modifyOpts) + _, err = conn.ModifyClusterParameterGroup(&modifyOpts) + if err != nil { + return fmt.Errorf("Error modifying Redshift Parameter Group: %s", err) + } + } + d.SetPartial("parameter") + } + + d.Partial(false) + return resourceAwsRedshiftParameterGroupRead(d, meta) +} + +func resourceAwsRedshiftParameterGroupDelete(d *schema.ResourceData, meta interface{}) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"destroyed"}, + Refresh: resourceAwsRedshiftParameterGroupDeleteRefreshFunc(d, meta), + Timeout: 3 * time.Minute, + MinTimeout: 1 * time.Second, + } + _, err := stateConf.WaitForState() + return err +} + +func resourceAwsRedshiftParameterGroupDeleteRefreshFunc( + d *schema.ResourceData, + meta interface{}) resource.StateRefreshFunc { + conn := meta.(*AWSClient).redshiftconn + + return func() (interface{}, string, error) { + + deleteOpts := redshift.DeleteClusterParameterGroupInput{ + ParameterGroupName: aws.String(d.Id()), + } + + if _, err := conn.DeleteClusterParameterGroup(&deleteOpts); err != nil { + redshiftErr, ok := err.(awserr.Error) + if !ok { + return d, "error", err + } + + if redshiftErr.Code() != "RedshiftParameterGroupNotFoundFault" { + return d, "error", err + } + } + + return d, "destroyed", nil + } +} + +func resourceAwsRedshiftParameterHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) + // Store the value as a lower case string, to match how we store them in flattenParameters + buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["value"].(string)))) + + return hashcode.String(buf.String()) +} + +func validateRedshiftParamGroupName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only lowercase alphanumeric characters and hyphens allowed in %q", k)) + } + if !regexp.MustCompile(`^[a-z]`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "first character of %q must be a letter", k)) + } + if regexp.MustCompile(`--`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q cannot contain two consecutive hyphens", k)) + } + if regexp.MustCompile(`-$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q cannot end with a hyphen", k)) + } + if len(value) > 255 { + errors = append(errors, fmt.Errorf( + "%q cannot be greater than 255 characters", k)) + } + return +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_security_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_security_group.go new file mode 100644 index 000000000..24a45bfde --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_security_group.go @@ -0,0 +1,400 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "regexp" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/redshift" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/helper/hashcode" + 
"github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsRedshiftSecurityGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsRedshiftSecurityGroupCreate, + Read: resourceAwsRedshiftSecurityGroupRead, + Update: resourceAwsRedshiftSecurityGroupUpdate, + Delete: resourceAwsRedshiftSecurityGroupDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsRedshiftClusterImport, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateRedshiftSecurityGroupName, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "Managed by Terraform", + }, + + "ingress": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cidr": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "security_group_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "security_group_owner_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + }, + Set: resourceAwsRedshiftSecurityGroupIngressHash, + }, + }, + } +} + +func resourceAwsRedshiftSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).redshiftconn + + var err error + var errs []error + + name := d.Get("name").(string) + desc := d.Get("description").(string) + sgInput := &redshift.CreateClusterSecurityGroupInput{ + ClusterSecurityGroupName: aws.String(name), + Description: aws.String(desc), + } + log.Printf("[DEBUG] Redshift security group create: name: %s, description: %s", name, desc) + _, err = conn.CreateClusterSecurityGroup(sgInput) + if err != nil { + return fmt.Errorf("Error creating RedshiftSecurityGroup: %s", err) + } + + d.SetId(d.Get("name").(string)) + + log.Printf("[INFO] Redshift Security Group ID: %s", d.Id()) + sg, err := resourceAwsRedshiftSecurityGroupRetrieve(d, meta) + if err != nil { + return err + } + + ingresses := d.Get("ingress").(*schema.Set) + for _, ing := range ingresses.List() { + err := resourceAwsRedshiftSecurityGroupAuthorizeRule(ing, *sg.ClusterSecurityGroupName, conn) + if err != nil { + errs = append(errs, err) + } + } + + if len(errs) > 0 { + return &multierror.Error{Errors: errs} + } + + log.Println("[INFO] Waiting for Redshift Security Group Ingress Authorizations to be authorized") + stateConf := &resource.StateChangeConf{ + Pending: []string{"authorizing"}, + Target: []string{"authorized"}, + Refresh: resourceAwsRedshiftSecurityGroupStateRefreshFunc(d, meta), + Timeout: 10 * time.Minute, + } + + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + return resourceAwsRedshiftSecurityGroupRead(d, meta) +} + +func resourceAwsRedshiftSecurityGroupRead(d *schema.ResourceData, meta interface{}) error { + sg, err := resourceAwsRedshiftSecurityGroupRetrieve(d, meta) + if err != nil { + return err + } + + rules := &schema.Set{ + F: resourceAwsRedshiftSecurityGroupIngressHash, + } + + for _, v := range sg.IPRanges { + rule := map[string]interface{}{"cidr": *v.CIDRIP} + rules.Add(rule) + } + + for _, g := range sg.EC2SecurityGroups { + rule := map[string]interface{}{ + "security_group_name": *g.EC2SecurityGroupName, + "security_group_owner_id": *g.EC2SecurityGroupOwnerId, + } + rules.Add(rule) + } + + d.Set("ingress", rules) + d.Set("name", 
*sg.ClusterSecurityGroupName)
+ d.Set("description", *sg.Description)
+
+ return nil
+}
+
+func resourceAwsRedshiftSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).redshiftconn
+
+ if d.HasChange("ingress") {
+ o, n := d.GetChange("ingress")
+ if o == nil {
+ o = new(schema.Set)
+ }
+ if n == nil {
+ n = new(schema.Set)
+ }
+
+ os := o.(*schema.Set)
+ ns := n.(*schema.Set)
+
+ removeIngressRules, err := expandRedshiftSGRevokeIngress(os.Difference(ns).List())
+ if err != nil {
+ return err
+ }
+ if len(removeIngressRules) > 0 {
+ for _, r := range removeIngressRules {
+ r.ClusterSecurityGroupName = aws.String(d.Id())
+
+ _, err := conn.RevokeClusterSecurityGroupIngress(&r)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ addIngressRules, err := expandRedshiftSGAuthorizeIngress(ns.Difference(os).List())
+ if err != nil {
+ return err
+ }
+ if len(addIngressRules) > 0 {
+ for _, r := range addIngressRules {
+ r.ClusterSecurityGroupName = aws.String(d.Id())
+
+ _, err := conn.AuthorizeClusterSecurityGroupIngress(&r)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ }
+ return resourceAwsRedshiftSecurityGroupRead(d, meta)
+}
+
+func resourceAwsRedshiftSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).redshiftconn
+
+ log.Printf("[DEBUG] Redshift Security Group destroy: %v", d.Id())
+ opts := redshift.DeleteClusterSecurityGroupInput{
+ ClusterSecurityGroupName: aws.String(d.Id()),
+ }
+
+ log.Printf("[DEBUG] Redshift Security Group destroy configuration: %v", opts)
+ _, err := conn.DeleteClusterSecurityGroup(&opts)
+
+ if err != nil {
+ newerr, ok := err.(awserr.Error)
+ if ok && newerr.Code() == "InvalidRedshiftSecurityGroup.NotFound" {
+ return nil
+ }
+ return err
+ }
+
+ return nil
+}
+
+func resourceAwsRedshiftSecurityGroupRetrieve(d *schema.ResourceData, meta interface{}) (*redshift.ClusterSecurityGroup, error) {
+ conn := meta.(*AWSClient).redshiftconn
+
+ opts := redshift.DescribeClusterSecurityGroupsInput{
+ ClusterSecurityGroupName: aws.String(d.Id()),
+ }
+
+ log.Printf("[DEBUG] Redshift Security Group describe configuration: %#v", opts)
+
+ resp, err := conn.DescribeClusterSecurityGroups(&opts)
+
+ if err != nil {
+ return nil, fmt.Errorf("Error retrieving Redshift Security Groups: %s", err)
+ }
+
+ if len(resp.ClusterSecurityGroups) != 1 ||
+ *resp.ClusterSecurityGroups[0].ClusterSecurityGroupName != d.Id() {
+ return nil, fmt.Errorf("Unable to find Redshift Security Group: %#v", resp.ClusterSecurityGroups)
+ }
+
+ return resp.ClusterSecurityGroups[0], nil
+}
+
+func validateRedshiftSecurityGroupName(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ if value == "default" {
+ errors = append(errors, fmt.Errorf("the Redshift Security Group name cannot be %q", value))
+ }
+ if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "only lowercase alphanumeric characters and hyphens allowed in %q: %q",
+ k, value))
+ }
+ if len(value) > 255 {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot be longer than 255 characters: %q", k, value))
+ }
+ return
+
+}
+
+func resourceAwsRedshiftSecurityGroupIngressHash(v interface{}) int {
+ var buf bytes.Buffer
+ m := v.(map[string]interface{})
+
+ if v, ok := m["cidr"]; ok {
+ buf.WriteString(fmt.Sprintf("%s-", v.(string)))
+ }
+
+ if v, ok := m["security_group_name"]; ok {
+ buf.WriteString(fmt.Sprintf("%s-", v.(string)))
+ }
+
+ if v, ok := m["security_group_owner_id"]; ok {
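+ // Hashing the owner ID as well keeps rules that reference EC2 security
+ // groups with the same name in different AWS accounts distinct set members.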
buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + return hashcode.String(buf.String()) +} + +func resourceAwsRedshiftSecurityGroupAuthorizeRule(ingress interface{}, redshiftSecurityGroupName string, conn *redshift.Redshift) error { + ing := ingress.(map[string]interface{}) + + opts := redshift.AuthorizeClusterSecurityGroupIngressInput{ + ClusterSecurityGroupName: aws.String(redshiftSecurityGroupName), + } + + if attr, ok := ing["cidr"]; ok && attr != "" { + opts.CIDRIP = aws.String(attr.(string)) + } + + if attr, ok := ing["security_group_name"]; ok && attr != "" { + opts.EC2SecurityGroupName = aws.String(attr.(string)) + } + + if attr, ok := ing["security_group_owner_id"]; ok && attr != "" { + opts.EC2SecurityGroupOwnerId = aws.String(attr.(string)) + } + + log.Printf("[DEBUG] Authorize ingress rule configuration: %#v", opts) + _, err := conn.AuthorizeClusterSecurityGroupIngress(&opts) + + if err != nil { + return fmt.Errorf("Error authorizing security group ingress: %s", err) + } + + return nil +} + +func resourceAwsRedshiftSecurityGroupStateRefreshFunc( + d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + v, err := resourceAwsRedshiftSecurityGroupRetrieve(d, meta) + + if err != nil { + log.Printf("Error on retrieving Redshift Security Group when waiting: %s", err) + return nil, "", err + } + + statuses := make([]string, 0, len(v.EC2SecurityGroups)+len(v.IPRanges)) + for _, ec2g := range v.EC2SecurityGroups { + statuses = append(statuses, *ec2g.Status) + } + for _, ips := range v.IPRanges { + statuses = append(statuses, *ips.Status) + } + + for _, stat := range statuses { + // Not done + if stat != "authorized" { + return nil, "authorizing", nil + } + } + + return v, "authorized", nil + } +} + +func expandRedshiftSGAuthorizeIngress(configured []interface{}) ([]redshift.AuthorizeClusterSecurityGroupIngressInput, error) { + var ingress []redshift.AuthorizeClusterSecurityGroupIngressInput + + // Loop over our configured parameters and create + // an array of aws-sdk-go compatible objects + for _, pRaw := range configured { + data := pRaw.(map[string]interface{}) + + i := redshift.AuthorizeClusterSecurityGroupIngressInput{} + + if v, ok := data["cidr"]; ok { + i.CIDRIP = aws.String(v.(string)) + } + + if v, ok := data["security_group_name"]; ok { + i.EC2SecurityGroupName = aws.String(v.(string)) + } + + if v, ok := data["security_group_owner_id"]; ok { + i.EC2SecurityGroupOwnerId = aws.String(v.(string)) + } + + ingress = append(ingress, i) + } + + return ingress, nil +} + +func expandRedshiftSGRevokeIngress(configured []interface{}) ([]redshift.RevokeClusterSecurityGroupIngressInput, error) { + var ingress []redshift.RevokeClusterSecurityGroupIngressInput + + // Loop over our configured parameters and create + // an array of aws-sdk-go compatible objects + for _, pRaw := range configured { + data := pRaw.(map[string]interface{}) + + i := redshift.RevokeClusterSecurityGroupIngressInput{} + + if v, ok := data["cidr"]; ok { + i.CIDRIP = aws.String(v.(string)) + } + + if v, ok := data["security_group_name"]; ok { + i.EC2SecurityGroupName = aws.String(v.(string)) + } + + if v, ok := data["security_group_owner_id"]; ok { + i.EC2SecurityGroupOwnerId = aws.String(v.(string)) + } + + ingress = append(ingress, i) + } + + return ingress, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_subnet_group.go 
b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_subnet_group.go new file mode 100644 index 000000000..118abffe4 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_redshift_subnet_group.go @@ -0,0 +1,220 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/redshift" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsRedshiftSubnetGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsRedshiftSubnetGroupCreate, + Read: resourceAwsRedshiftSubnetGroupRead, + Update: resourceAwsRedshiftSubnetGroupUpdate, + Delete: resourceAwsRedshiftSubnetGroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + ValidateFunc: validateRedshiftSubnetGroupName, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Default: "Managed by Terraform", + }, + + "subnet_ids": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsRedshiftSubnetGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).redshiftconn + + subnetIdsSet := d.Get("subnet_ids").(*schema.Set) + subnetIds := make([]*string, subnetIdsSet.Len()) + for i, subnetId := range subnetIdsSet.List() { + subnetIds[i] = aws.String(subnetId.(string)) + } + tags := tagsFromMapRedshift(d.Get("tags").(map[string]interface{})) + + createOpts := redshift.CreateClusterSubnetGroupInput{ + ClusterSubnetGroupName: aws.String(d.Get("name").(string)), + Description: aws.String(d.Get("description").(string)), + SubnetIds: subnetIds, + Tags: tags, + } + + log.Printf("[DEBUG] Create Redshift Subnet Group: %#v", createOpts) + _, err := conn.CreateClusterSubnetGroup(&createOpts) + if err != nil { + return fmt.Errorf("Error creating Redshift Subnet Group: %s", err) + } + + d.SetId(*createOpts.ClusterSubnetGroupName) + log.Printf("[INFO] Redshift Subnet Group ID: %s", d.Id()) + return resourceAwsRedshiftSubnetGroupRead(d, meta) +} + +func resourceAwsRedshiftSubnetGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).redshiftconn + + describeOpts := redshift.DescribeClusterSubnetGroupsInput{ + ClusterSubnetGroupName: aws.String(d.Id()), + } + + describeResp, err := conn.DescribeClusterSubnetGroups(&describeOpts) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "ClusterSubnetGroupNotFoundFault" { + log.Printf("[INFO] Redshift Subnet Group: %s was not found", d.Id()) + d.SetId("") + return nil + } + return err + } + + if len(describeResp.ClusterSubnetGroups) == 0 { + return fmt.Errorf("Unable to find Redshift Subnet Group: %#v", describeResp.ClusterSubnetGroups) + } + + d.Set("name", d.Id()) + d.Set("description", describeResp.ClusterSubnetGroups[0].Description) + d.Set("subnet_ids", subnetIdsToSlice(describeResp.ClusterSubnetGroups[0].Subnets)) + if err := d.Set("tags", tagsToMapRedshift(describeResp.ClusterSubnetGroups[0].Tags)); err != nil { + return fmt.Errorf("[DEBUG] Error setting Redshift Subnet Group Tags: %#v", err) + } + + return nil +} + +func resourceAwsRedshiftSubnetGroupUpdate(d 
*schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).redshiftconn + + arn, tagErr := buildRedshiftSubnetGroupARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region) + if tagErr != nil { + return fmt.Errorf("Error building ARN for Redshift Subnet Group, not updating Tags for Subnet Group %s", d.Id()) + } else { + if tagErr := setTagsRedshift(conn, d, arn); tagErr != nil { + return tagErr + } + } + + if d.HasChange("subnet_ids") || d.HasChange("description") { + _, n := d.GetChange("subnet_ids") + if n == nil { + n = new(schema.Set) + } + ns := n.(*schema.Set) + + var sIds []*string + for _, s := range ns.List() { + sIds = append(sIds, aws.String(s.(string))) + } + + _, err := conn.ModifyClusterSubnetGroup(&redshift.ModifyClusterSubnetGroupInput{ + ClusterSubnetGroupName: aws.String(d.Id()), + Description: aws.String(d.Get("description").(string)), + SubnetIds: sIds, + }) + + if err != nil { + return err + } + } + + return nil +} + +func resourceAwsRedshiftSubnetGroupDelete(d *schema.ResourceData, meta interface{}) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"destroyed"}, + Refresh: resourceAwsRedshiftSubnetGroupDeleteRefreshFunc(d, meta), + Timeout: 3 * time.Minute, + MinTimeout: 1 * time.Second, + } + _, err := stateConf.WaitForState() + return err +} + +func resourceAwsRedshiftSubnetGroupDeleteRefreshFunc(d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { + conn := meta.(*AWSClient).redshiftconn + + return func() (interface{}, string, error) { + + deleteOpts := redshift.DeleteClusterSubnetGroupInput{ + ClusterSubnetGroupName: aws.String(d.Id()), + } + + if _, err := conn.DeleteClusterSubnetGroup(&deleteOpts); err != nil { + redshiftErr, ok := err.(awserr.Error) + if !ok { + return d, "error", err + } + + if redshiftErr.Code() != "ClusterSubnetGroupNotFoundFault" { + return d, "error", err + } + } + + return d, "destroyed", nil + } +} + +func subnetIdsToSlice(subnetIds []*redshift.Subnet) []string { + subnetsSlice := make([]string, 0, len(subnetIds)) + for _, s := range subnetIds { + subnetsSlice = append(subnetsSlice, *s.SubnetIdentifier) + } + return subnetsSlice +} + +func validateRedshiftSubnetGroupName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only lowercase alphanumeric characters and hyphens allowed in %q", k)) + } + if len(value) > 255 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 255 characters", k)) + } + if regexp.MustCompile(`(?i)^default$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q is not allowed as %q", "Default", k)) + } + return +} + +func buildRedshiftSubnetGroupARN(identifier, partition, accountid, region string) (string, error) { + if partition == "" { + return "", fmt.Errorf("Unable to construct Subnet Group ARN because of missing AWS partition") + } + if accountid == "" { + return "", fmt.Errorf("Unable to construct Subnet Group ARN because of missing AWS Account ID") + } + arn := fmt.Sprintf("arn:%s:redshift:%s:%s:subnetgroup:%s", partition, region, accountid, identifier) + return arn, nil + +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route.go new file mode 100644 index 000000000..85151b089 --- /dev/null +++ 
b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route.go @@ -0,0 +1,498 @@ +package aws + +import ( + "errors" + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +// How long to sleep if a limit-exceeded event happens +var routeTargetValidationError = errors.New("Error: more than 1 target specified. Only 1 of gateway_id, " + + "egress_only_gateway_id, nat_gateway_id, instance_id, network_interface_id or " + + "vpc_peering_connection_id is allowed.") + +// AWS Route resource Schema declaration +func resourceAwsRoute() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsRouteCreate, + Read: resourceAwsRouteRead, + Update: resourceAwsRouteUpdate, + Delete: resourceAwsRouteDelete, + Exists: resourceAwsRouteExists, + + Schema: map[string]*schema.Schema{ + "destination_cidr_block": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "destination_ipv6_cidr_block": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "destination_prefix_list_id": { + Type: schema.TypeString, + Computed: true, + }, + + "gateway_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "egress_only_gateway_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "nat_gateway_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "instance_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "instance_owner_id": { + Type: schema.TypeString, + Computed: true, + }, + + "network_interface_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "origin": { + Type: schema.TypeString, + Computed: true, + }, + + "state": { + Type: schema.TypeString, + Computed: true, + }, + + "route_table_id": { + Type: schema.TypeString, + Required: true, + }, + + "vpc_peering_connection_id": { + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func resourceAwsRouteCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + var numTargets int + var setTarget string + allowedTargets := []string{ + "egress_only_gateway_id", + "gateway_id", + "nat_gateway_id", + "instance_id", + "network_interface_id", + "vpc_peering_connection_id", + } + + // Check if more than 1 target is specified + for _, target := range allowedTargets { + if len(d.Get(target).(string)) > 0 { + numTargets++ + setTarget = target + } + } + + if numTargets > 1 { + return routeTargetValidationError + } + + createOpts := &ec2.CreateRouteInput{} + // Formulate CreateRouteInput based on the target type + switch setTarget { + case "gateway_id": + createOpts = &ec2.CreateRouteInput{ + RouteTableId: aws.String(d.Get("route_table_id").(string)), + GatewayId: aws.String(d.Get("gateway_id").(string)), + } + + if v, ok := d.GetOk("destination_cidr_block"); ok { + createOpts.DestinationCidrBlock = aws.String(v.(string)) + } + + if v, ok := d.GetOk("destination_ipv6_cidr_block"); ok { + createOpts.DestinationIpv6CidrBlock = aws.String(v.(string)) + } + + case "egress_only_gateway_id": + createOpts = &ec2.CreateRouteInput{ + RouteTableId: aws.String(d.Get("route_table_id").(string)), + DestinationIpv6CidrBlock: aws.String(d.Get("destination_ipv6_cidr_block").(string)), + 
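+ // Egress-only internet gateways are IPv6-only, so this target is always
+ // paired with an IPv6 destination block rather than an IPv4 one.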
EgressOnlyInternetGatewayId: aws.String(d.Get("egress_only_gateway_id").(string)), + } + case "nat_gateway_id": + createOpts = &ec2.CreateRouteInput{ + RouteTableId: aws.String(d.Get("route_table_id").(string)), + DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)), + NatGatewayId: aws.String(d.Get("nat_gateway_id").(string)), + } + case "instance_id": + createOpts = &ec2.CreateRouteInput{ + RouteTableId: aws.String(d.Get("route_table_id").(string)), + DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)), + InstanceId: aws.String(d.Get("instance_id").(string)), + } + case "network_interface_id": + createOpts = &ec2.CreateRouteInput{ + RouteTableId: aws.String(d.Get("route_table_id").(string)), + DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)), + NetworkInterfaceId: aws.String(d.Get("network_interface_id").(string)), + } + case "vpc_peering_connection_id": + createOpts = &ec2.CreateRouteInput{ + RouteTableId: aws.String(d.Get("route_table_id").(string)), + DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)), + VpcPeeringConnectionId: aws.String(d.Get("vpc_peering_connection_id").(string)), + } + default: + return fmt.Errorf("An invalid target type specified: %s", setTarget) + } + log.Printf("[DEBUG] Route create config: %s", createOpts) + + // Create the route + var err error + + err = resource.Retry(2*time.Minute, func() *resource.RetryError { + _, err = conn.CreateRoute(createOpts) + + if err != nil { + ec2err, ok := err.(awserr.Error) + if !ok { + return resource.NonRetryableError(err) + } + if ec2err.Code() == "InvalidParameterException" { + log.Printf("[DEBUG] Trying to create route again: %q", ec2err.Message()) + return resource.RetryableError(err) + } + + return resource.NonRetryableError(err) + } + + return nil + }) + if err != nil { + return fmt.Errorf("Error creating route: %s", err) + } + + var route *ec2.Route + + if v, ok := d.GetOk("destination_cidr_block"); ok { + err = resource.Retry(2*time.Minute, func() *resource.RetryError { + route, err = findResourceRoute(conn, d.Get("route_table_id").(string), v.(string), "") + return resource.RetryableError(err) + }) + if err != nil { + return fmt.Errorf("Error finding route after creating it: %s", err) + } + } + + if v, ok := d.GetOk("destination_ipv6_cidr_block"); ok { + err = resource.Retry(2*time.Minute, func() *resource.RetryError { + route, err = findResourceRoute(conn, d.Get("route_table_id").(string), "", v.(string)) + return resource.RetryableError(err) + }) + if err != nil { + return fmt.Errorf("Error finding route after creating it: %s", err) + } + } + + d.SetId(routeIDHash(d, route)) + resourceAwsRouteSetResourceData(d, route) + return nil +} + +func resourceAwsRouteRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + routeTableId := d.Get("route_table_id").(string) + + destinationCidrBlock := d.Get("destination_cidr_block").(string) + destinationIpv6CidrBlock := d.Get("destination_ipv6_cidr_block").(string) + + route, err := findResourceRoute(conn, routeTableId, destinationCidrBlock, destinationIpv6CidrBlock) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidRouteTableID.NotFound" { + log.Printf("[WARN] Route Table %q could not be found. 
Removing Route from state.",
+ routeTableId)
+ d.SetId("")
+ return nil
+ }
+ return err
+ }
+ resourceAwsRouteSetResourceData(d, route)
+ return nil
+}
+
+func resourceAwsRouteSetResourceData(d *schema.ResourceData, route *ec2.Route) {
+ d.Set("destination_prefix_list_id", route.DestinationPrefixListId)
+ d.Set("gateway_id", route.GatewayId)
+ d.Set("egress_only_gateway_id", route.EgressOnlyInternetGatewayId)
+ d.Set("nat_gateway_id", route.NatGatewayId)
+ d.Set("instance_id", route.InstanceId)
+ d.Set("instance_owner_id", route.InstanceOwnerId)
+ d.Set("network_interface_id", route.NetworkInterfaceId)
+ d.Set("origin", route.Origin)
+ d.Set("state", route.State)
+ d.Set("vpc_peering_connection_id", route.VpcPeeringConnectionId)
+}
+
+func resourceAwsRouteUpdate(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).ec2conn
+ var numTargets int
+ var setTarget string
+
+ allowedTargets := []string{
+ "egress_only_gateway_id",
+ "gateway_id",
+ "nat_gateway_id",
+ "network_interface_id",
+ "instance_id",
+ "vpc_peering_connection_id",
+ }
+ replaceOpts := &ec2.ReplaceRouteInput{}
+
+ // Check if more than 1 target is specified
+ for _, target := range allowedTargets {
+ if len(d.Get(target).(string)) > 0 {
+ numTargets++
+ setTarget = target
+ }
+ }
+
+ switch setTarget {
+ // instance_id is a special case due to the fact that AWS will "discover"
+ // the network_interface_id when it creates the route and return that data.
+ // In the case of an update, we should therefore ignore the existing
+ // network_interface_id.
+ case "instance_id":
+ if numTargets > 2 || (numTargets == 2 && len(d.Get("network_interface_id").(string)) == 0) {
+ return routeTargetValidationError
+ }
+ default:
+ if numTargets > 1 {
+ return routeTargetValidationError
+ }
+ }
+
+ // Formulate ReplaceRouteInput based on the target type
+ switch setTarget {
+ case "gateway_id":
+ replaceOpts = &ec2.ReplaceRouteInput{
+ RouteTableId: aws.String(d.Get("route_table_id").(string)),
+ DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)),
+ GatewayId: aws.String(d.Get("gateway_id").(string)),
+ }
+ case "egress_only_gateway_id":
+ replaceOpts = &ec2.ReplaceRouteInput{
+ RouteTableId: aws.String(d.Get("route_table_id").(string)),
+ DestinationIpv6CidrBlock: aws.String(d.Get("destination_ipv6_cidr_block").(string)),
+ EgressOnlyInternetGatewayId: aws.String(d.Get("egress_only_gateway_id").(string)),
+ }
+ case "nat_gateway_id":
+ replaceOpts = &ec2.ReplaceRouteInput{
+ RouteTableId: aws.String(d.Get("route_table_id").(string)),
+ DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)),
+ NatGatewayId: aws.String(d.Get("nat_gateway_id").(string)),
+ }
+ case "instance_id":
+ replaceOpts = &ec2.ReplaceRouteInput{
+ RouteTableId: aws.String(d.Get("route_table_id").(string)),
+ DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)),
+ InstanceId: aws.String(d.Get("instance_id").(string)),
+ }
+ case "network_interface_id":
+ replaceOpts = &ec2.ReplaceRouteInput{
+ RouteTableId: aws.String(d.Get("route_table_id").(string)),
+ DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)),
+ NetworkInterfaceId: aws.String(d.Get("network_interface_id").(string)),
+ }
+ case "vpc_peering_connection_id":
+ replaceOpts = &ec2.ReplaceRouteInput{
+ RouteTableId: aws.String(d.Get("route_table_id").(string)),
+ DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)),
+ VpcPeeringConnectionId:
aws.String(d.Get("vpc_peering_connection_id").(string)), + } + default: + return fmt.Errorf("An invalid target type specified: %s", setTarget) + } + log.Printf("[DEBUG] Route replace config: %s", replaceOpts) + + // Replace the route + _, err := conn.ReplaceRoute(replaceOpts) + if err != nil { + return err + } + + return nil +} + +func resourceAwsRouteDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + deleteOpts := &ec2.DeleteRouteInput{ + RouteTableId: aws.String(d.Get("route_table_id").(string)), + } + if v, ok := d.GetOk("destination_cidr_block"); ok { + deleteOpts.DestinationCidrBlock = aws.String(v.(string)) + } + if v, ok := d.GetOk("destination_ipv6_cidr_block"); ok { + deleteOpts.DestinationIpv6CidrBlock = aws.String(v.(string)) + } + log.Printf("[DEBUG] Route delete opts: %s", deleteOpts) + + var err error + err = resource.Retry(5*time.Minute, func() *resource.RetryError { + log.Printf("[DEBUG] Trying to delete route with opts %s", deleteOpts) + resp, err := conn.DeleteRoute(deleteOpts) + log.Printf("[DEBUG] Route delete result: %s", resp) + + if err == nil { + return nil + } + + ec2err, ok := err.(awserr.Error) + if !ok { + return resource.NonRetryableError(err) + } + if ec2err.Code() == "InvalidParameterException" { + log.Printf("[DEBUG] Trying to delete route again: %q", + ec2err.Message()) + return resource.RetryableError(err) + } + + return resource.NonRetryableError(err) + }) + + if err != nil { + return err + } + + d.SetId("") + return nil +} + +func resourceAwsRouteExists(d *schema.ResourceData, meta interface{}) (bool, error) { + conn := meta.(*AWSClient).ec2conn + routeTableId := d.Get("route_table_id").(string) + + findOpts := &ec2.DescribeRouteTablesInput{ + RouteTableIds: []*string{&routeTableId}, + } + + res, err := conn.DescribeRouteTables(findOpts) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidRouteTableID.NotFound" { + log.Printf("[WARN] Route Table %q could not be found.", routeTableId) + return false, nil + } + return false, fmt.Errorf("Error while checking if route exists: %s", err) + } + + if len(res.RouteTables) < 1 || res.RouteTables[0] == nil { + log.Printf("[WARN] Route Table %q is gone, or route does not exist.", + routeTableId) + return false, nil + } + + if v, ok := d.GetOk("destination_cidr_block"); ok { + for _, route := range (*res.RouteTables[0]).Routes { + if route.DestinationCidrBlock != nil && *route.DestinationCidrBlock == v.(string) { + return true, nil + } + } + } + + if v, ok := d.GetOk("destination_ipv6_cidr_block"); ok { + for _, route := range (*res.RouteTables[0]).Routes { + if route.DestinationIpv6CidrBlock != nil && *route.DestinationIpv6CidrBlock == v.(string) { + return true, nil + } + } + } + + return false, nil +} + +// Create an ID for a route +func routeIDHash(d *schema.ResourceData, r *ec2.Route) string { + + if r.DestinationIpv6CidrBlock != nil && *r.DestinationIpv6CidrBlock != "" { + return fmt.Sprintf("r-%s%d", d.Get("route_table_id").(string), hashcode.String(*r.DestinationIpv6CidrBlock)) + } + + return fmt.Sprintf("r-%s%d", d.Get("route_table_id").(string), hashcode.String(*r.DestinationCidrBlock)) +} + +// Helper: retrieve a route +func findResourceRoute(conn *ec2.EC2, rtbid string, cidr string, ipv6cidr string) (*ec2.Route, error) { + routeTableID := rtbid + + findOpts := &ec2.DescribeRouteTablesInput{ + RouteTableIds: []*string{&routeTableID}, + } + + resp, err := conn.DescribeRouteTables(findOpts) + if err != nil { + return nil, err + 
}
+
+ if len(resp.RouteTables) < 1 || resp.RouteTables[0] == nil {
+ return nil, fmt.Errorf("Route Table %q is gone, or route does not exist.",
+ routeTableID)
+ }
+
+ if cidr != "" {
+ for _, route := range (*resp.RouteTables[0]).Routes {
+ if route.DestinationCidrBlock != nil && *route.DestinationCidrBlock == cidr {
+ return route, nil
+ }
+ }
+
+ return nil, fmt.Errorf("Unable to find matching route for Route Table (%s) "+
+ "and destination CIDR block (%s).", rtbid, cidr)
+ }
+
+ if ipv6cidr != "" {
+ for _, route := range (*resp.RouteTables[0]).Routes {
+ if route.DestinationIpv6CidrBlock != nil && *route.DestinationIpv6CidrBlock == ipv6cidr {
+ return route, nil
+ }
+ }
+
+ return nil, fmt.Errorf("Unable to find matching route for Route Table (%s) "+
+ "and destination IPv6 CIDR block (%s).", rtbid, ipv6cidr)
+ }
+
+ return nil, fmt.Errorf("When trying to find a matching route for Route Table %q "+
+ "you need to specify a CIDR block or an IPv6 CIDR block", rtbid)
+
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_delegation_set.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_delegation_set.go
new file mode 100644
index 000000000..34f96ddf5
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_delegation_set.go
@@ -0,0 +1,111 @@
+package aws
+
+import (
+ "log"
+ "sort"
+ "strings"
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/helper/schema"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/route53"
+)
+
+func resourceAwsRoute53DelegationSet() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceAwsRoute53DelegationSetCreate,
+ Read: resourceAwsRoute53DelegationSetRead,
+ Delete: resourceAwsRoute53DelegationSetDelete,
+ Importer: &schema.ResourceImporter{
+ State: schema.ImportStatePassthrough,
+ },
+
+ Schema: map[string]*schema.Schema{
+ "reference_name": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+
+ "name_servers": &schema.Schema{
+ Type: schema.TypeList,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Computed: true,
+ },
+ },
+ }
+}
+
+func resourceAwsRoute53DelegationSetCreate(d *schema.ResourceData, meta interface{}) error {
+ r53 := meta.(*AWSClient).r53conn
+
+ callerRef := resource.UniqueId()
+ if v, ok := d.GetOk("reference_name"); ok {
+ callerRef = strings.Join([]string{
+ v.(string), "-", callerRef,
+ }, "")
+ }
+ input := &route53.CreateReusableDelegationSetInput{
+ CallerReference: aws.String(callerRef),
+ }
+
+ log.Printf("[DEBUG] Creating Route53 reusable delegation set: %#v", input)
+ out, err := r53.CreateReusableDelegationSet(input)
+ if err != nil {
+ return err
+ }
+ log.Printf("[DEBUG] Route53 reusable delegation set created: %#v", out)
+
+ set := out.DelegationSet
+ d.SetId(cleanDelegationSetId(*set.Id))
+ d.Set("name_servers", expandNameServers(set.NameServers))
+ return nil
+}
+
+func resourceAwsRoute53DelegationSetRead(d *schema.ResourceData, meta interface{}) error {
+ r53 := meta.(*AWSClient).r53conn
+
+ input := &route53.GetReusableDelegationSetInput{
+ Id: aws.String(cleanDelegationSetId(d.Id())),
+ }
+ log.Printf("[DEBUG] Reading Route53 reusable delegation set: %#v", input)
+ out, err := r53.GetReusableDelegationSet(input)
+ if err != nil {
+ return err
+ }
+ log.Printf("[DEBUG] Route53 reusable delegation set received: %#v", out)
+
+ set := out.DelegationSet
+
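+ // The service returns IDs in the form "/delegationset/<id>";
+ // cleanDelegationSetId strips that prefix so only the bare ID is stored,
+ // keeping the Terraform resource ID stable between create and read.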
+ d.SetId(cleanDelegationSetId(*set.Id))
+ d.Set("name_servers", expandNameServers(set.NameServers))
+
+ return nil
+}
+
+func resourceAwsRoute53DelegationSetDelete(d *schema.ResourceData, meta interface{}) error {
+ r53 := meta.(*AWSClient).r53conn
+
+ input := &route53.DeleteReusableDelegationSetInput{
+ Id: aws.String(cleanDelegationSetId(d.Id())),
+ }
+ log.Printf("[DEBUG] Deleting Route53 reusable delegation set: %#v", input)
+ _, err := r53.DeleteReusableDelegationSet(input)
+ return err
+}
+
+func expandNameServers(name_servers []*string) []string {
+ log.Printf("[DEBUG] Processing %d name servers: %#v...", len(name_servers), name_servers)
+ ns := make([]string, len(name_servers))
+ for i, server := range name_servers {
+ ns[i] = *server
+ }
+ sort.Strings(ns)
+ log.Printf("[DEBUG] Returning processed name servers: %#v", ns)
+ return ns
+}
+
+func cleanDelegationSetId(id string) string {
+ return strings.TrimPrefix(id, "/delegationset/")
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_health_check.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_health_check.go
new file mode 100644
index 000000000..6cf4ee205
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_health_check.go
@@ -0,0 +1,379 @@
+package aws
+
+import (
+ "fmt"
+ "log"
+ "strings"
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/helper/schema"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/route53"
+)
+
+func resourceAwsRoute53HealthCheck() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceAwsRoute53HealthCheckCreate,
+ Read: resourceAwsRoute53HealthCheckRead,
+ Update: resourceAwsRoute53HealthCheckUpdate,
+ Delete: resourceAwsRoute53HealthCheckDelete,
+ Importer: &schema.ResourceImporter{
+ State: schema.ImportStatePassthrough,
+ },
+
+ Schema: map[string]*schema.Schema{
+ "type": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ StateFunc: func(val interface{}) string {
+ return strings.ToUpper(val.(string))
+ },
+ },
+ "failure_threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ },
+ "request_interval": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true, // TODO: this should be updatable, but the awslabs route53 service doesn't have the ability
+ },
+ "ip_address": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ "fqdn": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "port": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ },
+
+ "invert_healthcheck": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ },
+
+ "resource_path": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+
+ "search_string": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+
+ "measure_latency": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ Default: false,
+ ForceNew: true,
+ },
+
+ "child_healthchecks": &schema.Schema{
+ Type: schema.TypeSet,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Optional: true,
+ Set: schema.HashString,
+ },
+ "child_health_threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
+ value := v.(int)
+ if value > 256 {
+ es = append(es, fmt.Errorf(
+ "Child HealthThreshold
cannot be more than 256")) + } + return + }, + }, + + "cloudwatch_alarm_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "cloudwatch_alarm_region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "insufficient_data_health_status": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "reference_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "enable_sni": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsRoute53HealthCheckUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).r53conn + + updateHealthCheck := &route53.UpdateHealthCheckInput{ + HealthCheckId: aws.String(d.Id()), + } + + if d.HasChange("failure_threshold") { + updateHealthCheck.FailureThreshold = aws.Int64(int64(d.Get("failure_threshold").(int))) + } + + if d.HasChange("fqdn") { + updateHealthCheck.FullyQualifiedDomainName = aws.String(d.Get("fqdn").(string)) + } + + if d.HasChange("port") { + updateHealthCheck.Port = aws.Int64(int64(d.Get("port").(int))) + } + + if d.HasChange("resource_path") { + updateHealthCheck.ResourcePath = aws.String(d.Get("resource_path").(string)) + } + + if d.HasChange("invert_healthcheck") { + updateHealthCheck.Inverted = aws.Bool(d.Get("invert_healthcheck").(bool)) + } + + if d.HasChange("child_healthchecks") { + updateHealthCheck.ChildHealthChecks = expandStringList(d.Get("child_healthchecks").(*schema.Set).List()) + + } + if d.HasChange("child_health_threshold") { + updateHealthCheck.HealthThreshold = aws.Int64(int64(d.Get("child_health_threshold").(int))) + } + + if d.HasChange("search_string") { + updateHealthCheck.SearchString = aws.String(d.Get("search_string").(string)) + } + + if d.HasChange("cloudwatch_alarm_name") || d.HasChange("cloudwatch_alarm_region") { + cloudwatchAlarm := &route53.AlarmIdentifier{ + Name: aws.String(d.Get("cloudwatch_alarm_name").(string)), + Region: aws.String(d.Get("cloudwatch_alarm_region").(string)), + } + + updateHealthCheck.AlarmIdentifier = cloudwatchAlarm + } + + if d.HasChange("insufficient_data_health_status") { + updateHealthCheck.InsufficientDataHealthStatus = aws.String(d.Get("insufficient_data_health_status").(string)) + } + + if d.HasChange("enable_sni") { + updateHealthCheck.EnableSNI = aws.Bool(d.Get("enable_sni").(bool)) + } + + _, err := conn.UpdateHealthCheck(updateHealthCheck) + if err != nil { + return err + } + + if err := setTagsR53(conn, d, "healthcheck"); err != nil { + return err + } + + return resourceAwsRoute53HealthCheckRead(d, meta) +} + +func resourceAwsRoute53HealthCheckCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).r53conn + + healthConfig := &route53.HealthCheckConfig{ + Type: aws.String(d.Get("type").(string)), + } + + if v, ok := d.GetOk("request_interval"); ok { + healthConfig.RequestInterval = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("failure_threshold"); ok { + healthConfig.FailureThreshold = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("fqdn"); ok { + healthConfig.FullyQualifiedDomainName = aws.String(v.(string)) + } + + if v, ok := d.GetOk("search_string"); ok { + healthConfig.SearchString = aws.String(v.(string)) + } + + if v, ok := d.GetOk("ip_address"); ok { + healthConfig.IPAddress = aws.String(v.(string)) + } + + if v, ok := d.GetOk("port"); ok { + healthConfig.Port = aws.Int64(int64(v.(int))) + } + + if v, ok := 
d.GetOk("resource_path"); ok { + healthConfig.ResourcePath = aws.String(v.(string)) + } + + if *healthConfig.Type != route53.HealthCheckTypeCalculated && *healthConfig.Type != route53.HealthCheckTypeCloudwatchMetric { + if v, ok := d.GetOk("measure_latency"); ok { + healthConfig.MeasureLatency = aws.Bool(v.(bool)) + } + } + + if v, ok := d.GetOk("invert_healthcheck"); ok { + healthConfig.Inverted = aws.Bool(v.(bool)) + } + + if v, ok := d.GetOk("enable_sni"); ok { + healthConfig.EnableSNI = aws.Bool(v.(bool)) + } + + if *healthConfig.Type == route53.HealthCheckTypeCalculated { + if v, ok := d.GetOk("child_healthchecks"); ok { + healthConfig.ChildHealthChecks = expandStringList(v.(*schema.Set).List()) + } + + if v, ok := d.GetOk("child_health_threshold"); ok { + healthConfig.HealthThreshold = aws.Int64(int64(v.(int))) + } + } + + if *healthConfig.Type == route53.HealthCheckTypeCloudwatchMetric { + cloudwatchAlarmIdentifier := &route53.AlarmIdentifier{} + + if v, ok := d.GetOk("cloudwatch_alarm_name"); ok { + cloudwatchAlarmIdentifier.Name = aws.String(v.(string)) + } + + if v, ok := d.GetOk("cloudwatch_alarm_region"); ok { + cloudwatchAlarmIdentifier.Region = aws.String(v.(string)) + } + + healthConfig.AlarmIdentifier = cloudwatchAlarmIdentifier + + if v, ok := d.GetOk("insufficient_data_health_status"); ok { + healthConfig.InsufficientDataHealthStatus = aws.String(v.(string)) + } + } + + callerRef := resource.UniqueId() + if v, ok := d.GetOk("reference_name"); ok { + callerRef = fmt.Sprintf("%s-%s", v.(string), callerRef) + } + + input := &route53.CreateHealthCheckInput{ + CallerReference: aws.String(callerRef), + HealthCheckConfig: healthConfig, + } + + resp, err := conn.CreateHealthCheck(input) + + if err != nil { + return err + } + + d.SetId(*resp.HealthCheck.Id) + + if err := setTagsR53(conn, d, "healthcheck"); err != nil { + return err + } + + return resourceAwsRoute53HealthCheckRead(d, meta) +} + +func resourceAwsRoute53HealthCheckRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).r53conn + + read, err := conn.GetHealthCheck(&route53.GetHealthCheckInput{HealthCheckId: aws.String(d.Id())}) + if err != nil { + if r53err, ok := err.(awserr.Error); ok && r53err.Code() == "NoSuchHealthCheck" { + d.SetId("") + return nil + + } + return err + } + + if read == nil { + return nil + } + + updated := read.HealthCheck.HealthCheckConfig + d.Set("type", updated.Type) + d.Set("failure_threshold", updated.FailureThreshold) + d.Set("request_interval", updated.RequestInterval) + d.Set("fqdn", updated.FullyQualifiedDomainName) + d.Set("search_string", updated.SearchString) + d.Set("ip_address", updated.IPAddress) + d.Set("port", updated.Port) + d.Set("resource_path", updated.ResourcePath) + d.Set("measure_latency", updated.MeasureLatency) + d.Set("invert_healthcheck", updated.Inverted) + d.Set("child_healthchecks", updated.ChildHealthChecks) + d.Set("child_health_threshold", updated.HealthThreshold) + d.Set("insufficient_data_health_status", updated.InsufficientDataHealthStatus) + d.Set("enable_sni", updated.EnableSNI) + + if updated.AlarmIdentifier != nil { + d.Set("cloudwatch_alarm_name", updated.AlarmIdentifier.Name) + d.Set("cloudwatch_alarm_region", updated.AlarmIdentifier.Region) + } + + // read the tags + req := &route53.ListTagsForResourceInput{ + ResourceId: aws.String(d.Id()), + ResourceType: aws.String("healthcheck"), + } + + resp, err := conn.ListTagsForResource(req) + if err != nil { + return err + } + + var tags []*route53.Tag + if resp.ResourceTagSet != 
nil {
+ tags = resp.ResourceTagSet.Tags
+ }
+
+ if err := d.Set("tags", tagsToMapR53(tags)); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func resourceAwsRoute53HealthCheckDelete(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).r53conn
+
+ log.Printf("[DEBUG] Deleting Route53 health check: %s", d.Id())
+ _, err := conn.DeleteHealthCheck(&route53.DeleteHealthCheckInput{HealthCheckId: aws.String(d.Id())})
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func createChildHealthCheckList(s *schema.Set) (nl []*string) {
+ l := s.List()
+ for _, n := range l {
+ nl = append(nl, aws.String(n.(string)))
+ }
+
+ return nl
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_record.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_record.go
new file mode 100644
index 000000000..42c02c917
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_record.go
@@ -0,0 +1,877 @@
+package aws
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "log"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/terraform/helper/hashcode"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/helper/schema"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/route53"
+)
+
+var r53NoRecordsFound = errors.New("No matching records found")
+var r53NoHostedZoneFound = errors.New("No matching Hosted Zone found")
+
+func resourceAwsRoute53Record() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceAwsRoute53RecordCreate,
+ Read: resourceAwsRoute53RecordRead,
+ Update: resourceAwsRoute53RecordUpdate,
+ Delete: resourceAwsRoute53RecordDelete,
+ Importer: &schema.ResourceImporter{
+ State: schema.ImportStatePassthrough,
+ },
+ SchemaVersion: 2,
+ MigrateState: resourceAwsRoute53RecordMigrateState,
+ Schema: map[string]*schema.Schema{
+ "name": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ StateFunc: func(v interface{}) string {
+ value := strings.TrimSuffix(v.(string), ".")
+ return strings.ToLower(value)
+ },
+ },
+
+ "fqdn": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
+ "type": {
+ Type: schema.TypeString,
+ Required: true,
+ ValidateFunc: validateRoute53RecordType,
+ },
+
+ "zone_id": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
+ value := v.(string)
+ if value == "" {
+ es = append(es, fmt.Errorf("Cannot have empty zone_id"))
+ }
+ return
+ },
+ },
+
+ "ttl": {
+ Type: schema.TypeInt,
+ Optional: true,
+ ConflictsWith: []string{"alias"},
+ },
+
+ "weight": {
+ Type: schema.TypeInt,
+ Optional: true,
+ Removed: "Now implemented as weighted_routing_policy; Please see https://www.terraform.io/docs/providers/aws/r/route53_record.html",
+ },
+
+ "set_identifier": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+
+ "alias": {
+ Type: schema.TypeSet,
+ Optional: true,
+ ConflictsWith: []string{"records", "ttl"},
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "zone_id": {
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "name": {
+ Type: schema.TypeString,
+ Required: true,
+ StateFunc: normalizeAwsAliasName,
+ },
+
+ "evaluate_target_health": {
+ Type: schema.TypeBool,
+ Required: true,
+ },
+ },
+ },
+ Set: resourceAwsRoute53AliasRecordHash,
+ },
+
+ "failover": { // PRIMARY | 
SECONDARY
+ Type: schema.TypeString,
+ Optional: true,
+ Removed: "Now implemented as failover_routing_policy; see docs",
+ },
+
+ "failover_routing_policy": {
+ Type: schema.TypeList,
+ Optional: true,
+ ConflictsWith: []string{
+ "geolocation_routing_policy",
+ "latency_routing_policy",
+ "weighted_routing_policy",
+ },
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "type": {
+ Type: schema.TypeString,
+ Required: true,
+ ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
+ value := v.(string)
+ if value != "PRIMARY" && value != "SECONDARY" {
+ es = append(es, fmt.Errorf("Failover policy type must be PRIMARY or SECONDARY"))
+ }
+ return
+ },
+ },
+ },
+ },
+ },
+
+ "latency_routing_policy": {
+ Type: schema.TypeList,
+ Optional: true,
+ ConflictsWith: []string{
+ "failover_routing_policy",
+ "geolocation_routing_policy",
+ "weighted_routing_policy",
+ },
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "region": {
+ Type: schema.TypeString,
+ Required: true,
+ },
+ },
+ },
+ },
+
+ "geolocation_routing_policy": { // AWS Geolocation
+ Type: schema.TypeList,
+ Optional: true,
+ ConflictsWith: []string{
+ "failover_routing_policy",
+ "latency_routing_policy",
+ "weighted_routing_policy",
+ },
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "continent": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "country": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "subdivision": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ },
+ },
+ },
+
+ "weighted_routing_policy": {
+ Type: schema.TypeList,
+ Optional: true,
+ ConflictsWith: []string{
+ "failover_routing_policy",
+ "geolocation_routing_policy",
+ "latency_routing_policy",
+ },
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "weight": {
+ Type: schema.TypeInt,
+ Required: true,
+ },
+ },
+ },
+ },
+
+ "health_check_id": { // ID of health check
+ Type: schema.TypeString,
+ Optional: true,
+ },
+
+ "records": {
+ Type: schema.TypeSet,
+ ConflictsWith: []string{"alias"},
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Optional: true,
+ Set: schema.HashString,
+ },
+ },
+ }
+}
+
+func resourceAwsRoute53RecordUpdate(d *schema.ResourceData, meta interface{}) error {
+ // Route 53 supports CREATE, DELETE, and UPSERT actions. We use UPSERT, and
+ // AWS dynamically determines if a record should be created or updated.
+ // Amazon Route 53 can update an existing resource record set only when all
+ // of the following values match: Name, Type and SetIdentifier
+ // See http://docs.aws.amazon.com/Route53/latest/APIReference/API_ChangeResourceRecordSets.html
+
+ if !d.HasChange("type") && !d.HasChange("set_identifier") {
+ // If neither type nor set_identifier changed we use UPSERT;
+ // for a resource update here we simply fall through to
+ // our resource create function. 
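+ // (UPSERT creates the record when it is missing and overwrites it in
+ // place when it exists, so create and update can share one code path.)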
+ return resourceAwsRoute53RecordCreate(d, meta) + } + + // Otherwise we delete the existing record and create a new record within + // a transactional change + conn := meta.(*AWSClient).r53conn + zone := cleanZoneID(d.Get("zone_id").(string)) + + var err error + zoneRecord, err := conn.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(zone)}) + if err != nil { + return err + } + if zoneRecord.HostedZone == nil { + return fmt.Errorf("[WARN] No Route53 Zone found for id (%s)", zone) + } + + // Build the to be deleted record + en := expandRecordName(d.Get("name").(string), *zoneRecord.HostedZone.Name) + typeo, _ := d.GetChange("type") + + oldRec := &route53.ResourceRecordSet{ + Name: aws.String(en), + Type: aws.String(typeo.(string)), + } + + if v, _ := d.GetChange("ttl"); v.(int) != 0 { + oldRec.TTL = aws.Int64(int64(v.(int))) + } + + // Resource records + if v, _ := d.GetChange("records"); v != nil { + recs := v.(*schema.Set).List() + if len(recs) > 0 { + oldRec.ResourceRecords = expandResourceRecords(recs, typeo.(string)) + } + } + + // Alias record + if v, _ := d.GetChange("alias"); v != nil { + aliases := v.(*schema.Set).List() + if len(aliases) == 1 { + alias := aliases[0].(map[string]interface{}) + oldRec.AliasTarget = &route53.AliasTarget{ + DNSName: aws.String(alias["name"].(string)), + EvaluateTargetHealth: aws.Bool(alias["evaluate_target_health"].(bool)), + HostedZoneId: aws.String(alias["zone_id"].(string)), + } + } + } + + if v, _ := d.GetChange("set_identifier"); v.(string) != "" { + oldRec.SetIdentifier = aws.String(v.(string)) + } + + // Build the to be created record + rec, err := resourceAwsRoute53RecordBuildSet(d, *zoneRecord.HostedZone.Name) + if err != nil { + return err + } + + // Delete the old and create the new records in a single batch. We abuse + // StateChangeConf for this to retry for us since Route53 sometimes returns + // errors about another operation happening at the same time. 
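+ // Route 53 applies all changes in a ChangeBatch atomically, so pairing the
+ // DELETE with the CREATE below leaves no moment in which neither record
+ // exists.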
+ changeBatch := &route53.ChangeBatch{ + Comment: aws.String("Managed by Terraform"), + Changes: []*route53.Change{ + { + Action: aws.String("DELETE"), + ResourceRecordSet: oldRec, + }, + { + Action: aws.String("CREATE"), + ResourceRecordSet: rec, + }, + }, + } + + req := &route53.ChangeResourceRecordSetsInput{ + HostedZoneId: aws.String(cleanZoneID(*zoneRecord.HostedZone.Id)), + ChangeBatch: changeBatch, + } + + log.Printf("[DEBUG] Updating resource records for zone: %s, name: %s\n\n%s", + zone, *rec.Name, req) + + respRaw, err := changeRoute53RecordSet(conn, req) + if err != nil { + return errwrap.Wrapf("[ERR]: Error building changeset: {{err}}", err) + } + + changeInfo := respRaw.(*route53.ChangeResourceRecordSetsOutput).ChangeInfo + + // Generate an ID + vars := []string{ + zone, + strings.ToLower(d.Get("name").(string)), + d.Get("type").(string), + } + if v, ok := d.GetOk("set_identifier"); ok { + vars = append(vars, v.(string)) + } + + d.SetId(strings.Join(vars, "_")) + + err = waitForRoute53RecordSetToSync(conn, cleanChangeID(*changeInfo.Id)) + if err != nil { + return err + } + + return resourceAwsRoute53RecordRead(d, meta) +} + +func resourceAwsRoute53RecordCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).r53conn + zone := cleanZoneID(d.Get("zone_id").(string)) + + var err error + zoneRecord, err := conn.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(zone)}) + if err != nil { + return err + } + if zoneRecord.HostedZone == nil { + return fmt.Errorf("[WARN] No Route53 Zone found for id (%s)", zone) + } + + // Build the record + rec, err := resourceAwsRoute53RecordBuildSet(d, *zoneRecord.HostedZone.Name) + if err != nil { + return err + } + + // Create the new records. We abuse StateChangeConf for this to + // retry for us since Route53 sometimes returns errors about another + // operation happening at the same time. + changeBatch := &route53.ChangeBatch{ + Comment: aws.String("Managed by Terraform"), + Changes: []*route53.Change{ + { + Action: aws.String("UPSERT"), + ResourceRecordSet: rec, + }, + }, + } + + req := &route53.ChangeResourceRecordSetsInput{ + HostedZoneId: aws.String(cleanZoneID(*zoneRecord.HostedZone.Id)), + ChangeBatch: changeBatch, + } + + log.Printf("[DEBUG] Creating resource records for zone: %s, name: %s\n\n%s", + zone, *rec.Name, req) + + respRaw, err := changeRoute53RecordSet(conn, req) + if err != nil { + return errwrap.Wrapf("[ERR]: Error building changeset: {{err}}", err) + } + + changeInfo := respRaw.(*route53.ChangeResourceRecordSetsOutput).ChangeInfo + + // Generate an ID + vars := []string{ + zone, + strings.ToLower(d.Get("name").(string)), + d.Get("type").(string), + } + if v, ok := d.GetOk("set_identifier"); ok { + vars = append(vars, v.(string)) + } + + d.SetId(strings.Join(vars, "_")) + + err = waitForRoute53RecordSetToSync(conn, cleanChangeID(*changeInfo.Id)) + if err != nil { + return err + } + + return resourceAwsRoute53RecordRead(d, meta) +} + +func changeRoute53RecordSet(conn *route53.Route53, input *route53.ChangeResourceRecordSetsInput) (interface{}, error) { + wait := resource.StateChangeConf{ + Pending: []string{"rejected"}, + Target: []string{"accepted"}, + Timeout: 5 * time.Minute, + MinTimeout: 1 * time.Second, + Refresh: func() (interface{}, string, error) { + resp, err := conn.ChangeResourceRecordSets(input) + if err != nil { + if r53err, ok := err.(awserr.Error); ok { + if r53err.Code() == "PriorRequestNotComplete" { + // There is some pending operation, so just retry + // in a bit. 
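+ // "rejected" is a Pending state for this StateChangeConf, so
+ // WaitForState keeps retrying until the prior change settles or
+ // the five-minute timeout is reached.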
+ return nil, "rejected", nil
+ }
+ }
+
+ return nil, "failure", err
+ }
+
+ return resp, "accepted", nil
+ },
+ }
+
+ return wait.WaitForState()
+}
+
+func waitForRoute53RecordSetToSync(conn *route53.Route53, requestId string) error {
+ wait := resource.StateChangeConf{
+ Delay: 30 * time.Second,
+ Pending: []string{"PENDING"},
+ Target: []string{"INSYNC"},
+ Timeout: 30 * time.Minute,
+ MinTimeout: 5 * time.Second,
+ Refresh: func() (result interface{}, state string, err error) {
+ changeRequest := &route53.GetChangeInput{
+ Id: aws.String(requestId),
+ }
+ return resourceAwsGoRoute53Wait(conn, changeRequest)
+ },
+ }
+ _, err := wait.WaitForState()
+ return err
+}
+
+func resourceAwsRoute53RecordRead(d *schema.ResourceData, meta interface{}) error {
+ // If we don't have a zone ID we're doing an import. Parse it from the ID.
+ if _, ok := d.GetOk("zone_id"); !ok {
+ parts := strings.Split(d.Id(), "_")
+
+ // Check that the ID parsed into the correct number of segments;
+ // we need at least 3.
+ if len(parts) < 3 {
+ return fmt.Errorf("Error importing aws_route53_record. Please make sure the record ID is in the form ZONEID_RECORDNAME_TYPE (e.g. Z4KAPRWWNC7JR_dev_A)")
+ }
+
+ d.Set("zone_id", parts[0])
+ d.Set("name", parts[1])
+ d.Set("type", parts[2])
+ if len(parts) > 3 {
+ d.Set("set_identifier", parts[3])
+ }
+ }
+
+ record, err := findRecord(d, meta)
+ if err != nil {
+ switch err {
+ case r53NoHostedZoneFound, r53NoRecordsFound:
+ log.Printf("[DEBUG] %s for: %s, removing from state file", err, d.Id())
+ d.SetId("")
+ return nil
+ default:
+ return err
+ }
+ }
+
+ err = d.Set("records", flattenResourceRecords(record.ResourceRecords, *record.Type))
+ if err != nil {
+ return fmt.Errorf("[DEBUG] Error setting records for: %s, error: %#v", d.Id(), err)
+ }
+
+ if alias := record.AliasTarget; alias != nil {
+ name := normalizeAwsAliasName(*alias.DNSName)
+ d.Set("alias", []interface{}{
+ map[string]interface{}{
+ "zone_id": *alias.HostedZoneId,
+ "name": name,
+ "evaluate_target_health": *alias.EvaluateTargetHealth,
+ },
+ })
+ }
+
+ d.Set("ttl", record.TTL)
+
+ if record.Failover != nil {
+ v := []map[string]interface{}{{
+ "type": aws.StringValue(record.Failover),
+ }}
+ if err := d.Set("failover_routing_policy", v); err != nil {
+ return fmt.Errorf("[DEBUG] Error setting failover records for: %s, error: %#v", d.Id(), err)
+ }
+ }
+
+ if record.GeoLocation != nil {
+ v := []map[string]interface{}{{
+ "continent": aws.StringValue(record.GeoLocation.ContinentCode),
+ "country": aws.StringValue(record.GeoLocation.CountryCode),
+ "subdivision": aws.StringValue(record.GeoLocation.SubdivisionCode),
+ }}
+ if err := d.Set("geolocation_routing_policy", v); err != nil {
+ return fmt.Errorf("[DEBUG] Error setting geolocation records for: %s, error: %#v", d.Id(), err)
+ }
+ }
+
+ if record.Region != nil {
+ v := []map[string]interface{}{{
+ "region": aws.StringValue(record.Region),
+ }}
+ if err := d.Set("latency_routing_policy", v); err != nil {
+ return fmt.Errorf("[DEBUG] Error setting latency records for: %s, error: %#v", d.Id(), err)
+ }
+ }
+
+ if record.Weight != nil {
+ v := []map[string]interface{}{{
+ "weight": aws.Int64Value(record.Weight),
+ }}
+ if err := d.Set("weighted_routing_policy", v); err != nil {
+ return fmt.Errorf("[DEBUG] Error setting weighted records for: %s, error: %#v", d.Id(), err)
+ }
+ }
+
+ d.Set("set_identifier", record.SetIdentifier)
+ d.Set("health_check_id", record.HealthCheckId)
+
+ return nil
+}
+
+// findRecord takes a 
ResourceData struct for aws_route53_record. It
+// uses the referenced zone_id to query Route53 and find information on its
+// records.
+//
+// If records are found, it returns the matching
+// route53.ResourceRecordSet and nil for the error.
+//
+// If no hosted zone is found, it returns a nil recordset and a
+// r53NoHostedZoneFound error.
+//
+// If no matching recordset is found, it returns nil and a r53NoRecordsFound
+// error.
+//
+// If there are other errors, it returns a nil recordset and passes on the
+// error.
+func findRecord(d *schema.ResourceData, meta interface{}) (*route53.ResourceRecordSet, error) {
+ conn := meta.(*AWSClient).r53conn
+ // Scan for a matching record set in the referenced hosted zone
+ zone := cleanZoneID(d.Get("zone_id").(string))
+
+ // get expanded name
+ zoneRecord, err := conn.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(zone)})
+ if err != nil {
+ if r53err, ok := err.(awserr.Error); ok && r53err.Code() == "NoSuchHostedZone" {
+ return nil, r53NoHostedZoneFound
+ }
+ return nil, err
+ }
+
+ en := expandRecordName(d.Get("name").(string), *zoneRecord.HostedZone.Name)
+ log.Printf("[DEBUG] Expanded record name: %s", en)
+ d.Set("fqdn", en)
+
+ lopts := &route53.ListResourceRecordSetsInput{
+ HostedZoneId: aws.String(cleanZoneID(zone)),
+ StartRecordName: aws.String(en),
+ StartRecordType: aws.String(d.Get("type").(string)),
+ }
+
+ log.Printf("[DEBUG] List resource records sets for zone: %s, opts: %s",
+ zone, lopts)
+ resp, err := conn.ListResourceRecordSets(lopts)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, record := range resp.ResourceRecordSets {
+ name := cleanRecordName(*record.Name)
+ if FQDN(strings.ToLower(name)) != FQDN(strings.ToLower(*lopts.StartRecordName)) {
+ continue
+ }
+ if strings.ToUpper(*record.Type) != strings.ToUpper(*lopts.StartRecordType) {
+ continue
+ }
+
+ if record.SetIdentifier != nil && *record.SetIdentifier != d.Get("set_identifier") {
+ continue
+ }
+ // The only safe return where a record is found
+ return record, nil
+ }
+ return nil, r53NoRecordsFound
+}
+
+func resourceAwsRoute53RecordDelete(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).r53conn
+ // Get the records
+ rec, err := findRecord(d, meta)
+ if err != nil {
+ switch err {
+ case r53NoHostedZoneFound, r53NoRecordsFound:
+ log.Printf("[DEBUG] %s for: %s, removing from state file", err, d.Id())
+ d.SetId("")
+ return nil
+ default:
+ return err
+ }
+ }
+
+ // Change batch for deleting
+ changeBatch := &route53.ChangeBatch{
+ Comment: aws.String("Deleted by Terraform"),
+ Changes: []*route53.Change{
+ {
+ Action: aws.String("DELETE"),
+ ResourceRecordSet: rec,
+ },
+ },
+ }
+
+ zone := cleanZoneID(d.Get("zone_id").(string))
+
+ req := &route53.ChangeResourceRecordSetsInput{
+ HostedZoneId: aws.String(cleanZoneID(zone)),
+ ChangeBatch: changeBatch,
+ }
+
+ respRaw, err := deleteRoute53RecordSet(conn, req)
+ if err != nil {
+ return errwrap.Wrapf("[ERR]: Error building changeset: {{err}}", err)
+ }
+
+ changeInfo := respRaw.(*route53.ChangeResourceRecordSetsOutput).ChangeInfo
+ if changeInfo == nil {
+ log.Printf("[INFO] No ChangeInfo Found. 
Waiting for Sync not required")
+ return nil
+ }
+
+ err = waitForRoute53RecordSetToSync(conn, cleanChangeID(*changeInfo.Id))
+ if err != nil {
+ return err
+ }
+
+ return err
+}
+
+func deleteRoute53RecordSet(conn *route53.Route53, input *route53.ChangeResourceRecordSetsInput) (interface{}, error) {
+ wait := resource.StateChangeConf{
+ Pending: []string{"rejected"},
+ Target: []string{"accepted"},
+ Timeout: 5 * time.Minute,
+ MinTimeout: 1 * time.Second,
+ Refresh: func() (interface{}, string, error) {
+ resp, err := conn.ChangeResourceRecordSets(input)
+ if err != nil {
+ if r53err, ok := err.(awserr.Error); ok {
+ if r53err.Code() == "PriorRequestNotComplete" {
+ // There is some pending operation, so just retry
+ // in a bit.
+ return 42, "rejected", nil
+ }
+
+ if r53err.Code() == "InvalidChangeBatch" {
+ // This means that the record is already gone.
+ return resp, "accepted", nil
+ }
+ }
+
+ return 42, "failure", err
+ }
+
+ return resp, "accepted", nil
+ },
+ }
+
+ return wait.WaitForState()
+}
+
+func resourceAwsRoute53RecordBuildSet(d *schema.ResourceData, zoneName string) (*route53.ResourceRecordSet, error) {
+ // get expanded name
+ en := expandRecordName(d.Get("name").(string), zoneName)
+
+ // Create the RecordSet request with the fully expanded name, e.g.
+ // sub.domain.com. Route 53 requires a fully qualified domain name, but does
+ // not require the trailing ".", which it will add itself, so we don't call
+ // FQDN here.
+ rec := &route53.ResourceRecordSet{
+ Name: aws.String(en),
+ Type: aws.String(d.Get("type").(string)),
+ }
+
+ if v, ok := d.GetOk("ttl"); ok {
+ rec.TTL = aws.Int64(int64(v.(int)))
+ }
+
+ // Resource records
+ if v, ok := d.GetOk("records"); ok {
+ recs := v.(*schema.Set).List()
+ rec.ResourceRecords = expandResourceRecords(recs, d.Get("type").(string))
+ }
+
+ // Alias record
+ if v, ok := d.GetOk("alias"); ok {
+ aliases := v.(*schema.Set).List()
+ if len(aliases) > 1 {
+ return nil, fmt.Errorf("You can only define a single alias target per record")
+ }
+ alias := aliases[0].(map[string]interface{})
+ rec.AliasTarget = &route53.AliasTarget{
+ DNSName: aws.String(alias["name"].(string)),
+ EvaluateTargetHealth: aws.Bool(alias["evaluate_target_health"].(bool)),
+ HostedZoneId: aws.String(alias["zone_id"].(string)),
+ }
+ log.Printf("[DEBUG] Creating alias: %#v", alias)
+ } else {
+ if _, ok := d.GetOk("ttl"); !ok {
+ return nil, fmt.Errorf(`provider.aws: aws_route53_record: %s: "ttl": required field is not set`, d.Get("name").(string))
+ }
+
+ if _, ok := d.GetOk("records"); !ok {
+ return nil, fmt.Errorf(`provider.aws: aws_route53_record: %s: "records": required field is not set`, d.Get("name").(string))
+ }
+ }
+
+ if v, ok := d.GetOk("failover_routing_policy"); ok {
+ if _, ok := d.GetOk("set_identifier"); !ok {
+ return nil, fmt.Errorf(`provider.aws: aws_route53_record: %s: "set_identifier": required field is not set when "failover_routing_policy" is set`, d.Get("name").(string))
+ }
+ records := v.([]interface{})
+ if len(records) > 1 {
+ return nil, fmt.Errorf("You can only define a single failover_routing_policy per record")
+ }
+ failover := records[0].(map[string]interface{})
+
+ rec.Failover = aws.String(failover["type"].(string))
+ }
+
+ if v, ok := d.GetOk("health_check_id"); ok {
+ rec.HealthCheckId = aws.String(v.(string))
+ }
+
+ if v, ok := d.GetOk("weighted_routing_policy"); ok {
+ if _, ok := d.GetOk("set_identifier"); !ok {
+ return nil, fmt.Errorf(`provider.aws: aws_route53_record: %s: "set_identifier": required field is not set 
when "weighted_routing_policy" is set`, d.Get("name").(string))
+ }
+ records := v.([]interface{})
+ if len(records) > 1 {
+ return nil, fmt.Errorf("You can only define a single weighted_routing_policy per record")
+ }
+ weight := records[0].(map[string]interface{})
+
+ rec.Weight = aws.Int64(int64(weight["weight"].(int)))
+ }
+
+ if v, ok := d.GetOk("set_identifier"); ok {
+ rec.SetIdentifier = aws.String(v.(string))
+ }
+
+ if v, ok := d.GetOk("latency_routing_policy"); ok {
+ if _, ok := d.GetOk("set_identifier"); !ok {
+ return nil, fmt.Errorf(`provider.aws: aws_route53_record: %s: "set_identifier": required field is not set when "latency_routing_policy" is set`, d.Get("name").(string))
+ }
+ records := v.([]interface{})
+ if len(records) > 1 {
+ return nil, fmt.Errorf("You can only define a single latency_routing_policy per record")
+ }
+ latency := records[0].(map[string]interface{})
+
+ rec.Region = aws.String(latency["region"].(string))
+ }
+
+ if v, ok := d.GetOk("geolocation_routing_policy"); ok {
+ if _, ok := d.GetOk("set_identifier"); !ok {
+ return nil, fmt.Errorf(`provider.aws: aws_route53_record: %s: "set_identifier": required field is not set when "geolocation_routing_policy" is set`, d.Get("name").(string))
+ }
+ geolocations := v.([]interface{})
+ if len(geolocations) > 1 {
+ return nil, fmt.Errorf("You can only define a single geolocation_routing_policy per record")
+ }
+ geolocation := geolocations[0].(map[string]interface{})
+
+ rec.GeoLocation = &route53.GeoLocation{
+ ContinentCode: nilString(geolocation["continent"].(string)),
+ CountryCode: nilString(geolocation["country"].(string)),
+ SubdivisionCode: nilString(geolocation["subdivision"].(string)),
+ }
+ log.Printf("[DEBUG] Creating geolocation: %#v", geolocation)
+ }
+
+ return rec, nil
+}
+
+func FQDN(name string) string {
+ n := len(name)
+ if n == 0 || name[n-1] == '.' {
+ return name
+ } else {
+ return name + "."
+ }
+}
+
+// Route 53 stores the "*" wildcard indicator as ASCII 42 and returns the
+// octal equivalent, "\\052". Here we look for that, and convert back to "*"
+// as needed.
+func cleanRecordName(name string) string {
+ str := name
+ if strings.HasPrefix(name, "\\052") {
+ str = strings.Replace(name, "\\052", "*", 1)
+ log.Printf("[DEBUG] Replacing octal \\052 for * in: %s", name)
+ }
+ return str
+}
+
+// Check if the current record name contains the zone suffix.
+// If it does not, add the zone name to form a fully qualified name
+// and keep AWS happy.
+func expandRecordName(name, zone string) string {
+ rn := strings.ToLower(strings.TrimSuffix(name, "."))
+ zone = strings.TrimSuffix(zone, ".")
+ if !strings.HasSuffix(rn, zone) {
+ if len(name) == 0 {
+ rn = zone
+ } else {
+ rn = strings.Join([]string{name, zone}, ".")
+ }
+ }
+ return rn
+}
+
+func resourceAwsRoute53AliasRecordHash(v interface{}) int {
+ var buf bytes.Buffer
+ m := v.(map[string]interface{})
+ buf.WriteString(fmt.Sprintf("%s-", normalizeAwsAliasName(m["name"].(string))))
+ buf.WriteString(fmt.Sprintf("%s-", m["zone_id"].(string)))
+ buf.WriteString(fmt.Sprintf("%t-", m["evaluate_target_health"].(bool)))
+
+ return hashcode.String(buf.String())
+}
+
+// nilString takes a string as an argument and returns a string
+// pointer. The returned pointer is nil if the string argument is
+// empty, otherwise it is a pointer to a copy of the string. 
+func nilString(s string) *string {
+ if s == "" {
+ return nil
+ }
+ return aws.String(s)
+}
+
+func normalizeAwsAliasName(alias interface{}) string {
+ input := alias.(string)
+ if strings.HasPrefix(input, "dualstack.") {
+ return strings.Replace(input, "dualstack.", "", -1)
+ }
+
+ return strings.TrimRight(input, ".")
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_record_migrate.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_record_migrate.go
new file mode 100644
index 000000000..ad6cda9d3
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_record_migrate.go
@@ -0,0 +1,62 @@
+package aws
+
+import (
+ "fmt"
+ "log"
+ "strings"
+
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func resourceAwsRoute53RecordMigrateState(
+ v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
+ switch v {
+ case 0:
+ log.Println("[INFO] Found AWS Route53 Record State v0; migrating to v1 then v2")
+ v1InstanceState, err := migrateRoute53RecordStateV0toV1(is)
+ if err != nil {
+ return v1InstanceState, err
+ }
+ return migrateRoute53RecordStateV1toV2(v1InstanceState)
+ case 1:
+ log.Println("[INFO] Found AWS Route53 Record State v1; migrating to v2")
+ return migrateRoute53RecordStateV1toV2(is)
+ default:
+ return is, fmt.Errorf("Unexpected schema version: %d", v)
+ }
+}
+
+func migrateRoute53RecordStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) {
+ if is.Empty() {
+ log.Println("[DEBUG] Empty InstanceState; nothing to migrate.")
+ return is, nil
+ }
+
+ log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes)
+ newName := strings.TrimSuffix(is.Attributes["name"], ".")
+ is.Attributes["name"] = newName
+ log.Printf("[DEBUG] Attributes after migration: %#v, new name: %s", is.Attributes, newName)
+ return is, nil
+}
+
+func migrateRoute53RecordStateV1toV2(is *terraform.InstanceState) (*terraform.InstanceState, error) {
+ if is.Empty() {
+ log.Println("[DEBUG] Empty InstanceState; nothing to migrate.")
+ return is, nil
+ }
+ log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes)
+ if is.Attributes["weight"] != "" && is.Attributes["weight"] != "-1" {
+ is.Attributes["weighted_routing_policy.#"] = "1"
+ key := "weighted_routing_policy.0.weight"
+ is.Attributes[key] = is.Attributes["weight"]
+ }
+ if is.Attributes["failover"] != "" {
+ is.Attributes["failover_routing_policy.#"] = "1"
+ key := "failover_routing_policy.0.type"
+ is.Attributes[key] = is.Attributes["failover"]
+ }
+ delete(is.Attributes, "weight")
+ delete(is.Attributes, "failover")
+ log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes)
+ return is, nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_zone.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_zone.go
new file mode 100644
index 000000000..b30d38829
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_zone.go
@@ -0,0 +1,391 @@
+package aws
+
+import (
+ "fmt"
+ "log"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/helper/schema"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/route53"
+)
+
+func 
resourceAwsRoute53Zone() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsRoute53ZoneCreate, + Read: resourceAwsRoute53ZoneRead, + Update: resourceAwsRoute53ZoneUpdate, + Delete: resourceAwsRoute53ZoneDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "comment": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "Managed by Terraform", + }, + + "vpc_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"delegation_set_id"}, + }, + + "vpc_region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "zone_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "delegation_set_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"vpc_id"}, + }, + + "name_servers": &schema.Schema{ + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + }, + + "tags": tagsSchema(), + + "force_destroy": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + } +} + +func resourceAwsRoute53ZoneCreate(d *schema.ResourceData, meta interface{}) error { + r53 := meta.(*AWSClient).r53conn + + req := &route53.CreateHostedZoneInput{ + Name: aws.String(d.Get("name").(string)), + HostedZoneConfig: &route53.HostedZoneConfig{Comment: aws.String(d.Get("comment").(string))}, + CallerReference: aws.String(time.Now().Format(time.RFC3339Nano)), + } + if v := d.Get("vpc_id"); v != "" { + req.VPC = &route53.VPC{ + VPCId: aws.String(v.(string)), + VPCRegion: aws.String(meta.(*AWSClient).region), + } + if w := d.Get("vpc_region"); w != "" { + req.VPC.VPCRegion = aws.String(w.(string)) + } + d.Set("vpc_region", req.VPC.VPCRegion) + } + + if v, ok := d.GetOk("delegation_set_id"); ok { + req.DelegationSetId = aws.String(v.(string)) + } + + log.Printf("[DEBUG] Creating Route53 hosted zone: %s", *req.Name) + var err error + resp, err := r53.CreateHostedZone(req) + if err != nil { + return err + } + + // Store the zone_id + zone := cleanZoneID(*resp.HostedZone.Id) + d.Set("zone_id", zone) + d.SetId(zone) + + // Wait until we are done initializing + wait := resource.StateChangeConf{ + Delay: 30 * time.Second, + Pending: []string{"PENDING"}, + Target: []string{"INSYNC"}, + Timeout: 10 * time.Minute, + MinTimeout: 2 * time.Second, + Refresh: func() (result interface{}, state string, err error) { + changeRequest := &route53.GetChangeInput{ + Id: aws.String(cleanChangeID(*resp.ChangeInfo.Id)), + } + return resourceAwsGoRoute53Wait(r53, changeRequest) + }, + } + _, err = wait.WaitForState() + if err != nil { + return err + } + return resourceAwsRoute53ZoneUpdate(d, meta) +} + +func resourceAwsRoute53ZoneRead(d *schema.ResourceData, meta interface{}) error { + r53 := meta.(*AWSClient).r53conn + zone, err := r53.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(d.Id())}) + if err != nil { + // Handle a deleted zone + if r53err, ok := err.(awserr.Error); ok && r53err.Code() == "NoSuchHostedZone" { + d.SetId("") + return nil + } + return err + } + + // In the import case this will be empty + if _, ok := d.GetOk("zone_id"); !ok { + d.Set("zone_id", d.Id()) + } + if _, ok := d.GetOk("name"); !ok { + d.Set("name", zone.HostedZone.Name) + } + + if 
!*zone.HostedZone.Config.PrivateZone { + ns := make([]string, len(zone.DelegationSet.NameServers)) + for i := range zone.DelegationSet.NameServers { + ns[i] = *zone.DelegationSet.NameServers[i] + } + sort.Strings(ns) + if err := d.Set("name_servers", ns); err != nil { + return fmt.Errorf("[DEBUG] Error setting name servers for: %s, error: %#v", d.Id(), err) + } + } else { + ns, err := getNameServers(d.Id(), d.Get("name").(string), r53) + if err != nil { + return err + } + if err := d.Set("name_servers", ns); err != nil { + return fmt.Errorf("[DEBUG] Error setting name servers for: %s, error: %#v", d.Id(), err) + } + + // In the import case we just associate it with the first VPC + if _, ok := d.GetOk("vpc_id"); !ok { + if len(zone.VPCs) > 1 { + return fmt.Errorf( + "Can't import a route53_zone with more than one VPC attachment") + } + + if len(zone.VPCs) > 0 { + d.Set("vpc_id", zone.VPCs[0].VPCId) + d.Set("vpc_region", zone.VPCs[0].VPCRegion) + } + } + + var associatedVPC *route53.VPC + for _, vpc := range zone.VPCs { + if *vpc.VPCId == d.Get("vpc_id") { + associatedVPC = vpc + break + } + } + if associatedVPC == nil { + return fmt.Errorf("[DEBUG] VPC: %v is not associated with Zone: %v", d.Get("vpc_id"), d.Id()) + } + } + + if zone.DelegationSet != nil && zone.DelegationSet.Id != nil { + d.Set("delegation_set_id", cleanDelegationSetId(*zone.DelegationSet.Id)) + } + + if zone.HostedZone != nil && zone.HostedZone.Config != nil && zone.HostedZone.Config.Comment != nil { + d.Set("comment", zone.HostedZone.Config.Comment) + } + + // get tags + req := &route53.ListTagsForResourceInput{ + ResourceId: aws.String(d.Id()), + ResourceType: aws.String("hostedzone"), + } + + resp, err := r53.ListTagsForResource(req) + if err != nil { + return err + } + + var tags []*route53.Tag + if resp.ResourceTagSet != nil { + tags = resp.ResourceTagSet.Tags + } + + if err := d.Set("tags", tagsToMapR53(tags)); err != nil { + return err + } + + return nil +} + +func resourceAwsRoute53ZoneUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).r53conn + + d.Partial(true) + + if d.HasChange("comment") { + zoneInput := route53.UpdateHostedZoneCommentInput{ + Id: aws.String(d.Id()), + Comment: aws.String(d.Get("comment").(string)), + } + + _, err := conn.UpdateHostedZoneComment(&zoneInput) + if err != nil { + return err + } else { + d.SetPartial("comment") + } + } + + if err := setTagsR53(conn, d, "hostedzone"); err != nil { + return err + } else { + d.SetPartial("tags") + } + + d.Partial(false) + + return resourceAwsRoute53ZoneRead(d, meta) +} + +func resourceAwsRoute53ZoneDelete(d *schema.ResourceData, meta interface{}) error { + r53 := meta.(*AWSClient).r53conn + + if d.Get("force_destroy").(bool) { + if err := deleteAllRecordsInHostedZoneId(d.Id(), d.Get("name").(string), r53); err != nil { + return errwrap.Wrapf("{{err}}", err) + } + } + + log.Printf("[DEBUG] Deleting Route53 hosted zone: %s (ID: %s)", + d.Get("name").(string), d.Id()) + _, err := r53.DeleteHostedZone(&route53.DeleteHostedZoneInput{Id: aws.String(d.Id())}) + if err != nil { + if r53err, ok := err.(awserr.Error); ok && r53err.Code() == "NoSuchHostedZone" { + log.Printf("[DEBUG] No matching Route 53 Zone found for: %s, removing from state file", d.Id()) + d.SetId("") + return nil + } + return err + } + + return nil +} + +func deleteAllRecordsInHostedZoneId(hostedZoneId, hostedZoneName string, conn *route53.Route53) error { + input := &route53.ListResourceRecordSetsInput{ + HostedZoneId: aws.String(hostedZoneId), + } + + 
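// Track the most recent delete and waiter errors so the final error message
+ // can surface them if listing or deleting the record sets fails.
+ 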
var lastDeleteErr, lastErrorFromWaiter error + var pageNum = 0 + err := conn.ListResourceRecordSetsPages(input, func(page *route53.ListResourceRecordSetsOutput, isLastPage bool) bool { + sets := page.ResourceRecordSets + pageNum += 1 + + changes := make([]*route53.Change, 0) + // 100 items per page returned by default + for _, set := range sets { + if strings.TrimSuffix(*set.Name, ".") == strings.TrimSuffix(hostedZoneName, ".") && (*set.Type == "NS" || *set.Type == "SOA") { + // Zone NS & SOA records cannot be deleted + continue + } + changes = append(changes, &route53.Change{ + Action: aws.String("DELETE"), + ResourceRecordSet: set, + }) + } + log.Printf("[DEBUG] Deleting %d records (page %d) from %s", + len(changes), pageNum, hostedZoneId) + + req := &route53.ChangeResourceRecordSetsInput{ + HostedZoneId: aws.String(hostedZoneId), + ChangeBatch: &route53.ChangeBatch{ + Comment: aws.String("Deleted by Terraform"), + Changes: changes, + }, + } + + var resp interface{} + resp, lastDeleteErr = deleteRoute53RecordSet(conn, req) + if out, ok := resp.(*route53.ChangeResourceRecordSetsOutput); ok { + log.Printf("[DEBUG] Waiting for change batch to become INSYNC: %#v", out) + if out.ChangeInfo != nil && out.ChangeInfo.Id != nil { + lastErrorFromWaiter = waitForRoute53RecordSetToSync(conn, cleanChangeID(*out.ChangeInfo.Id)) + } else { + log.Printf("[DEBUG] Change info was empty") + } + } else { + log.Printf("[DEBUG] Unable to wait for change batch because of an error: %s", lastDeleteErr) + } + + return !isLastPage + }) + if err != nil { + return fmt.Errorf("Failed listing/deleting record sets: %s\nLast error from deletion: %s\nLast error from waiter: %s", + err, lastDeleteErr, lastErrorFromWaiter) + } + + return nil +} + +func resourceAwsGoRoute53Wait(r53 *route53.Route53, ref *route53.GetChangeInput) (result interface{}, state string, err error) { + + status, err := r53.GetChange(ref) + if err != nil { + return nil, "UNKNOWN", err + } + return true, *status.ChangeInfo.Status, nil +} + +// cleanChangeID is used to remove the leading /change/ +func cleanChangeID(ID string) string { + return cleanPrefix(ID, "/change/") +} + +// cleanZoneID is used to remove the leading /hostedzone/ +func cleanZoneID(ID string) string { + return cleanPrefix(ID, "/hostedzone/") +} + +// cleanPrefix removes a string prefix from an ID +func cleanPrefix(ID, prefix string) string { + if strings.HasPrefix(ID, prefix) { + ID = strings.TrimPrefix(ID, prefix) + } + return ID +} + +func getNameServers(zoneId string, zoneName string, r53 *route53.Route53) ([]string, error) { + resp, err := r53.ListResourceRecordSets(&route53.ListResourceRecordSetsInput{ + HostedZoneId: aws.String(zoneId), + StartRecordName: aws.String(zoneName), + StartRecordType: aws.String("NS"), + }) + if err != nil { + return nil, err + } + if len(resp.ResourceRecordSets) == 0 { + return nil, nil + } + ns := make([]string, len(resp.ResourceRecordSets[0].ResourceRecords)) + for i := range resp.ResourceRecordSets[0].ResourceRecords { + ns[i] = *resp.ResourceRecordSets[0].ResourceRecords[i].Value + } + sort.Strings(ns) + return ns, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_zone_association.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_zone_association.go new file mode 100644 index 000000000..c416095ec --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route53_zone_association.go @@ -0,0 +1,149 @@ +package 
aws + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/route53" +) + +func resourceAwsRoute53ZoneAssociation() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsRoute53ZoneAssociationCreate, + Read: resourceAwsRoute53ZoneAssociationRead, + Update: resourceAwsRoute53ZoneAssociationUpdate, + Delete: resourceAwsRoute53ZoneAssociationDelete, + + Schema: map[string]*schema.Schema{ + "zone_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "vpc_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "vpc_region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + } +} + +func resourceAwsRoute53ZoneAssociationCreate(d *schema.ResourceData, meta interface{}) error { + r53 := meta.(*AWSClient).r53conn + + req := &route53.AssociateVPCWithHostedZoneInput{ + HostedZoneId: aws.String(d.Get("zone_id").(string)), + VPC: &route53.VPC{ + VPCId: aws.String(d.Get("vpc_id").(string)), + VPCRegion: aws.String(meta.(*AWSClient).region), + }, + Comment: aws.String("Managed by Terraform"), + } + if w := d.Get("vpc_region"); w != "" { + req.VPC.VPCRegion = aws.String(w.(string)) + } + + log.Printf("[DEBUG] Associating Route53 Private Zone %s with VPC %s with region %s", *req.HostedZoneId, *req.VPC.VPCId, *req.VPC.VPCRegion) + var err error + resp, err := r53.AssociateVPCWithHostedZone(req) + if err != nil { + return err + } + + // Store association id + d.SetId(fmt.Sprintf("%s:%s", *req.HostedZoneId, *req.VPC.VPCId)) + d.Set("vpc_region", req.VPC.VPCRegion) + + // Wait until we are done initializing + wait := resource.StateChangeConf{ + Delay: 30 * time.Second, + Pending: []string{"PENDING"}, + Target: []string{"INSYNC"}, + Timeout: 10 * time.Minute, + MinTimeout: 2 * time.Second, + Refresh: func() (result interface{}, state string, err error) { + changeRequest := &route53.GetChangeInput{ + Id: aws.String(cleanChangeID(*resp.ChangeInfo.Id)), + } + return resourceAwsGoRoute53Wait(r53, changeRequest) + }, + } + _, err = wait.WaitForState() + if err != nil { + return err + } + + return resourceAwsRoute53ZoneAssociationUpdate(d, meta) +} + +func resourceAwsRoute53ZoneAssociationRead(d *schema.ResourceData, meta interface{}) error { + r53 := meta.(*AWSClient).r53conn + zone_id, vpc_id := resourceAwsRoute53ZoneAssociationParseId(d.Id()) + zone, err := r53.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(zone_id)}) + if err != nil { + // Handle a deleted zone + if r53err, ok := err.(awserr.Error); ok && r53err.Code() == "NoSuchHostedZone" { + d.SetId("") + return nil + } + return err + } + + for _, vpc := range zone.VPCs { + if vpc_id == *vpc.VPCId { + // association is there, return + return nil + } + } + + // no association found + d.SetId("") + return nil +} + +func resourceAwsRoute53ZoneAssociationUpdate(d *schema.ResourceData, meta interface{}) error { + return resourceAwsRoute53ZoneAssociationRead(d, meta) +} + +func resourceAwsRoute53ZoneAssociationDelete(d *schema.ResourceData, meta interface{}) error { + r53 := meta.(*AWSClient).r53conn + zone_id, vpc_id := resourceAwsRoute53ZoneAssociationParseId(d.Id()) + log.Printf("[DEBUG] Deleting Route53 Private Zone (%s) association (VPC: %s)", + zone_id, vpc_id) + + req := &route53.DisassociateVPCFromHostedZoneInput{ + HostedZoneId: aws.String(zone_id), + 
VPC: &route53.VPC{ + VPCId: aws.String(vpc_id), + VPCRegion: aws.String(d.Get("vpc_region").(string)), + }, + Comment: aws.String("Managed by Terraform"), + } + + _, err := r53.DisassociateVPCFromHostedZone(req) + if err != nil { + return err + } + + return nil +} + +func resourceAwsRoute53ZoneAssociationParseId(id string) (zone_id, vpc_id string) { + parts := strings.SplitN(id, ":", 2) + zone_id = parts[0] + vpc_id = parts[1] + return +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route_table.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route_table.go new file mode 100644 index 000000000..f5c72e2d5 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route_table.go @@ -0,0 +1,525 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsRouteTable() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsRouteTableCreate, + Read: resourceAwsRouteTableRead, + Update: resourceAwsRouteTableUpdate, + Delete: resourceAwsRouteTableDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsRouteTableImportState, + }, + + Schema: map[string]*schema.Schema{ + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "tags": tagsSchema(), + + "propagating_vgws": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "route": { + Type: schema.TypeSet, + Computed: true, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cidr_block": { + Type: schema.TypeString, + Optional: true, + }, + + "ipv6_cidr_block": { + Type: schema.TypeString, + Optional: true, + }, + + "egress_only_gateway_id": { + Type: schema.TypeString, + Optional: true, + }, + + "gateway_id": { + Type: schema.TypeString, + Optional: true, + }, + + "instance_id": { + Type: schema.TypeString, + Optional: true, + }, + + "nat_gateway_id": { + Type: schema.TypeString, + Optional: true, + }, + + "vpc_peering_connection_id": { + Type: schema.TypeString, + Optional: true, + }, + + "network_interface_id": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + Set: resourceAwsRouteTableHash, + }, + }, + } +} + +func resourceAwsRouteTableCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + // Create the routing table + createOpts := &ec2.CreateRouteTableInput{ + VpcId: aws.String(d.Get("vpc_id").(string)), + } + log.Printf("[DEBUG] RouteTable create config: %#v", createOpts) + + resp, err := conn.CreateRouteTable(createOpts) + if err != nil { + return fmt.Errorf("Error creating route table: %s", err) + } + + // Get the ID and store it + rt := resp.RouteTable + d.SetId(*rt.RouteTableId) + log.Printf("[INFO] Route Table ID: %s", d.Id()) + + // Wait for the route table to become available + log.Printf( + "[DEBUG] Waiting for route table (%s) to become available", + d.Id()) + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"ready"}, + Refresh: resourceAwsRouteTableStateRefreshFunc(conn, d.Id()), + Timeout: 10 * time.Minute, + } + if _, err := stateConf.WaitForState(); err != 
nil {
+ return fmt.Errorf(
+ "Error waiting for route table (%s) to become available: %s",
+ d.Id(), err)
+ }
+
+ return resourceAwsRouteTableUpdate(d, meta)
+}
+
+func resourceAwsRouteTableRead(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).ec2conn
+
+ rtRaw, _, err := resourceAwsRouteTableStateRefreshFunc(conn, d.Id())()
+ if err != nil {
+ return err
+ }
+ if rtRaw == nil {
+ d.SetId("")
+ return nil
+ }
+
+ rt := rtRaw.(*ec2.RouteTable)
+ d.Set("vpc_id", rt.VpcId)
+
+ propagatingVGWs := make([]string, 0, len(rt.PropagatingVgws))
+ for _, vgw := range rt.PropagatingVgws {
+ propagatingVGWs = append(propagatingVGWs, *vgw.GatewayId)
+ }
+ d.Set("propagating_vgws", propagatingVGWs)
+
+ // Create an empty schema.Set to hold all routes
+ route := &schema.Set{F: resourceAwsRouteTableHash}
+
+ // Loop through the routes and add them to the set
+ for _, r := range rt.Routes {
+ if r.GatewayId != nil && *r.GatewayId == "local" {
+ continue
+ }
+
+ if r.Origin != nil && *r.Origin == "EnableVgwRoutePropagation" {
+ continue
+ }
+
+ if r.DestinationPrefixListId != nil {
+ // Skipping because VPC endpoint routes are handled separately
+ // See aws_vpc_endpoint
+ continue
+ }
+
+ m := make(map[string]interface{})
+
+ if r.DestinationCidrBlock != nil {
+ m["cidr_block"] = *r.DestinationCidrBlock
+ }
+ if r.DestinationIpv6CidrBlock != nil {
+ m["ipv6_cidr_block"] = *r.DestinationIpv6CidrBlock
+ }
+ if r.EgressOnlyInternetGatewayId != nil {
+ m["egress_only_gateway_id"] = *r.EgressOnlyInternetGatewayId
+ }
+ if r.GatewayId != nil {
+ m["gateway_id"] = *r.GatewayId
+ }
+ if r.NatGatewayId != nil {
+ m["nat_gateway_id"] = *r.NatGatewayId
+ }
+ if r.InstanceId != nil {
+ m["instance_id"] = *r.InstanceId
+ }
+ if r.VpcPeeringConnectionId != nil {
+ m["vpc_peering_connection_id"] = *r.VpcPeeringConnectionId
+ }
+ if r.NetworkInterfaceId != nil {
+ m["network_interface_id"] = *r.NetworkInterfaceId
+ }
+
+ route.Add(m)
+ }
+ d.Set("route", route)
+
+ // Tags
+ d.Set("tags", tagsToMap(rt.Tags))
+
+ return nil
+}
+
+func resourceAwsRouteTableUpdate(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).ec2conn
+
+ if d.HasChange("propagating_vgws") {
+ o, n := d.GetChange("propagating_vgws")
+ os := o.(*schema.Set)
+ ns := n.(*schema.Set)
+ remove := os.Difference(ns).List()
+ add := ns.Difference(os).List()
+
+ // Now first loop through all the old propagations and disable any obsolete ones
+ for _, vgw := range remove {
+ id := vgw.(string)
+
+ // Disable the propagation as it no longer exists in the config
+ log.Printf(
+ "[INFO] Deleting VGW propagation from %s: %s",
+ d.Id(), id)
+ _, err := conn.DisableVgwRoutePropagation(&ec2.DisableVgwRoutePropagationInput{
+ RouteTableId: aws.String(d.Id()),
+ GatewayId: aws.String(id),
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ // Make sure we save the state of the currently configured rules
+ propagatingVGWs := os.Intersection(ns)
+ d.Set("propagating_vgws", propagatingVGWs)
+
+ // Then loop through all the newly configured propagations and enable them
+ for _, vgw := range add {
+ id := vgw.(string)
+
+ var err error
+ for i := 0; i < 5; i++ {
+ log.Printf("[INFO] Enabling VGW propagation for %s: %s", d.Id(), id)
+ _, err = conn.EnableVgwRoutePropagation(&ec2.EnableVgwRoutePropagationInput{
+ RouteTableId: aws.String(d.Id()),
+ GatewayId: aws.String(id),
+ })
+ if err == nil {
+ break
+ }
+
+ // If we get a Gateway.NotAttached, it is usually some
+ // eventual consistency stuff. 
So we have to just wait a + // bit... + ec2err, ok := err.(awserr.Error) + if ok && ec2err.Code() == "Gateway.NotAttached" { + time.Sleep(20 * time.Second) + continue + } + } + if err != nil { + return err + } + + propagatingVGWs.Add(vgw) + d.Set("propagating_vgws", propagatingVGWs) + } + } + + // Check if the route set as a whole has changed + if d.HasChange("route") { + o, n := d.GetChange("route") + ors := o.(*schema.Set).Difference(n.(*schema.Set)) + nrs := n.(*schema.Set).Difference(o.(*schema.Set)) + + // Now first loop through all the old routes and delete any obsolete ones + for _, route := range ors.List() { + m := route.(map[string]interface{}) + + deleteOpts := &ec2.DeleteRouteInput{ + RouteTableId: aws.String(d.Id()), + } + + if s := m["ipv6_cidr_block"].(string); s != "" { + deleteOpts.DestinationIpv6CidrBlock = aws.String(s) + + log.Printf( + "[INFO] Deleting route from %s: %s", + d.Id(), m["ipv6_cidr_block"].(string)) + } + + if s := m["cidr_block"].(string); s != "" { + deleteOpts.DestinationCidrBlock = aws.String(s) + + log.Printf( + "[INFO] Deleting route from %s: %s", + d.Id(), m["cidr_block"].(string)) + } + + _, err := conn.DeleteRoute(deleteOpts) + if err != nil { + return err + } + } + + // Make sure we save the state of the currently configured rules + routes := o.(*schema.Set).Intersection(n.(*schema.Set)) + d.Set("route", routes) + + // Then loop through all the newly configured routes and create them + for _, route := range nrs.List() { + m := route.(map[string]interface{}) + + opts := ec2.CreateRouteInput{ + RouteTableId: aws.String(d.Id()), + } + + if s := m["vpc_peering_connection_id"].(string); s != "" { + opts.VpcPeeringConnectionId = aws.String(s) + } + + if s := m["network_interface_id"].(string); s != "" { + opts.NetworkInterfaceId = aws.String(s) + } + + if s := m["instance_id"].(string); s != "" { + opts.InstanceId = aws.String(s) + } + + if s := m["ipv6_cidr_block"].(string); s != "" { + opts.DestinationIpv6CidrBlock = aws.String(s) + } + + if s := m["cidr_block"].(string); s != "" { + opts.DestinationCidrBlock = aws.String(s) + } + + if s := m["gateway_id"].(string); s != "" { + opts.GatewayId = aws.String(s) + } + + if s := m["egress_only_gateway_id"].(string); s != "" { + opts.EgressOnlyInternetGatewayId = aws.String(s) + } + + if s := m["nat_gateway_id"].(string); s != "" { + opts.NatGatewayId = aws.String(s) + } + + log.Printf("[INFO] Creating route for %s: %#v", d.Id(), opts) + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.CreateRoute(&opts) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "InvalidRouteTableID.NotFound" { + return resource.RetryableError(awsErr) + } + } + return resource.NonRetryableError(err) + } + return nil + }) + if err != nil { + return err + } + + routes.Add(route) + d.Set("route", routes) + } + } + + if err := setTags(conn, d); err != nil { + return err + } else { + d.SetPartial("tags") + } + + return resourceAwsRouteTableRead(d, meta) +} + +func resourceAwsRouteTableDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + // First request the routing table since we'll have to disassociate + // all the subnets first. 
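+ // The refresh func returns a nil route table once it no longer exists, in
+ // which case there is nothing left to disassociate or delete.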
+ rtRaw, _, err := resourceAwsRouteTableStateRefreshFunc(conn, d.Id())() + if err != nil { + return err + } + if rtRaw == nil { + return nil + } + rt := rtRaw.(*ec2.RouteTable) + + // Do all the disassociations + for _, a := range rt.Associations { + log.Printf("[INFO] Disassociating association: %s", *a.RouteTableAssociationId) + _, err := conn.DisassociateRouteTable(&ec2.DisassociateRouteTableInput{ + AssociationId: a.RouteTableAssociationId, + }) + if err != nil { + // First check if the association ID is not found. If this + // is the case, then it was already disassociated somehow, + // and that is okay. + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidAssociationID.NotFound" { + err = nil + } + } + if err != nil { + return err + } + } + + // Delete the route table + log.Printf("[INFO] Deleting Route Table: %s", d.Id()) + _, err = conn.DeleteRouteTable(&ec2.DeleteRouteTableInput{ + RouteTableId: aws.String(d.Id()), + }) + if err != nil { + ec2err, ok := err.(awserr.Error) + if ok && ec2err.Code() == "InvalidRouteTableID.NotFound" { + return nil + } + + return fmt.Errorf("Error deleting route table: %s", err) + } + + // Wait for the route table to really destroy + log.Printf( + "[DEBUG] Waiting for route table (%s) to become destroyed", + d.Id()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ready"}, + Target: []string{}, + Refresh: resourceAwsRouteTableStateRefreshFunc(conn, d.Id()), + Timeout: 5 * time.Minute, + } + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf( + "Error waiting for route table (%s) to become destroyed: %s", + d.Id(), err) + } + + return nil +} + +func resourceAwsRouteTableHash(v interface{}) int { + var buf bytes.Buffer + m, castOk := v.(map[string]interface{}) + if !castOk { + return 0 + } + + if v, ok := m["ipv6_cidr_block"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + if v, ok := m["cidr_block"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + if v, ok := m["gateway_id"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + if v, ok := m["egress_only_gateway_id"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + natGatewaySet := false + if v, ok := m["nat_gateway_id"]; ok { + natGatewaySet = v.(string) != "" + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + instanceSet := false + if v, ok := m["instance_id"]; ok { + instanceSet = v.(string) != "" + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + if v, ok := m["vpc_peering_connection_id"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + if v, ok := m["network_interface_id"]; ok && !(instanceSet || natGatewaySet) { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + return hashcode.String(buf.String()) +} + +// resourceAwsRouteTableStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch +// a RouteTable. +func resourceAwsRouteTableStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := conn.DescribeRouteTables(&ec2.DescribeRouteTablesInput{ + RouteTableIds: []*string{aws.String(id)}, + }) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidRouteTableID.NotFound" { + resp = nil + } else { + log.Printf("Error on RouteTableStateRefresh: %s", err) + return nil, "", err + } + } + + if resp == nil { + // Sometimes AWS just has consistency issues and doesn't see + // our instance yet. Return an empty state. 
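+ // Callers treat a nil result as "not found": Read clears the resource ID
+ // and Delete returns early without an error.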
+ return nil, "", nil + } + + rt := resp.RouteTables[0] + return rt, "ready", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route_table_association.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route_table_association.go new file mode 100644 index 000000000..eb2c19409 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_route_table_association.go @@ -0,0 +1,155 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsRouteTableAssociation() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsRouteTableAssociationCreate, + Read: resourceAwsRouteTableAssociationRead, + Update: resourceAwsRouteTableAssociationUpdate, + Delete: resourceAwsRouteTableAssociationDelete, + + Schema: map[string]*schema.Schema{ + "subnet_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "route_table_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceAwsRouteTableAssociationCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + log.Printf( + "[INFO] Creating route table association: %s => %s", + d.Get("subnet_id").(string), + d.Get("route_table_id").(string)) + + associationOpts := ec2.AssociateRouteTableInput{ + RouteTableId: aws.String(d.Get("route_table_id").(string)), + SubnetId: aws.String(d.Get("subnet_id").(string)), + } + + var resp *ec2.AssociateRouteTableOutput + var err error + err = resource.Retry(5*time.Minute, func() *resource.RetryError { + resp, err = conn.AssociateRouteTable(&associationOpts) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "InvalidRouteTableID.NotFound" { + return resource.RetryableError(awsErr) + } + } + return resource.NonRetryableError(err) + } + return nil + }) + if err != nil { + return err + } + + // Set the ID and return + d.SetId(*resp.AssociationId) + log.Printf("[INFO] Association ID: %s", d.Id()) + + return nil +} + +func resourceAwsRouteTableAssociationRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + // Get the routing table that this association belongs to + rtRaw, _, err := resourceAwsRouteTableStateRefreshFunc( + conn, d.Get("route_table_id").(string))() + if err != nil { + return err + } + if rtRaw == nil { + return nil + } + rt := rtRaw.(*ec2.RouteTable) + + // Inspect that the association exists + found := false + for _, a := range rt.Associations { + if *a.RouteTableAssociationId == d.Id() { + found = true + d.Set("subnet_id", *a.SubnetId) + break + } + } + + if !found { + // It seems it doesn't exist anymore, so clear the ID + d.SetId("") + } + + return nil +} + +func resourceAwsRouteTableAssociationUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + log.Printf( + "[INFO] Creating route table association: %s => %s", + d.Get("subnet_id").(string), + d.Get("route_table_id").(string)) + + req := &ec2.ReplaceRouteTableAssociationInput{ + AssociationId: aws.String(d.Id()), + RouteTableId: aws.String(d.Get("route_table_id").(string)), + } + resp, err := conn.ReplaceRouteTableAssociation(req) + + if err != nil { + ec2err, ok := 
err.(awserr.Error) + if ok && ec2err.Code() == "InvalidAssociationID.NotFound" { + // Not found, so just create a new one + return resourceAwsRouteTableAssociationCreate(d, meta) + } + + return err + } + + // Update the ID + d.SetId(*resp.NewAssociationId) + log.Printf("[INFO] Association ID: %s", d.Id()) + + return nil +} + +func resourceAwsRouteTableAssociationDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + log.Printf("[INFO] Deleting route table association: %s", d.Id()) + _, err := conn.DisassociateRouteTable(&ec2.DisassociateRouteTableInput{ + AssociationId: aws.String(d.Id()), + }) + if err != nil { + ec2err, ok := err.(awserr.Error) + if ok && ec2err.Code() == "InvalidAssociationID.NotFound" { + return nil + } + + return fmt.Errorf("Error deleting route table association: %s", err) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket.go new file mode 100644 index 000000000..7da1ac18f --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket.go @@ -0,0 +1,1858 @@ +package aws + +import ( + "bytes" + "encoding/json" + "fmt" + "log" + "net/url" + "regexp" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsS3Bucket() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsS3BucketCreate, + Read: resourceAwsS3BucketRead, + Update: resourceAwsS3BucketUpdate, + Delete: resourceAwsS3BucketDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsS3BucketImportState, + }, + + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"bucket_prefix"}, + }, + "bucket_prefix": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "bucket_domain_name": { + Type: schema.TypeString, + Computed: true, + }, + + "arn": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "acl": { + Type: schema.TypeString, + Default: "private", + Optional: true, + }, + + "policy": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateJsonString, + DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, + }, + + "cors_rule": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_headers": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "allowed_methods": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "allowed_origins": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "expose_headers": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "max_age_seconds": { + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, + + "website": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "index_document": { + Type: schema.TypeString, + Optional: true, + }, + + "error_document": { + 
Type: schema.TypeString, + Optional: true, + }, + + "redirect_all_requests_to": { + Type: schema.TypeString, + ConflictsWith: []string{ + "website.0.index_document", + "website.0.error_document", + "website.0.routing_rules", + }, + Optional: true, + }, + + "routing_rules": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateJsonString, + StateFunc: func(v interface{}) string { + json, _ := normalizeJsonString(v) + return json + }, + }, + }, + }, + }, + + "hosted_zone_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "region": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "website_endpoint": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "website_domain": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "versioning": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "mfa_delete": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, + + "logging": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "target_bucket": { + Type: schema.TypeString, + Required: true, + }, + "target_prefix": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["target_bucket"])) + buf.WriteString(fmt.Sprintf("%s-", m["target_prefix"])) + return hashcode.String(buf.String()) + }, + }, + + "lifecycle_rule": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateS3BucketLifecycleRuleId, + }, + "prefix": { + Type: schema.TypeString, + Required: true, + }, + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + "abort_incomplete_multipart_upload_days": { + Type: schema.TypeInt, + Optional: true, + }, + "expiration": { + Type: schema.TypeSet, + Optional: true, + Set: expirationHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "date": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateS3BucketLifecycleTimestamp, + }, + "days": { + Type: schema.TypeInt, + Optional: true, + }, + "expired_object_delete_marker": { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + "noncurrent_version_expiration": { + Type: schema.TypeSet, + Optional: true, + Set: expirationHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "days": { + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, + "transition": { + Type: schema.TypeSet, + Optional: true, + Set: transitionHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "date": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateS3BucketLifecycleTimestamp, + }, + "days": { + Type: schema.TypeInt, + Optional: true, + }, + "storage_class": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateS3BucketLifecycleStorageClass, + }, + }, + }, + }, + "noncurrent_version_transition": { + Type: schema.TypeSet, + Optional: true, + Set: transitionHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "days": { + Type: schema.TypeInt, + Optional: true, + }, + "storage_class": { + Type: 
schema.TypeString, + Required: true, + ValidateFunc: validateS3BucketLifecycleStorageClass, + }, + }, + }, + }, + }, + }, + }, + + "force_destroy": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "acceleration_status": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateS3BucketAccelerationStatus, + }, + + "request_payer": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateS3BucketRequestPayerType, + }, + + "replication_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "role": { + Type: schema.TypeString, + Required: true, + }, + "rules": { + Type: schema.TypeSet, + Required: true, + Set: rulesHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateS3BucketReplicationRuleId, + }, + "destination": { + Type: schema.TypeSet, + MaxItems: 1, + MinItems: 1, + Required: true, + Set: destinationHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, + }, + "storage_class": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateS3BucketReplicationDestinationStorageClass, + }, + }, + }, + }, + "prefix": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateS3BucketReplicationRulePrefix, + }, + "status": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateS3BucketReplicationRuleStatus, + }, + }, + }, + }, + }, + }, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsS3BucketCreate(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn + + // Get the bucket and acl + var bucket string + if v, ok := d.GetOk("bucket"); ok { + bucket = v.(string) + } else if v, ok := d.GetOk("bucket_prefix"); ok { + bucket = resource.PrefixedUniqueId(v.(string)) + } else { + bucket = resource.UniqueId() + } + d.Set("bucket", bucket) + acl := d.Get("acl").(string) + + log.Printf("[DEBUG] S3 bucket create: %s, ACL: %s", bucket, acl) + + req := &s3.CreateBucketInput{ + Bucket: aws.String(bucket), + ACL: aws.String(acl), + } + + var awsRegion string + if region, ok := d.GetOk("region"); ok { + awsRegion = region.(string) + } else { + awsRegion = meta.(*AWSClient).region + } + log.Printf("[DEBUG] S3 bucket create: %s, using region: %s", bucket, awsRegion) + + // Special case us-east-1 region and do not set the LocationConstraint. 
+ // See "Request Elements: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUT.html + if awsRegion != "us-east-1" { + req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{ + LocationConstraint: aws.String(awsRegion), + } + } + + if err := validateS3BucketName(bucket, awsRegion); err != nil { + return fmt.Errorf("Error validating S3 bucket name: %s", err) + } + + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + log.Printf("[DEBUG] Trying to create new S3 bucket: %q", bucket) + _, err := s3conn.CreateBucket(req) + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "OperationAborted" { + log.Printf("[WARN] Got an error while trying to create S3 bucket %s: %s", bucket, err) + return resource.RetryableError( + fmt.Errorf("[WARN] Error creating S3 bucket %s, retrying: %s", + bucket, err)) + } + } + if err != nil { + return resource.NonRetryableError(err) + } + + return nil + }) + + if err != nil { + return fmt.Errorf("Error creating S3 bucket: %s", err) + } + + // Assign the bucket name as the resource ID + d.SetId(bucket) + + return resourceAwsS3BucketUpdate(d, meta) +} + +func resourceAwsS3BucketUpdate(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn + if err := setTagsS3(s3conn, d); err != nil { + return fmt.Errorf("%q: %s", d.Get("bucket").(string), err) + } + + if d.HasChange("policy") { + if err := resourceAwsS3BucketPolicyUpdate(s3conn, d); err != nil { + return err + } + } + + if d.HasChange("cors_rule") { + if err := resourceAwsS3BucketCorsUpdate(s3conn, d); err != nil { + return err + } + } + + if d.HasChange("website") { + if err := resourceAwsS3BucketWebsiteUpdate(s3conn, d); err != nil { + return err + } + } + + if d.HasChange("versioning") { + if err := resourceAwsS3BucketVersioningUpdate(s3conn, d); err != nil { + return err + } + } + if d.HasChange("acl") { + if err := resourceAwsS3BucketAclUpdate(s3conn, d); err != nil { + return err + } + } + + if d.HasChange("logging") { + if err := resourceAwsS3BucketLoggingUpdate(s3conn, d); err != nil { + return err + } + } + + if d.HasChange("lifecycle_rule") { + if err := resourceAwsS3BucketLifecycleUpdate(s3conn, d); err != nil { + return err + } + } + + if d.HasChange("acceleration_status") { + if err := resourceAwsS3BucketAccelerationUpdate(s3conn, d); err != nil { + return err + } + } + + if d.HasChange("request_payer") { + if err := resourceAwsS3BucketRequestPayerUpdate(s3conn, d); err != nil { + return err + } + } + + if d.HasChange("replication_configuration") { + if err := resourceAwsS3BucketReplicationConfigurationUpdate(s3conn, d); err != nil { + return err + } + } + + return resourceAwsS3BucketRead(d, meta) +} + +func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn + + var err error + _, err = s3conn.HeadBucket(&s3.HeadBucketInput{ + Bucket: aws.String(d.Id()), + }) + if err != nil { + if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() == 404 { + log.Printf("[WARN] S3 Bucket (%s) not found, error code (404)", d.Id()) + d.SetId("") + return nil + } else { + // some of the AWS SDK's errors can be empty strings, so let's add + // some additional context. 
+ return fmt.Errorf("error reading S3 bucket \"%s\": %s", d.Id(), err) + } + } + + // In the import case, we won't have this + if _, ok := d.GetOk("bucket"); !ok { + d.Set("bucket", d.Id()) + } + + d.Set("bucket_domain_name", bucketDomainName(d.Get("bucket").(string))) + + // Read the policy + if _, ok := d.GetOk("policy"); ok { + pol, err := s3conn.GetBucketPolicy(&s3.GetBucketPolicyInput{ + Bucket: aws.String(d.Id()), + }) + log.Printf("[DEBUG] S3 bucket: %s, read policy: %v", d.Id(), pol) + if err != nil { + if err := d.Set("policy", ""); err != nil { + return err + } + } else { + if v := pol.Policy; v == nil { + if err := d.Set("policy", ""); err != nil { + return err + } + } else { + policy, err := normalizeJsonString(*v) + if err != nil { + return errwrap.Wrapf("policy contains an invalid JSON: {{err}}", err) + } + d.Set("policy", policy) + } + } + } + + // Read the CORS + cors, err := s3conn.GetBucketCors(&s3.GetBucketCorsInput{ + Bucket: aws.String(d.Id()), + }) + if err != nil { + // An S3 Bucket might not have CORS configuration set. + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "NoSuchCORSConfiguration" { + return err + } + log.Printf("[WARN] S3 bucket: %s, no CORS configuration could be found.", d.Id()) + } + log.Printf("[DEBUG] S3 bucket: %s, read CORS: %v", d.Id(), cors) + if cors.CORSRules != nil { + rules := make([]map[string]interface{}, 0, len(cors.CORSRules)) + for _, ruleObject := range cors.CORSRules { + rule := make(map[string]interface{}) + rule["allowed_headers"] = flattenStringList(ruleObject.AllowedHeaders) + rule["allowed_methods"] = flattenStringList(ruleObject.AllowedMethods) + rule["allowed_origins"] = flattenStringList(ruleObject.AllowedOrigins) + // Both the "ExposeHeaders" and "MaxAgeSeconds" might not be set. 
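+ // The SDK models optional response fields as pointers, so each one is
+ // nil-checked before it is dereferenced into the rule map.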
+ if ruleObject.ExposeHeaders != nil {
+ rule["expose_headers"] = flattenStringList(ruleObject.ExposeHeaders)
+ }
+ if ruleObject.MaxAgeSeconds != nil {
+ rule["max_age_seconds"] = int(*ruleObject.MaxAgeSeconds)
+ }
+ rules = append(rules, rule)
+ }
+ if err := d.Set("cors_rule", rules); err != nil {
+ return err
+ }
+ }
+
+ // Read the website configuration
+ ws, err := s3conn.GetBucketWebsite(&s3.GetBucketWebsiteInput{
+ Bucket: aws.String(d.Id()),
+ })
+ var websites []map[string]interface{}
+ if err == nil {
+ w := make(map[string]interface{})
+
+ if v := ws.IndexDocument; v != nil {
+ w["index_document"] = *v.Suffix
+ }
+
+ if v := ws.ErrorDocument; v != nil {
+ w["error_document"] = *v.Key
+ }
+
+ if v := ws.RedirectAllRequestsTo; v != nil {
+ if v.Protocol == nil {
+ w["redirect_all_requests_to"] = *v.HostName
+ } else {
+ var host string
+ var path string
+ parsedHostName, err := url.Parse(*v.HostName)
+ if err == nil {
+ host = parsedHostName.Host
+ path = parsedHostName.Path
+ } else {
+ host = *v.HostName
+ path = ""
+ }
+
+ w["redirect_all_requests_to"] = (&url.URL{
+ Host: host,
+ Path: path,
+ Scheme: *v.Protocol,
+ }).String()
+ }
+ }
+
+ if v := ws.RoutingRules; v != nil {
+ rr, err := normalizeRoutingRules(v)
+ if err != nil {
+ return fmt.Errorf("Error while marshaling routing rules: %s", err)
+ }
+ w["routing_rules"] = rr
+ }
+
+ websites = append(websites, w)
+ }
+ if err := d.Set("website", websites); err != nil {
+ return err
+ }
+
+ // Read the versioning configuration
+ versioning, err := s3conn.GetBucketVersioning(&s3.GetBucketVersioningInput{
+ Bucket: aws.String(d.Id()),
+ })
+ if err != nil {
+ return err
+ }
+ log.Printf("[DEBUG] S3 Bucket: %s, versioning: %v", d.Id(), versioning)
+ if versioning != nil {
+ vcl := make([]map[string]interface{}, 0, 1)
+ vc := make(map[string]interface{})
+ if versioning.Status != nil && *versioning.Status == s3.BucketVersioningStatusEnabled {
+ vc["enabled"] = true
+ } else {
+ vc["enabled"] = false
+ }
+
+ if versioning.MFADelete != nil && *versioning.MFADelete == s3.MFADeleteEnabled {
+ vc["mfa_delete"] = true
+ } else {
+ vc["mfa_delete"] = false
+ }
+ vcl = append(vcl, vc)
+ if err := d.Set("versioning", vcl); err != nil {
+ return err
+ }
+ }
+
+ // Read the acceleration status
+ accelerate, err := s3conn.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{
+ Bucket: aws.String(d.Id()),
+ })
+ if err != nil {
+ // Amazon S3 Transfer Acceleration might not be supported in the
+ // given region; for example, China (Beijing) and the Government
+ // Cloud do not support this feature at the moment.
+ if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "UnsupportedArgument" {
+ return err
+ }
+
+ var awsRegion string
+ if region, ok := d.GetOk("region"); ok {
+ awsRegion = region.(string)
+ } else {
+ awsRegion = meta.(*AWSClient).region
+ }
+
+ log.Printf("[WARN] S3 bucket: %s, S3 Transfer Acceleration is not supported in the region: %s", d.Id(), awsRegion)
+ } else {
+ log.Printf("[DEBUG] S3 bucket: %s, read Acceleration: %v", d.Id(), accelerate)
+ d.Set("acceleration_status", accelerate.Status)
+ }
+
+ // Read the request payer configuration.
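+ // The request payment configuration records who pays for data transfer:
+ // "BucketOwner" (the default) or "Requester".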
+ payer, err := s3conn.GetBucketRequestPayment(&s3.GetBucketRequestPaymentInput{ + Bucket: aws.String(d.Id()), + }) + if err != nil { + return err + } + log.Printf("[DEBUG] S3 Bucket: %s, read request payer: %v", d.Id(), payer) + if payer.Payer != nil { + if err := d.Set("request_payer", *payer.Payer); err != nil { + return err + } + } + + // Read the logging configuration + logging, err := s3conn.GetBucketLogging(&s3.GetBucketLoggingInput{ + Bucket: aws.String(d.Id()), + }) + if err != nil { + return err + } + + log.Printf("[DEBUG] S3 Bucket: %s, logging: %v", d.Id(), logging) + lcl := make([]map[string]interface{}, 0, 1) + if v := logging.LoggingEnabled; v != nil { + lc := make(map[string]interface{}) + if *v.TargetBucket != "" { + lc["target_bucket"] = *v.TargetBucket + } + if *v.TargetPrefix != "" { + lc["target_prefix"] = *v.TargetPrefix + } + lcl = append(lcl, lc) + } + if err := d.Set("logging", lcl); err != nil { + return err + } + + // Read the lifecycle configuration + lifecycle, err := s3conn.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{ + Bucket: aws.String(d.Id()), + }) + if err != nil { + if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() != 404 { + return err + } + } + log.Printf("[DEBUG] S3 Bucket: %s, lifecycle: %v", d.Id(), lifecycle) + if len(lifecycle.Rules) > 0 { + rules := make([]map[string]interface{}, 0, len(lifecycle.Rules)) + + for _, lifecycleRule := range lifecycle.Rules { + rule := make(map[string]interface{}) + + // ID + if lifecycleRule.ID != nil && *lifecycleRule.ID != "" { + rule["id"] = *lifecycleRule.ID + } + // Prefix + if lifecycleRule.Prefix != nil && *lifecycleRule.Prefix != "" { + rule["prefix"] = *lifecycleRule.Prefix + } + // Enabled + if lifecycleRule.Status != nil { + if *lifecycleRule.Status == s3.ExpirationStatusEnabled { + rule["enabled"] = true + } else { + rule["enabled"] = false + } + } + + // AbortIncompleteMultipartUploadDays + if lifecycleRule.AbortIncompleteMultipartUpload != nil { + if lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil { + rule["abort_incomplete_multipart_upload_days"] = int(*lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation) + } + } + + // expiration + if lifecycleRule.Expiration != nil { + e := make(map[string]interface{}) + if lifecycleRule.Expiration.Date != nil { + e["date"] = (*lifecycleRule.Expiration.Date).Format("2006-01-02") + } + if lifecycleRule.Expiration.Days != nil { + e["days"] = int(*lifecycleRule.Expiration.Days) + } + if lifecycleRule.Expiration.ExpiredObjectDeleteMarker != nil { + e["expired_object_delete_marker"] = *lifecycleRule.Expiration.ExpiredObjectDeleteMarker + } + rule["expiration"] = schema.NewSet(expirationHash, []interface{}{e}) + } + // noncurrent_version_expiration + if lifecycleRule.NoncurrentVersionExpiration != nil { + e := make(map[string]interface{}) + if lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays != nil { + e["days"] = int(*lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays) + } + rule["noncurrent_version_expiration"] = schema.NewSet(expirationHash, []interface{}{e}) + } + //// transition + if len(lifecycleRule.Transitions) > 0 { + transitions := make([]interface{}, 0, len(lifecycleRule.Transitions)) + for _, v := range lifecycleRule.Transitions { + t := make(map[string]interface{}) + if v.Date != nil { + t["date"] = (*v.Date).Format("2006-01-02") + } + if v.Days != nil { + t["days"] = int(*v.Days) + } + if v.StorageClass != nil { + t["storage_class"] = *v.StorageClass + } 
+ transitions = append(transitions, t)
+ }
+ rule["transition"] = schema.NewSet(transitionHash, transitions)
+ }
+ // noncurrent_version_transition
+ if len(lifecycleRule.NoncurrentVersionTransitions) > 0 {
+ transitions := make([]interface{}, 0, len(lifecycleRule.NoncurrentVersionTransitions))
+ for _, v := range lifecycleRule.NoncurrentVersionTransitions {
+ t := make(map[string]interface{})
+ if v.NoncurrentDays != nil {
+ t["days"] = int(*v.NoncurrentDays)
+ }
+ if v.StorageClass != nil {
+ t["storage_class"] = *v.StorageClass
+ }
+ transitions = append(transitions, t)
+ }
+ rule["noncurrent_version_transition"] = schema.NewSet(transitionHash, transitions)
+ }
+
+ rules = append(rules, rule)
+ }
+
+ if err := d.Set("lifecycle_rule", rules); err != nil {
+ return err
+ }
+ }
+
+ // Read the bucket replication configuration
+ replication, err := s3conn.GetBucketReplication(&s3.GetBucketReplicationInput{
+ Bucket: aws.String(d.Id()),
+ })
+ if err != nil {
+ if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() != 404 {
+ return err
+ }
+ }
+
+ log.Printf("[DEBUG] S3 Bucket: %s, read replication configuration: %v", d.Id(), replication)
+ if r := replication.ReplicationConfiguration; r != nil {
+ if err := d.Set("replication_configuration", flattenAwsS3BucketReplicationConfiguration(replication.ReplicationConfiguration)); err != nil {
+ log.Printf("[DEBUG] Error setting replication configuration: %s", err)
+ return err
+ }
+ }
+
+ // Add the region as an attribute
+ location, err := s3conn.GetBucketLocation(
+ &s3.GetBucketLocationInput{
+ Bucket: aws.String(d.Id()),
+ },
+ )
+ if err != nil {
+ return err
+ }
+ var region string
+ if location.LocationConstraint != nil {
+ region = *location.LocationConstraint
+ }
+ region = normalizeRegion(region)
+ if err := d.Set("region", region); err != nil {
+ return err
+ }
+
+ // Add the hosted zone ID for this bucket's region as an attribute
+ hostedZoneID := HostedZoneIDForRegion(region)
+ if err := d.Set("hosted_zone_id", hostedZoneID); err != nil {
+ return err
+ }
+
+ // Add website_endpoint as an attribute
+ websiteEndpoint, err := websiteEndpoint(s3conn, d)
+ if err != nil {
+ return err
+ }
+ if websiteEndpoint != nil {
+ if err := d.Set("website_endpoint", websiteEndpoint.Endpoint); err != nil {
+ return err
+ }
+ if err := d.Set("website_domain", websiteEndpoint.Domain); err != nil {
+ return err
+ }
+ }
+
+ tagSet, err := getTagSetS3(s3conn, d.Id())
+ if err != nil {
+ return err
+ }
+
+ if err := d.Set("tags", tagsToMapS3(tagSet)); err != nil {
+ return err
+ }
+
+ d.Set("arn", fmt.Sprintf("arn:%s:s3:::%s", meta.(*AWSClient).partition, d.Id()))
+
+ return nil
+}
+
+func resourceAwsS3BucketDelete(d *schema.ResourceData, meta interface{}) error {
+ s3conn := meta.(*AWSClient).s3conn
+
+ log.Printf("[DEBUG] S3 Delete Bucket: %s", d.Id())
+ _, err := s3conn.DeleteBucket(&s3.DeleteBucketInput{
+ Bucket: aws.String(d.Id()),
+ })
+ if err != nil {
+ ec2err, ok := err.(awserr.Error)
+ if ok && ec2err.Code() == "BucketNotEmpty" {
+ if d.Get("force_destroy").(bool) {
+ // The bucket may still contain objects; delete them so the
+ // bucket deletion can be retried.
+ log.Printf("[DEBUG] S3 Bucket attempting to forceDestroy %+v", err)
+
+ bucket := d.Get("bucket").(string)
+ resp, err := s3conn.ListObjectVersions(
+ &s3.ListObjectVersionsInput{
+ Bucket: aws.String(bucket),
+ },
+ )
+
+ if err != nil {
+ return fmt.Errorf("Error listing S3 Bucket object versions: %s", err)
+ }
+
+ objectsToDelete := make([]*s3.ObjectIdentifier, 0)
+
+ if len(resp.DeleteMarkers) != 0 {
+
+ for _, v :=
range resp.DeleteMarkers { + objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{ + Key: v.Key, + VersionId: v.VersionId, + }) + } + } + + if len(resp.Versions) != 0 { + for _, v := range resp.Versions { + objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{ + Key: v.Key, + VersionId: v.VersionId, + }) + } + } + + params := &s3.DeleteObjectsInput{ + Bucket: aws.String(bucket), + Delete: &s3.Delete{ + Objects: objectsToDelete, + }, + } + + _, err = s3conn.DeleteObjects(params) + + if err != nil { + return fmt.Errorf("Error S3 Bucket force_destroy error deleting: %s", err) + } + + // this line recurses until all objects are deleted or an error is returned + return resourceAwsS3BucketDelete(d, meta) + } + } + return fmt.Errorf("Error deleting S3 Bucket: %s %q", err, d.Get("bucket").(string)) + } + return nil +} + +func resourceAwsS3BucketPolicyUpdate(s3conn *s3.S3, d *schema.ResourceData) error { + bucket := d.Get("bucket").(string) + policy := d.Get("policy").(string) + + if policy != "" { + log.Printf("[DEBUG] S3 bucket: %s, put policy: %s", bucket, policy) + + params := &s3.PutBucketPolicyInput{ + Bucket: aws.String(bucket), + Policy: aws.String(policy), + } + + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + if _, err := s3conn.PutBucketPolicy(params); err != nil { + if awserr, ok := err.(awserr.Error); ok { + if awserr.Code() == "MalformedPolicy" { + return resource.RetryableError(awserr) + } + } + return resource.NonRetryableError(err) + } + return nil + }) + + if err != nil { + return fmt.Errorf("Error putting S3 policy: %s", err) + } + } else { + log.Printf("[DEBUG] S3 bucket: %s, delete policy: %s", bucket, policy) + _, err := s3conn.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{ + Bucket: aws.String(bucket), + }) + + if err != nil { + return fmt.Errorf("Error deleting S3 policy: %s", err) + } + } + + return nil +} + +func resourceAwsS3BucketCorsUpdate(s3conn *s3.S3, d *schema.ResourceData) error { + bucket := d.Get("bucket").(string) + rawCors := d.Get("cors_rule").([]interface{}) + + if len(rawCors) == 0 { + // Delete CORS + log.Printf("[DEBUG] S3 bucket: %s, delete CORS", bucket) + _, err := s3conn.DeleteBucketCors(&s3.DeleteBucketCorsInput{ + Bucket: aws.String(bucket), + }) + if err != nil { + return fmt.Errorf("Error deleting S3 CORS: %s", err) + } + } else { + // Put CORS + rules := make([]*s3.CORSRule, 0, len(rawCors)) + for _, cors := range rawCors { + corsMap := cors.(map[string]interface{}) + r := &s3.CORSRule{} + for k, v := range corsMap { + log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v, %#v", bucket, k, v) + if k == "max_age_seconds" { + r.MaxAgeSeconds = aws.Int64(int64(v.(int))) + } else { + vMap := make([]*string, len(v.([]interface{}))) + for i, vv := range v.([]interface{}) { + str := vv.(string) + vMap[i] = aws.String(str) + } + switch k { + case "allowed_headers": + r.AllowedHeaders = vMap + case "allowed_methods": + r.AllowedMethods = vMap + case "allowed_origins": + r.AllowedOrigins = vMap + case "expose_headers": + r.ExposeHeaders = vMap + } + } + } + rules = append(rules, r) + } + corsInput := &s3.PutBucketCorsInput{ + Bucket: aws.String(bucket), + CORSConfiguration: &s3.CORSConfiguration{ + CORSRules: rules, + }, + } + log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v", bucket, corsInput) + _, err := s3conn.PutBucketCors(corsInput) + if err != nil { + return fmt.Errorf("Error putting S3 CORS: %s", err) + } + } + + return nil +} + +func resourceAwsS3BucketWebsiteUpdate(s3conn *s3.S3, d *schema.ResourceData) error 
{ + ws := d.Get("website").([]interface{}) + + if len(ws) == 1 { + var w map[string]interface{} + if ws[0] != nil { + w = ws[0].(map[string]interface{}) + } else { + w = make(map[string]interface{}) + } + return resourceAwsS3BucketWebsitePut(s3conn, d, w) + } else if len(ws) == 0 { + return resourceAwsS3BucketWebsiteDelete(s3conn, d) + } else { + return fmt.Errorf("Cannot specify more than one website.") + } +} + +func resourceAwsS3BucketWebsitePut(s3conn *s3.S3, d *schema.ResourceData, website map[string]interface{}) error { + bucket := d.Get("bucket").(string) + + var indexDocument, errorDocument, redirectAllRequestsTo, routingRules string + if v, ok := website["index_document"]; ok { + indexDocument = v.(string) + } + if v, ok := website["error_document"]; ok { + errorDocument = v.(string) + } + if v, ok := website["redirect_all_requests_to"]; ok { + redirectAllRequestsTo = v.(string) + } + if v, ok := website["routing_rules"]; ok { + routingRules = v.(string) + } + + if indexDocument == "" && redirectAllRequestsTo == "" { + return fmt.Errorf("Must specify either index_document or redirect_all_requests_to.") + } + + websiteConfiguration := &s3.WebsiteConfiguration{} + + if indexDocument != "" { + websiteConfiguration.IndexDocument = &s3.IndexDocument{Suffix: aws.String(indexDocument)} + } + + if errorDocument != "" { + websiteConfiguration.ErrorDocument = &s3.ErrorDocument{Key: aws.String(errorDocument)} + } + + if redirectAllRequestsTo != "" { + redirect, err := url.Parse(redirectAllRequestsTo) + if err == nil && redirect.Scheme != "" { + var redirectHostBuf bytes.Buffer + redirectHostBuf.WriteString(redirect.Host) + if redirect.Path != "" { + redirectHostBuf.WriteString(redirect.Path) + } + websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectHostBuf.String()), Protocol: aws.String(redirect.Scheme)} + } else { + websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectAllRequestsTo)} + } + } + + if routingRules != "" { + var unmarshaledRules []*s3.RoutingRule + if err := json.Unmarshal([]byte(routingRules), &unmarshaledRules); err != nil { + return err + } + websiteConfiguration.RoutingRules = unmarshaledRules + } + + putInput := &s3.PutBucketWebsiteInput{ + Bucket: aws.String(bucket), + WebsiteConfiguration: websiteConfiguration, + } + + log.Printf("[DEBUG] S3 put bucket website: %#v", putInput) + + _, err := s3conn.PutBucketWebsite(putInput) + if err != nil { + return fmt.Errorf("Error putting S3 website: %s", err) + } + + return nil +} + +func resourceAwsS3BucketWebsiteDelete(s3conn *s3.S3, d *schema.ResourceData) error { + bucket := d.Get("bucket").(string) + deleteInput := &s3.DeleteBucketWebsiteInput{Bucket: aws.String(bucket)} + + log.Printf("[DEBUG] S3 delete bucket website: %#v", deleteInput) + + _, err := s3conn.DeleteBucketWebsite(deleteInput) + if err != nil { + return fmt.Errorf("Error deleting S3 website: %s", err) + } + + d.Set("website_endpoint", "") + d.Set("website_domain", "") + + return nil +} + +func websiteEndpoint(s3conn *s3.S3, d *schema.ResourceData) (*S3Website, error) { + // If the bucket doesn't have a website configuration, return an empty + // endpoint + if _, ok := d.GetOk("website"); !ok { + return nil, nil + } + + bucket := d.Get("bucket").(string) + + // Lookup the region for this bucket + location, err := s3conn.GetBucketLocation( + &s3.GetBucketLocationInput{ + Bucket: aws.String(bucket), + }, + ) + if err != nil { + return nil, err + } + var region string + if 
location.LocationConstraint != nil { + region = *location.LocationConstraint + } + + return WebsiteEndpoint(bucket, region), nil +} + +func bucketDomainName(bucket string) string { + return fmt.Sprintf("%s.s3.amazonaws.com", bucket) +} + +func WebsiteEndpoint(bucket string, region string) *S3Website { + domain := WebsiteDomainUrl(region) + return &S3Website{Endpoint: fmt.Sprintf("%s.%s", bucket, domain), Domain: domain} +} + +func WebsiteDomainUrl(region string) string { + region = normalizeRegion(region) + + // New regions uses different syntax for website endpoints + // http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html + if isOldRegion(region) { + return fmt.Sprintf("s3-website-%s.amazonaws.com", region) + } + return fmt.Sprintf("s3-website.%s.amazonaws.com", region) +} + +func isOldRegion(region string) bool { + oldRegions := []string{ + "ap-northeast-1", + "ap-southeast-1", + "ap-southeast-2", + "eu-west-1", + "sa-east-1", + "us-east-1", + "us-gov-west-1", + "us-west-1", + "us-west-2", + } + for _, r := range oldRegions { + if region == r { + return true + } + } + return false +} + +func resourceAwsS3BucketAclUpdate(s3conn *s3.S3, d *schema.ResourceData) error { + acl := d.Get("acl").(string) + bucket := d.Get("bucket").(string) + + i := &s3.PutBucketAclInput{ + Bucket: aws.String(bucket), + ACL: aws.String(acl), + } + log.Printf("[DEBUG] S3 put bucket ACL: %#v", i) + + _, err := s3conn.PutBucketAcl(i) + if err != nil { + return fmt.Errorf("Error putting S3 ACL: %s", err) + } + + return nil +} + +func resourceAwsS3BucketVersioningUpdate(s3conn *s3.S3, d *schema.ResourceData) error { + v := d.Get("versioning").([]interface{}) + bucket := d.Get("bucket").(string) + vc := &s3.VersioningConfiguration{} + + if len(v) > 0 { + c := v[0].(map[string]interface{}) + + if c["enabled"].(bool) { + vc.Status = aws.String(s3.BucketVersioningStatusEnabled) + } else { + vc.Status = aws.String(s3.BucketVersioningStatusSuspended) + } + + if c["mfa_delete"].(bool) { + vc.MFADelete = aws.String(s3.MFADeleteEnabled) + } else { + vc.MFADelete = aws.String(s3.MFADeleteDisabled) + } + + } else { + vc.Status = aws.String(s3.BucketVersioningStatusSuspended) + } + + i := &s3.PutBucketVersioningInput{ + Bucket: aws.String(bucket), + VersioningConfiguration: vc, + } + log.Printf("[DEBUG] S3 put bucket versioning: %#v", i) + + _, err := s3conn.PutBucketVersioning(i) + if err != nil { + return fmt.Errorf("Error putting S3 versioning: %s", err) + } + + return nil +} + +func resourceAwsS3BucketLoggingUpdate(s3conn *s3.S3, d *schema.ResourceData) error { + logging := d.Get("logging").(*schema.Set).List() + bucket := d.Get("bucket").(string) + loggingStatus := &s3.BucketLoggingStatus{} + + if len(logging) > 0 { + c := logging[0].(map[string]interface{}) + + loggingEnabled := &s3.LoggingEnabled{} + if val, ok := c["target_bucket"]; ok { + loggingEnabled.TargetBucket = aws.String(val.(string)) + } + if val, ok := c["target_prefix"]; ok { + loggingEnabled.TargetPrefix = aws.String(val.(string)) + } + + loggingStatus.LoggingEnabled = loggingEnabled + } + + i := &s3.PutBucketLoggingInput{ + Bucket: aws.String(bucket), + BucketLoggingStatus: loggingStatus, + } + log.Printf("[DEBUG] S3 put bucket logging: %#v", i) + + _, err := s3conn.PutBucketLogging(i) + if err != nil { + return fmt.Errorf("Error putting S3 logging: %s", err) + } + + return nil +} + +func resourceAwsS3BucketAccelerationUpdate(s3conn *s3.S3, d *schema.ResourceData) error { + bucket := d.Get("bucket").(string) + enableAcceleration := 
d.Get("acceleration_status").(string) + + i := &s3.PutBucketAccelerateConfigurationInput{ + Bucket: aws.String(bucket), + AccelerateConfiguration: &s3.AccelerateConfiguration{ + Status: aws.String(enableAcceleration), + }, + } + log.Printf("[DEBUG] S3 put bucket acceleration: %#v", i) + + _, err := s3conn.PutBucketAccelerateConfiguration(i) + if err != nil { + return fmt.Errorf("Error putting S3 acceleration: %s", err) + } + + return nil +} + +func resourceAwsS3BucketRequestPayerUpdate(s3conn *s3.S3, d *schema.ResourceData) error { + bucket := d.Get("bucket").(string) + payer := d.Get("request_payer").(string) + + i := &s3.PutBucketRequestPaymentInput{ + Bucket: aws.String(bucket), + RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{ + Payer: aws.String(payer), + }, + } + log.Printf("[DEBUG] S3 put bucket request payer: %#v", i) + + _, err := s3conn.PutBucketRequestPayment(i) + if err != nil { + return fmt.Errorf("Error putting S3 request payer: %s", err) + } + + return nil +} + +func resourceAwsS3BucketReplicationConfigurationUpdate(s3conn *s3.S3, d *schema.ResourceData) error { + bucket := d.Get("bucket").(string) + replicationConfiguration := d.Get("replication_configuration").([]interface{}) + + if len(replicationConfiguration) == 0 { + i := &s3.DeleteBucketReplicationInput{ + Bucket: aws.String(bucket), + } + + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + if _, err := s3conn.DeleteBucketReplication(i); err != nil { + return resource.NonRetryableError(err) + } + return nil + }) + if err != nil { + return fmt.Errorf("Error removing S3 bucket replication: %s", err) + } + return nil + } + + hasVersioning := false + // Validate that bucket versioning is enabled + if versioning, ok := d.GetOk("versioning"); ok { + v := versioning.([]interface{}) + + if v[0].(map[string]interface{})["enabled"].(bool) { + hasVersioning = true + } + } + + if !hasVersioning { + return fmt.Errorf("versioning must be enabled to allow S3 bucket replication") + } + + c := replicationConfiguration[0].(map[string]interface{}) + + rc := &s3.ReplicationConfiguration{} + if val, ok := c["role"]; ok { + rc.Role = aws.String(val.(string)) + } + + rcRules := c["rules"].(*schema.Set).List() + rules := []*s3.ReplicationRule{} + for _, v := range rcRules { + rr := v.(map[string]interface{}) + rcRule := &s3.ReplicationRule{ + Prefix: aws.String(rr["prefix"].(string)), + Status: aws.String(rr["status"].(string)), + } + + if rrid, ok := rr["id"]; ok { + rcRule.ID = aws.String(rrid.(string)) + } + + ruleDestination := &s3.Destination{} + if destination, ok := rr["destination"]; ok { + dest := destination.(*schema.Set).List() + + bd := dest[0].(map[string]interface{}) + ruleDestination.Bucket = aws.String(bd["bucket"].(string)) + + if storageClass, ok := bd["storage_class"]; ok && storageClass != "" { + ruleDestination.StorageClass = aws.String(storageClass.(string)) + } + } + rcRule.Destination = ruleDestination + rules = append(rules, rcRule) + } + + rc.Rules = rules + i := &s3.PutBucketReplicationInput{ + Bucket: aws.String(bucket), + ReplicationConfiguration: rc, + } + log.Printf("[DEBUG] S3 put bucket replication configuration: %#v", i) + + _, err := s3conn.PutBucketReplication(i) + if err != nil { + return fmt.Errorf("Error putting S3 replication configuration: %s", err) + } + + return nil +} + +func resourceAwsS3BucketLifecycleUpdate(s3conn *s3.S3, d *schema.ResourceData) error { + bucket := d.Get("bucket").(string) + + lifecycleRules := d.Get("lifecycle_rule").([]interface{}) + + if 
len(lifecycleRules) == 0 { + i := &s3.DeleteBucketLifecycleInput{ + Bucket: aws.String(bucket), + } + + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + if _, err := s3conn.DeleteBucketLifecycle(i); err != nil { + return resource.NonRetryableError(err) + } + return nil + }) + if err != nil { + return fmt.Errorf("Error removing S3 lifecycle: %s", err) + } + return nil + } + + rules := make([]*s3.LifecycleRule, 0, len(lifecycleRules)) + + for i, lifecycleRule := range lifecycleRules { + r := lifecycleRule.(map[string]interface{}) + + rule := &s3.LifecycleRule{ + Prefix: aws.String(r["prefix"].(string)), + } + + // ID + if val, ok := r["id"].(string); ok && val != "" { + rule.ID = aws.String(val) + } else { + rule.ID = aws.String(resource.PrefixedUniqueId("tf-s3-lifecycle-")) + } + + // Enabled + if val, ok := r["enabled"].(bool); ok && val { + rule.Status = aws.String(s3.ExpirationStatusEnabled) + } else { + rule.Status = aws.String(s3.ExpirationStatusDisabled) + } + + // AbortIncompleteMultipartUpload + if val, ok := r["abort_incomplete_multipart_upload_days"].(int); ok && val > 0 { + rule.AbortIncompleteMultipartUpload = &s3.AbortIncompleteMultipartUpload{ + DaysAfterInitiation: aws.Int64(int64(val)), + } + } + + // Expiration + expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.expiration", i)).(*schema.Set).List() + if len(expiration) > 0 { + e := expiration[0].(map[string]interface{}) + i := &s3.LifecycleExpiration{} + + if val, ok := e["date"].(string); ok && val != "" { + t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val)) + if err != nil { + return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error()) + } + i.Date = aws.Time(t) + } else if val, ok := e["days"].(int); ok && val > 0 { + i.Days = aws.Int64(int64(val)) + } else if val, ok := e["expired_object_delete_marker"].(bool); ok { + i.ExpiredObjectDeleteMarker = aws.Bool(val) + } + rule.Expiration = i + } + + // NoncurrentVersionExpiration + nc_expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_expiration", i)).(*schema.Set).List() + if len(nc_expiration) > 0 { + e := nc_expiration[0].(map[string]interface{}) + + if val, ok := e["days"].(int); ok && val > 0 { + rule.NoncurrentVersionExpiration = &s3.NoncurrentVersionExpiration{ + NoncurrentDays: aws.Int64(int64(val)), + } + } + } + + // Transitions + transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.transition", i)).(*schema.Set).List() + if len(transitions) > 0 { + rule.Transitions = make([]*s3.Transition, 0, len(transitions)) + for _, transition := range transitions { + transition := transition.(map[string]interface{}) + i := &s3.Transition{} + if val, ok := transition["date"].(string); ok && val != "" { + t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val)) + if err != nil { + return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error()) + } + i.Date = aws.Time(t) + } else if val, ok := transition["days"].(int); ok && val > 0 { + i.Days = aws.Int64(int64(val)) + } + if val, ok := transition["storage_class"].(string); ok && val != "" { + i.StorageClass = aws.String(val) + } + + rule.Transitions = append(rule.Transitions, i) + } + } + // NoncurrentVersionTransitions + nc_transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_transition", i)).(*schema.Set).List() + if len(nc_transitions) > 0 { + rule.NoncurrentVersionTransitions = make([]*s3.NoncurrentVersionTransition, 0, len(nc_transitions)) + for _, transition := range 
nc_transitions { + transition := transition.(map[string]interface{}) + i := &s3.NoncurrentVersionTransition{} + if val, ok := transition["days"].(int); ok && val > 0 { + i.NoncurrentDays = aws.Int64(int64(val)) + } + if val, ok := transition["storage_class"].(string); ok && val != "" { + i.StorageClass = aws.String(val) + } + + rule.NoncurrentVersionTransitions = append(rule.NoncurrentVersionTransitions, i) + } + } + + rules = append(rules, rule) + } + + i := &s3.PutBucketLifecycleConfigurationInput{ + Bucket: aws.String(bucket), + LifecycleConfiguration: &s3.BucketLifecycleConfiguration{ + Rules: rules, + }, + } + + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + if _, err := s3conn.PutBucketLifecycleConfiguration(i); err != nil { + return resource.NonRetryableError(err) + } + return nil + }) + if err != nil { + return fmt.Errorf("Error putting S3 lifecycle: %s", err) + } + + return nil +} + +func flattenAwsS3BucketReplicationConfiguration(r *s3.ReplicationConfiguration) []map[string]interface{} { + replication_configuration := make([]map[string]interface{}, 0, 1) + m := make(map[string]interface{}) + + if r.Role != nil && *r.Role != "" { + m["role"] = *r.Role + } + + rules := make([]interface{}, 0, len(r.Rules)) + for _, v := range r.Rules { + t := make(map[string]interface{}) + if v.Destination != nil { + rd := make(map[string]interface{}) + if v.Destination.Bucket != nil { + rd["bucket"] = *v.Destination.Bucket + } + if v.Destination.StorageClass != nil { + rd["storage_class"] = *v.Destination.StorageClass + } + t["destination"] = schema.NewSet(destinationHash, []interface{}{rd}) + } + + if v.ID != nil { + t["id"] = *v.ID + } + if v.Prefix != nil { + t["prefix"] = *v.Prefix + } + if v.Status != nil { + t["status"] = *v.Status + } + rules = append(rules, t) + } + m["rules"] = schema.NewSet(rulesHash, rules) + + replication_configuration = append(replication_configuration, m) + + return replication_configuration +} + +func normalizeRoutingRules(w []*s3.RoutingRule) (string, error) { + withNulls, err := json.Marshal(w) + if err != nil { + return "", err + } + + var rules []map[string]interface{} + if err := json.Unmarshal(withNulls, &rules); err != nil { + return "", err + } + + var cleanRules []map[string]interface{} + for _, rule := range rules { + cleanRules = append(cleanRules, removeNil(rule)) + } + + withoutNulls, err := json.Marshal(cleanRules) + if err != nil { + return "", err + } + + return string(withoutNulls), nil +} + +func removeNil(data map[string]interface{}) map[string]interface{} { + withoutNil := make(map[string]interface{}) + + for k, v := range data { + if v == nil { + continue + } + + switch v.(type) { + case map[string]interface{}: + withoutNil[k] = removeNil(v.(map[string]interface{})) + default: + withoutNil[k] = v + } + } + + return withoutNil +} + +// DEPRECATED. Please consider using `normalizeJsonString` function instead. 
+func normalizeJson(jsonString interface{}) string {
+ if jsonString == nil || jsonString == "" {
+ return ""
+ }
+ var j interface{}
+ err := json.Unmarshal([]byte(jsonString.(string)), &j)
+ if err != nil {
+ return fmt.Sprintf("Error parsing JSON: %s", err)
+ }
+ b, _ := json.Marshal(j)
+ return string(b[:])
+}
+
+func normalizeRegion(region string) string {
+ // Default to us-east-1 if the bucket doesn't have a region:
+ // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
+ if region == "" {
+ region = "us-east-1"
+ }
+
+ return region
+}
+
+func validateS3BucketAccelerationStatus(v interface{}, k string) (ws []string, errors []error) {
+ validTypes := map[string]struct{}{
+ "Enabled": struct{}{},
+ "Suspended": struct{}{},
+ }
+
+ if _, ok := validTypes[v.(string)]; !ok {
+ errors = append(errors, fmt.Errorf("S3 Bucket Acceleration Status %q is invalid, must be %q or %q", v.(string), "Enabled", "Suspended"))
+ }
+ return
+}
+
+func validateS3BucketRequestPayerType(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ if value != s3.PayerRequester && value != s3.PayerBucketOwner {
+ errors = append(errors, fmt.Errorf(
+ "%q contains an invalid Request Payer type %q. Valid types are either %q or %q",
+ k, value, s3.PayerRequester, s3.PayerBucketOwner))
+ }
+ return
+}
+
+// validateS3BucketName validates an S3 bucket name. Buckets outside of the
+// us-east-1 region have to be DNS-compliant. Once the same restrictions are
+// applied to buckets in the us-east-1 region, this function can be refactored as a SchemaValidateFunc.
+func validateS3BucketName(value string, region string) error {
+ if region != "us-east-1" {
+ if (len(value) < 3) || (len(value) > 63) {
+ return fmt.Errorf("%q must contain from 3 to 63 characters", value)
+ }
+ if !regexp.MustCompile(`^[0-9a-z-.]+$`).MatchString(value) {
+ return fmt.Errorf("only lowercase alphanumeric characters, hyphens, and periods allowed in %q", value)
+ }
+ if regexp.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`).MatchString(value) {
+ return fmt.Errorf("%q must not be formatted as an IP address", value)
+ }
+ if strings.HasPrefix(value, `.`) {
+ return fmt.Errorf("%q cannot start with a period", value)
+ }
+ if strings.HasSuffix(value, `.`) {
+ return fmt.Errorf("%q cannot end with a period", value)
+ }
+ if strings.Contains(value, `..`) {
+ return fmt.Errorf("%q must not contain consecutive periods; only one period is allowed between labels", value)
+ }
+ } else {
+ if len(value) > 255 {
+ return fmt.Errorf("%q must contain less than 256 characters", value)
+ }
+ if !regexp.MustCompile(`^[0-9a-zA-Z-._]+$`).MatchString(value) {
+ return fmt.Errorf("only alphanumeric characters, hyphens, periods, and underscores allowed in %q", value)
+ }
+ }
+ return nil
+}
+
+func expirationHash(v interface{}) int {
+ var buf bytes.Buffer
+ m := v.(map[string]interface{})
+ if v, ok := m["date"]; ok {
+ buf.WriteString(fmt.Sprintf("%s-", v.(string)))
+ }
+ if v, ok := m["days"]; ok {
+ buf.WriteString(fmt.Sprintf("%d-", v.(int)))
+ }
+ if v, ok := m["expired_object_delete_marker"]; ok {
+ buf.WriteString(fmt.Sprintf("%t-", v.(bool)))
+ }
+ return hashcode.String(buf.String())
+}
+
+func transitionHash(v interface{}) int {
+ var buf bytes.Buffer
+ m := v.(map[string]interface{})
+ if v, ok := m["date"]; ok {
+ buf.WriteString(fmt.Sprintf("%s-", v.(string)))
+ }
+ if v, ok := m["days"]; ok {
+ buf.WriteString(fmt.Sprintf("%d-", v.(int)))
+ }
+ if v, ok := m["storage_class"]; ok {
+ buf.WriteString(fmt.Sprintf("%s-",
v.(string))) + } + return hashcode.String(buf.String()) +} + +func rulesHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + + if v, ok := m["id"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + if v, ok := m["prefix"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + if v, ok := m["status"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + return hashcode.String(buf.String()) +} + +func destinationHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + + if v, ok := m["bucket"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + if v, ok := m["storage_class"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + return hashcode.String(buf.String()) +} + +type S3Website struct { + Endpoint, Domain string +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_notification.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_notification.go new file mode 100644 index 000000000..f3e19b484 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_notification.go @@ -0,0 +1,467 @@ +package aws + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/s3" +) + +func resourceAwsS3BucketNotification() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsS3BucketNotificationPut, + Read: resourceAwsS3BucketNotificationRead, + Update: resourceAwsS3BucketNotificationPut, + Delete: resourceAwsS3BucketNotificationDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "bucket": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "topic": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "filter_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "filter_suffix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "topic_arn": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "events": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + }, + }, + + "queue": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "filter_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "filter_suffix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "queue_arn": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "events": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + }, + }, + + "lambda_function": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + 
"filter_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "filter_suffix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "lambda_function_arn": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "events": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + }, + }, + }, + } +} + +func resourceAwsS3BucketNotificationPut(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn + bucket := d.Get("bucket").(string) + + // TopicNotifications + topicNotifications := d.Get("topic").([]interface{}) + topicConfigs := make([]*s3.TopicConfiguration, 0, len(topicNotifications)) + for i, c := range topicNotifications { + tc := &s3.TopicConfiguration{} + + c := c.(map[string]interface{}) + + // Id + if val, ok := c["id"].(string); ok && val != "" { + tc.Id = aws.String(val) + } else { + tc.Id = aws.String(resource.PrefixedUniqueId("tf-s3-topic-")) + } + + // TopicArn + if val, ok := c["topic_arn"].(string); ok { + tc.TopicArn = aws.String(val) + } + + // Events + events := d.Get(fmt.Sprintf("topic.%d.events", i)).(*schema.Set).List() + tc.Events = make([]*string, 0, len(events)) + for _, e := range events { + tc.Events = append(tc.Events, aws.String(e.(string))) + } + + // Filter + filterRules := make([]*s3.FilterRule, 0, 2) + if val, ok := c["filter_prefix"].(string); ok && val != "" { + filterRule := &s3.FilterRule{ + Name: aws.String("prefix"), + Value: aws.String(val), + } + filterRules = append(filterRules, filterRule) + } + if val, ok := c["filter_suffix"].(string); ok && val != "" { + filterRule := &s3.FilterRule{ + Name: aws.String("suffix"), + Value: aws.String(val), + } + filterRules = append(filterRules, filterRule) + } + if len(filterRules) > 0 { + tc.Filter = &s3.NotificationConfigurationFilter{ + Key: &s3.KeyFilter{ + FilterRules: filterRules, + }, + } + } + topicConfigs = append(topicConfigs, tc) + } + + // SQS + queueNotifications := d.Get("queue").([]interface{}) + queueConfigs := make([]*s3.QueueConfiguration, 0, len(queueNotifications)) + for i, c := range queueNotifications { + qc := &s3.QueueConfiguration{} + + c := c.(map[string]interface{}) + + // Id + if val, ok := c["id"].(string); ok && val != "" { + qc.Id = aws.String(val) + } else { + qc.Id = aws.String(resource.PrefixedUniqueId("tf-s3-queue-")) + } + + // QueueArn + if val, ok := c["queue_arn"].(string); ok { + qc.QueueArn = aws.String(val) + } + + // Events + events := d.Get(fmt.Sprintf("queue.%d.events", i)).(*schema.Set).List() + qc.Events = make([]*string, 0, len(events)) + for _, e := range events { + qc.Events = append(qc.Events, aws.String(e.(string))) + } + + // Filter + filterRules := make([]*s3.FilterRule, 0, 2) + if val, ok := c["filter_prefix"].(string); ok && val != "" { + filterRule := &s3.FilterRule{ + Name: aws.String("prefix"), + Value: aws.String(val), + } + filterRules = append(filterRules, filterRule) + } + if val, ok := c["filter_suffix"].(string); ok && val != "" { + filterRule := &s3.FilterRule{ + Name: aws.String("suffix"), + Value: aws.String(val), + } + filterRules = append(filterRules, filterRule) + } + if len(filterRules) > 0 { + qc.Filter = &s3.NotificationConfigurationFilter{ + Key: &s3.KeyFilter{ + FilterRules: filterRules, + }, + } + } + queueConfigs = append(queueConfigs, qc) + } + + // Lambda + lambdaFunctionNotifications := d.Get("lambda_function").([]interface{}) + lambdaConfigs := 
make([]*s3.LambdaFunctionConfiguration, 0, len(lambdaFunctionNotifications)) + for i, c := range lambdaFunctionNotifications { + lc := &s3.LambdaFunctionConfiguration{} + + c := c.(map[string]interface{}) + + // Id + if val, ok := c["id"].(string); ok && val != "" { + lc.Id = aws.String(val) + } else { + lc.Id = aws.String(resource.PrefixedUniqueId("tf-s3-lambda-")) + } + + // LambdaFunctionArn + if val, ok := c["lambda_function_arn"].(string); ok { + lc.LambdaFunctionArn = aws.String(val) + } + + // Events + events := d.Get(fmt.Sprintf("lambda_function.%d.events", i)).(*schema.Set).List() + lc.Events = make([]*string, 0, len(events)) + for _, e := range events { + lc.Events = append(lc.Events, aws.String(e.(string))) + } + + // Filter + filterRules := make([]*s3.FilterRule, 0, 2) + if val, ok := c["filter_prefix"].(string); ok && val != "" { + filterRule := &s3.FilterRule{ + Name: aws.String("prefix"), + Value: aws.String(val), + } + filterRules = append(filterRules, filterRule) + } + if val, ok := c["filter_suffix"].(string); ok && val != "" { + filterRule := &s3.FilterRule{ + Name: aws.String("suffix"), + Value: aws.String(val), + } + filterRules = append(filterRules, filterRule) + } + if len(filterRules) > 0 { + lc.Filter = &s3.NotificationConfigurationFilter{ + Key: &s3.KeyFilter{ + FilterRules: filterRules, + }, + } + } + lambdaConfigs = append(lambdaConfigs, lc) + } + + notificationConfiguration := &s3.NotificationConfiguration{} + if len(lambdaConfigs) > 0 { + notificationConfiguration.LambdaFunctionConfigurations = lambdaConfigs + } + if len(queueConfigs) > 0 { + notificationConfiguration.QueueConfigurations = queueConfigs + } + if len(topicConfigs) > 0 { + notificationConfiguration.TopicConfigurations = topicConfigs + } + i := &s3.PutBucketNotificationConfigurationInput{ + Bucket: aws.String(bucket), + NotificationConfiguration: notificationConfiguration, + } + + log.Printf("[DEBUG] S3 bucket: %s, Putting notification: %v", bucket, i) + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + if _, err := s3conn.PutBucketNotificationConfiguration(i); err != nil { + if awserr, ok := err.(awserr.Error); ok { + switch awserr.Message() { + case "Unable to validate the following destination configurations": + return resource.RetryableError(awserr) + } + } + // Didn't recognize the error, so shouldn't retry. 
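+ // (That "Unable to validate..." message typically appears while the + // destination's SNS/SQS/Lambda permissions are still propagating, which is + // why it is the one error worth retrying here.)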
+ return resource.NonRetryableError(err) + } + // Successful put configuration + return nil + }) + if err != nil { + return fmt.Errorf("Error putting S3 notification configuration: %s", err) + } + + d.SetId(bucket) + + return resourceAwsS3BucketNotificationRead(d, meta) +} + +func resourceAwsS3BucketNotificationDelete(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn + + i := &s3.PutBucketNotificationConfigurationInput{ + Bucket: aws.String(d.Id()), + NotificationConfiguration: &s3.NotificationConfiguration{}, + } + + log.Printf("[DEBUG] S3 bucket: %s, Deleting notification: %v", d.Id(), i) + _, err := s3conn.PutBucketNotificationConfiguration(i) + if err != nil { + return fmt.Errorf("Error deleting S3 notification configuration: %s", err) + } + + d.SetId("") + + return nil +} + +func resourceAwsS3BucketNotificationRead(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn + + var err error + _, err = s3conn.HeadBucket(&s3.HeadBucketInput{ + Bucket: aws.String(d.Id()), + }) + if err != nil { + if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() == 404 { + log.Printf("[WARN] S3 Bucket (%s) not found, error code (404)", d.Id()) + d.SetId("") + return nil + } else { + // some of the AWS SDK's errors can be empty strings, so let's add + // some additional context. + return fmt.Errorf("error reading S3 bucket \"%s\": %s", d.Id(), err) + } + } + + // Read the notification configuration + notificationConfigs, err := s3conn.GetBucketNotificationConfiguration(&s3.GetBucketNotificationConfigurationRequest{ + Bucket: aws.String(d.Id()), + }) + if err != nil { + return err + } + log.Printf("[DEBUG] S3 Bucket: %s, get notification: %v", d.Id(), notificationConfigs) + // Topic Notification + if err := d.Set("topic", flattenTopicConfigurations(notificationConfigs.TopicConfigurations)); err != nil { + return fmt.Errorf("error reading S3 bucket \"%s\" topic notification: %s", d.Id(), err) + } + + // SQS Notification + if err := d.Set("queue", flattenQueueConfigurations(notificationConfigs.QueueConfigurations)); err != nil { + return fmt.Errorf("error reading S3 bucket \"%s\" queue notification: %s", d.Id(), err) + } + + // Lambda Notification + if err := d.Set("lambda_function", flattenLambdaFunctionConfigurations(notificationConfigs.LambdaFunctionConfigurations)); err != nil { + return fmt.Errorf("error reading S3 bucket \"%s\" lambda function notification: %s", d.Id(), err) + } + + return nil +} + +func flattenNotificationConfigurationFilter(filter *s3.NotificationConfigurationFilter) map[string]interface{} { + filterRules := map[string]interface{}{} + for _, f := range filter.Key.FilterRules { + if strings.ToLower(*f.Name) == s3.FilterRuleNamePrefix { + filterRules["filter_prefix"] = *f.Value + } + if strings.ToLower(*f.Name) == s3.FilterRuleNameSuffix { + filterRules["filter_suffix"] = *f.Value + } + } + return filterRules +} + +func flattenTopicConfigurations(configs []*s3.TopicConfiguration) []map[string]interface{} { + topicNotifications := make([]map[string]interface{}, 0, len(configs)) + for _, notification := range configs { + var conf map[string]interface{} + if filter := notification.Filter; filter != nil { + conf = flattenNotificationConfigurationFilter(filter) + } else { + conf = map[string]interface{}{} + } + + conf["id"] = *notification.Id + conf["events"] = schema.NewSet(schema.HashString, flattenStringList(notification.Events)) + conf["topic_arn"] = *notification.TopicArn + topicNotifications = 
append(topicNotifications, conf) + } + + return topicNotifications +} + +func flattenQueueConfigurations(configs []*s3.QueueConfiguration) []map[string]interface{} { + queueNotifications := make([]map[string]interface{}, 0, len(configs)) + for _, notification := range configs { + var conf map[string]interface{} + if filter := notification.Filter; filter != nil { + conf = flattenNotificationConfigurationFilter(filter) + } else { + conf = map[string]interface{}{} + } + + conf["id"] = *notification.Id + conf["events"] = schema.NewSet(schema.HashString, flattenStringList(notification.Events)) + conf["queue_arn"] = *notification.QueueArn + queueNotifications = append(queueNotifications, conf) + } + + return queueNotifications +} + +func flattenLambdaFunctionConfigurations(configs []*s3.LambdaFunctionConfiguration) []map[string]interface{} { + lambdaFunctionNotifications := make([]map[string]interface{}, 0, len(configs)) + for _, notification := range configs { + var conf map[string]interface{} + if filter := notification.Filter; filter != nil { + conf = flattenNotificationConfigurationFilter(filter) + } else { + conf = map[string]interface{}{} + } + + conf["id"] = *notification.Id + conf["events"] = schema.NewSet(schema.HashString, flattenStringList(notification.Events)) + conf["lambda_function_arn"] = *notification.LambdaFunctionArn + lambdaFunctionNotifications = append(lambdaFunctionNotifications, conf) + } + + return lambdaFunctionNotifications +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_object.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_object.go new file mode 100644 index 000000000..c14914187 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_object.go @@ -0,0 +1,407 @@ +package aws + +import ( + "bytes" + "fmt" + "io" + "log" + "net/url" + "os" + "sort" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/mitchellh/go-homedir" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/kms" + "github.com/aws/aws-sdk-go/service/s3" +) + +func resourceAwsS3BucketObject() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsS3BucketObjectPut, + Read: resourceAwsS3BucketObjectRead, + Update: resourceAwsS3BucketObjectPut, + Delete: resourceAwsS3BucketObjectDelete, + + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "acl": { + Type: schema.TypeString, + Default: "private", + Optional: true, + ValidateFunc: validateS3BucketObjectAclType, + }, + + "cache_control": { + Type: schema.TypeString, + Optional: true, + }, + + "content_disposition": { + Type: schema.TypeString, + Optional: true, + }, + + "content_encoding": { + Type: schema.TypeString, + Optional: true, + }, + + "content_language": { + Type: schema.TypeString, + Optional: true, + }, + + "content_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "source": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"content"}, + }, + + "content": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"source"}, + }, + + "storage_class": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateS3BucketObjectStorageClassType, + }, + + 
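+ // server_side_encryption accepts "AES256" or "aws:kms"; setting kms_key_id + // forces "aws:kms" at put time (see resourceAwsS3BucketObjectPut below).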
"server_side_encryption": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateS3BucketObjectServerSideEncryption, + Computed: true, + }, + + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, + }, + + "etag": { + Type: schema.TypeString, + // This will conflict with SSE-C and SSE-KMS encryption and multi-part upload + // if/when it's actually implemented. The Etag then won't match raw-file MD5. + // See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html + Optional: true, + Computed: true, + ConflictsWith: []string{"kms_key_id", "server_side_encryption"}, + }, + + "version_id": { + Type: schema.TypeString, + Computed: true, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn + + restricted := meta.(*AWSClient).IsGovCloud() || meta.(*AWSClient).IsChinaCloud() + + var body io.ReadSeeker + + if v, ok := d.GetOk("source"); ok { + source := v.(string) + path, err := homedir.Expand(source) + if err != nil { + return fmt.Errorf("Error expanding homedir in source (%s): %s", source, err) + } + file, err := os.Open(path) + if err != nil { + return fmt.Errorf("Error opening S3 bucket object source (%s): %s", source, err) + } + + body = file + } else if v, ok := d.GetOk("content"); ok { + content := v.(string) + body = bytes.NewReader([]byte(content)) + } else { + return fmt.Errorf("Must specify \"source\" or \"content\" field") + } + + bucket := d.Get("bucket").(string) + key := d.Get("key").(string) + + putInput := &s3.PutObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + ACL: aws.String(d.Get("acl").(string)), + Body: body, + } + + if v, ok := d.GetOk("storage_class"); ok { + putInput.StorageClass = aws.String(v.(string)) + } + + if v, ok := d.GetOk("cache_control"); ok { + putInput.CacheControl = aws.String(v.(string)) + } + + if v, ok := d.GetOk("content_type"); ok { + putInput.ContentType = aws.String(v.(string)) + } + + if v, ok := d.GetOk("content_encoding"); ok { + putInput.ContentEncoding = aws.String(v.(string)) + } + + if v, ok := d.GetOk("content_language"); ok { + putInput.ContentLanguage = aws.String(v.(string)) + } + + if v, ok := d.GetOk("content_disposition"); ok { + putInput.ContentDisposition = aws.String(v.(string)) + } + + if v, ok := d.GetOk("server_side_encryption"); ok { + putInput.ServerSideEncryption = aws.String(v.(string)) + } + + if v, ok := d.GetOk("kms_key_id"); ok { + putInput.SSEKMSKeyId = aws.String(v.(string)) + putInput.ServerSideEncryption = aws.String(s3.ServerSideEncryptionAwsKms) + } + + if v, ok := d.GetOk("tags"); ok { + if restricted { + return fmt.Errorf("This region does not allow for tags on S3 objects") + } + + // The tag-set must be encoded as URL Query parameters. 
+ values := url.Values{} + for k, v := range v.(map[string]interface{}) { + values.Add(k, v.(string)) + } + putInput.Tagging = aws.String(values.Encode()) + } + + resp, err := s3conn.PutObject(putInput) + if err != nil { + return fmt.Errorf("Error putting object in S3 bucket (%s): %s", bucket, err) + } + + // See https://forums.aws.amazon.com/thread.jspa?threadID=44003 + d.Set("etag", strings.Trim(*resp.ETag, `"`)) + + d.Set("version_id", resp.VersionId) + d.SetId(key) + return resourceAwsS3BucketObjectRead(d, meta) +} + +func resourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn + + restricted := meta.(*AWSClient).IsGovCloud() || meta.(*AWSClient).IsChinaCloud() + + bucket := d.Get("bucket").(string) + key := d.Get("key").(string) + + resp, err := s3conn.HeadObject( + &s3.HeadObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + }) + + if err != nil { + // If S3 returns a 404 Request Failure, mark the object as destroyed + if awsErr, ok := err.(awserr.RequestFailure); ok && awsErr.StatusCode() == 404 { + d.SetId("") + log.Printf("[WARN] Error Reading Object (%s), object not found (HTTP status 404)", key) + return nil + } + return err + } + log.Printf("[DEBUG] Reading S3 Bucket Object meta: %s", resp) + + d.Set("cache_control", resp.CacheControl) + d.Set("content_disposition", resp.ContentDisposition) + d.Set("content_encoding", resp.ContentEncoding) + d.Set("content_language", resp.ContentLanguage) + d.Set("content_type", resp.ContentType) + d.Set("version_id", resp.VersionId) + d.Set("server_side_encryption", resp.ServerSideEncryption) + + // Only set non-default KMS key ID (one that doesn't match default) + if resp.SSEKMSKeyId != nil { + // retrieve S3 KMS Default Master Key + kmsconn := meta.(*AWSClient).kmsconn + kmsresp, err := kmsconn.DescribeKey(&kms.DescribeKeyInput{ + KeyId: aws.String("alias/aws/s3"), + }) + if err != nil { + return fmt.Errorf("Failed to describe default S3 KMS key (alias/aws/s3): %s", err) + } + + if *resp.SSEKMSKeyId != *kmsresp.KeyMetadata.Arn { + log.Printf("[DEBUG] S3 object is encrypted using a non-default KMS Key ID: %s", *resp.SSEKMSKeyId) + d.Set("kms_key_id", resp.SSEKMSKeyId) + } + } + d.Set("etag", strings.Trim(*resp.ETag, `"`)) + + // The "STANDARD" (which is also the default) storage + // class when set would not be included in the results. 
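+ // Assume STANDARD first, then overwrite it only when HeadObject reports an + // explicit class (e.g. REDUCED_REDUNDANCY or STANDARD_IA).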
+ d.Set("storage_class", s3.StorageClassStandard) + if resp.StorageClass != nil { + d.Set("storage_class", resp.StorageClass) + } + + if !restricted { + tagResp, err := s3conn.GetObjectTagging( + &s3.GetObjectTaggingInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + }) + if err != nil { + return fmt.Errorf("Failed to get object tags (bucket: %s, key: %s): %s", bucket, key, err) + } + d.Set("tags", tagsToMapS3(tagResp.TagSet)) + } + + return nil +} + +func resourceAwsS3BucketObjectDelete(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn + + bucket := d.Get("bucket").(string) + key := d.Get("key").(string) + + if _, ok := d.GetOk("version_id"); ok { + // Bucket is versioned, we need to delete all versions + vInput := s3.ListObjectVersionsInput{ + Bucket: aws.String(bucket), + Prefix: aws.String(key), + } + out, err := s3conn.ListObjectVersions(&vInput) + if err != nil { + return fmt.Errorf("Failed listing S3 object versions: %s", err) + } + + for _, v := range out.Versions { + input := s3.DeleteObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + VersionId: v.VersionId, + } + _, err := s3conn.DeleteObject(&input) + if err != nil { + return fmt.Errorf("Error deleting S3 object version of %s:\n %s:\n %s", + key, v, err) + } + } + } else { + // Just delete the object + input := s3.DeleteObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + } + _, err := s3conn.DeleteObject(&input) + if err != nil { + return fmt.Errorf("Error deleting S3 bucket object: %s Bucket: %q Object: %q", err, bucket, key) + } + } + + return nil +} + +func validateS3BucketObjectAclType(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + cannedAcls := map[string]bool{ + s3.ObjectCannedACLPrivate: true, + s3.ObjectCannedACLPublicRead: true, + s3.ObjectCannedACLPublicReadWrite: true, + s3.ObjectCannedACLAuthenticatedRead: true, + s3.ObjectCannedACLAwsExecRead: true, + s3.ObjectCannedACLBucketOwnerRead: true, + s3.ObjectCannedACLBucketOwnerFullControl: true, + } + + sentenceJoin := func(m map[string]bool) string { + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, fmt.Sprintf("%q", k)) + } + sort.Strings(keys) + + length := len(keys) + words := make([]string, length) + copy(words, keys) + + words[length-1] = fmt.Sprintf("or %s", words[length-1]) + return strings.Join(words, ", ") + } + + if _, ok := cannedAcls[value]; !ok { + errors = append(errors, fmt.Errorf( + "%q contains an invalid canned ACL type %q. Valid types are either %s", + k, value, sentenceJoin(cannedAcls))) + } + return +} + +func validateS3BucketObjectStorageClassType(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + storageClass := map[string]bool{ + s3.StorageClassStandard: true, + s3.StorageClassReducedRedundancy: true, + s3.StorageClassStandardIa: true, + } + + if _, ok := storageClass[value]; !ok { + errors = append(errors, fmt.Errorf( + "%q contains an invalid Storage Class type %q. 
Valid types are either %q, %q, or %q", + k, value, s3.StorageClassStandard, s3.StorageClassReducedRedundancy, + s3.StorageClassStandardIa)) + } + return +} + +func validateS3BucketObjectServerSideEncryption(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + serverSideEncryption := map[string]bool{ + s3.ServerSideEncryptionAes256: true, + s3.ServerSideEncryptionAwsKms: true, + } + + if _, ok := serverSideEncryption[value]; !ok { + errors = append(errors, fmt.Errorf( + "%q contains an invalid Server Side Encryption value %q. Valid values are %q and %q", + k, value, s3.ServerSideEncryptionAes256, s3.ServerSideEncryptionAwsKms)) + } + return +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_policy.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_policy.go new file mode 100644 index 000000000..593d144fb --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_s3_bucket_policy.go @@ -0,0 +1,110 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsS3BucketPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsS3BucketPolicyPut, + Read: resourceAwsS3BucketPolicyRead, + Update: resourceAwsS3BucketPolicyPut, + Delete: resourceAwsS3BucketPolicyDelete, + + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "policy": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateJsonString, + DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, + }, + }, + } +} + +func resourceAwsS3BucketPolicyPut(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn + + bucket := d.Get("bucket").(string) + policy := d.Get("policy").(string) + + d.SetId(bucket) + + log.Printf("[DEBUG] S3 bucket: %s, put policy: %s", bucket, policy) + + params := &s3.PutBucketPolicyInput{ + Bucket: aws.String(bucket), + Policy: aws.String(policy), + } + + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + if _, err := s3conn.PutBucketPolicy(params); err != nil { + if awserr, ok := err.(awserr.Error); ok { + if awserr.Code() == "MalformedPolicy" { + return resource.RetryableError(awserr) + } + } + return resource.NonRetryableError(err) + } + return nil + }) + + if err != nil { + return fmt.Errorf("Error putting S3 policy: %s", err) + } + + return nil +} + +func resourceAwsS3BucketPolicyRead(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn + + log.Printf("[DEBUG] S3 bucket policy, read for bucket: %s", d.Id()) + pol, err := s3conn.GetBucketPolicy(&s3.GetBucketPolicyInput{ + Bucket: aws.String(d.Id()), + }) + + v := "" + if err == nil && pol.Policy != nil { + v = *pol.Policy + } + if err := d.Set("policy", v); err != nil { + return err + } + + return nil +} + +func resourceAwsS3BucketPolicyDelete(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn + + bucket := d.Get("bucket").(string) + + log.Printf("[DEBUG] S3 bucket: %s, delete policy", bucket) + _, err := s3conn.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{ + Bucket: aws.String(bucket), + }) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && 
awsErr.Code() == "NoSuchBucket" { + return nil + } + return fmt.Errorf("Error deleting S3 policy: %s", err) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group.go new file mode 100644 index 000000000..0148a0ff8 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group.go @@ -0,0 +1,1212 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSecurityGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSecurityGroupCreate, + Read: resourceAwsSecurityGroupRead, + Update: resourceAwsSecurityGroupUpdate, + Delete: resourceAwsSecurityGroupDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsSecurityGroupImportState, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 255 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 255 characters", k)) + } + return + }, + }, + + "name_prefix": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 100 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 100 characters, name is limited to 255", k)) + } + return + }, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "Managed by Terraform", + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 255 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 255 characters", k)) + } + return + }, + }, + + "vpc_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "ingress": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "from_port": { + Type: schema.TypeInt, + Required: true, + }, + + "to_port": { + Type: schema.TypeInt, + Required: true, + }, + + "protocol": { + Type: schema.TypeString, + Required: true, + StateFunc: protocolStateFunc, + }, + + "cidr_blocks": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateCIDRNetworkAddress, + }, + }, + + "ipv6_cidr_blocks": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateCIDRNetworkAddress, + }, + }, + + "security_groups": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "self": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + Set: resourceAwsSecurityGroupRuleHash, + }, + + "egress": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "from_port": { + Type: schema.TypeInt, + Required: true, + }, + + "to_port": { + Type: schema.TypeInt, + Required: true, + }, + + "protocol": { + Type: schema.TypeString, + Required: true, + StateFunc: protocolStateFunc, + }, + + "cidr_blocks": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateCIDRNetworkAddress, + }, + }, + + "ipv6_cidr_blocks": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateCIDRNetworkAddress, + }, + }, + + "prefix_list_ids": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "security_groups": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "self": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + Set: resourceAwsSecurityGroupRuleHash, + }, + + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + securityGroupOpts := &ec2.CreateSecurityGroupInput{} + + if v, ok := d.GetOk("vpc_id"); ok { + securityGroupOpts.VpcId = aws.String(v.(string)) + } + + if v := d.Get("description"); v != nil { + securityGroupOpts.Description = aws.String(v.(string)) + } + + var groupName string + if v, ok := d.GetOk("name"); ok { + groupName = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + groupName = resource.PrefixedUniqueId(v.(string)) + } else { + groupName = resource.UniqueId() + } + securityGroupOpts.GroupName = aws.String(groupName) + + var err error + log.Printf( + "[DEBUG] Security Group create configuration: %#v", securityGroupOpts) + createResp, err := conn.CreateSecurityGroup(securityGroupOpts) + if err != nil { + return fmt.Errorf("Error creating Security Group: %s", err) + } + + d.SetId(*createResp.GroupId) + + log.Printf("[INFO] Security Group ID: %s", d.Id()) + + // Wait for the security group to truly exist + log.Printf( + "[DEBUG] Waiting for Security Group (%s) to exist", + d.Id()) + stateConf := &resource.StateChangeConf{ + Pending: []string{""}, + Target: []string{"exists"}, + Refresh: SGStateRefreshFunc(conn, d.Id()), + Timeout: 10 * time.Minute, + } + + resp, err := stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for Security Group (%s) to become available: %s", + d.Id(), err) + } + + if err := setTags(conn, d); err != nil { + return err + } + + // AWS defaults all Security Groups to have an ALLOW ALL egress rule. Here we + // revoke that rule, so users don't unknowingly have/use it. 
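+ // (Only VPC security groups carry that implicit 0.0.0.0/0 allow-all egress + // rule, hence the VpcId check below; EC2-Classic groups have no egress rules.)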
+ group := resp.(*ec2.SecurityGroup) + if group.VpcId != nil && *group.VpcId != "" { + log.Printf("[DEBUG] Revoking default egress rule for Security Group for %s", d.Id()) + + req := &ec2.RevokeSecurityGroupEgressInput{ + GroupId: createResp.GroupId, + IpPermissions: []*ec2.IpPermission{ + { + FromPort: aws.Int64(int64(0)), + ToPort: aws.Int64(int64(0)), + IpRanges: []*ec2.IpRange{ + { + CidrIp: aws.String("0.0.0.0/0"), + }, + }, + IpProtocol: aws.String("-1"), + }, + }, + } + + if _, err = conn.RevokeSecurityGroupEgress(req); err != nil { + return fmt.Errorf( + "Error revoking default egress rule for Security Group (%s): %s", + d.Id(), err) + } + + log.Printf("[DEBUG] Revoking default IPv6 egress rule for Security Group for %s", d.Id()) + req = &ec2.RevokeSecurityGroupEgressInput{ + GroupId: createResp.GroupId, + IpPermissions: []*ec2.IpPermission{ + { + FromPort: aws.Int64(int64(0)), + ToPort: aws.Int64(int64(0)), + Ipv6Ranges: []*ec2.Ipv6Range{ + { + CidrIpv6: aws.String("::/0"), + }, + }, + IpProtocol: aws.String("-1"), + }, + }, + } + + _, err = conn.RevokeSecurityGroupEgress(req) + if err != nil { + //If we have a NotFound, then we are trying to remove the default IPv6 egress of a non-IPv6 + //enabled SG + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() != "InvalidPermission.NotFound" { + return fmt.Errorf( + "Error revoking default IPv6 egress rule for Security Group (%s): %s", + d.Id(), err) + } + } + + } + + return resourceAwsSecurityGroupUpdate(d, meta) +} + +func resourceAwsSecurityGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + sgRaw, _, err := SGStateRefreshFunc(conn, d.Id())() + if err != nil { + return err + } + if sgRaw == nil { + d.SetId("") + return nil + } + + sg := sgRaw.(*ec2.SecurityGroup) + + remoteIngressRules := resourceAwsSecurityGroupIPPermGather(d.Id(), sg.IpPermissions, sg.OwnerId) + remoteEgressRules := resourceAwsSecurityGroupIPPermGather(d.Id(), sg.IpPermissionsEgress, sg.OwnerId) + + localIngressRules := d.Get("ingress").(*schema.Set).List() + localEgressRules := d.Get("egress").(*schema.Set).List() + + // Loop through the local state of rules, doing a match against the remote + // ruleSet we built above. 
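+ // matchRules returns the matched local rules plus any remote rules with no + // local counterpart, so rules added outside Terraform still reach state.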
+ ingressRules := matchRules("ingress", localIngressRules, remoteIngressRules) + egressRules := matchRules("egress", localEgressRules, remoteEgressRules) + + d.Set("description", sg.Description) + d.Set("name", sg.GroupName) + d.Set("vpc_id", sg.VpcId) + d.Set("owner_id", sg.OwnerId) + + if err := d.Set("ingress", ingressRules); err != nil { + log.Printf("[WARN] Error setting Ingress rule set for (%s): %s", d.Id(), err) + } + + if err := d.Set("egress", egressRules); err != nil { + log.Printf("[WARN] Error setting Egress rule set for (%s): %s", d.Id(), err) + } + + d.Set("tags", tagsToMap(sg.Tags)) + return nil +} + +func resourceAwsSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + sgRaw, _, err := SGStateRefreshFunc(conn, d.Id())() + if err != nil { + return err + } + if sgRaw == nil { + d.SetId("") + return nil + } + + group := sgRaw.(*ec2.SecurityGroup) + + err = resourceAwsSecurityGroupUpdateRules(d, "ingress", meta, group) + if err != nil { + return err + } + + if d.Get("vpc_id") != nil { + err = resourceAwsSecurityGroupUpdateRules(d, "egress", meta, group) + if err != nil { + return err + } + } + + if !d.IsNewResource() { + if err := setTags(conn, d); err != nil { + return err + } + d.SetPartial("tags") + } + + return resourceAwsSecurityGroupRead(d, meta) +} + +func resourceAwsSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + log.Printf("[DEBUG] Security Group destroy: %v", d.Id()) + + if err := deleteLingeringLambdaENIs(conn, d); err != nil { + return fmt.Errorf("Failed to delete Lambda ENIs: %s", err) + } + + return resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteSecurityGroup(&ec2.DeleteSecurityGroupInput{ + GroupId: aws.String(d.Id()), + }) + if err != nil { + ec2err, ok := err.(awserr.Error) + if !ok { + return resource.RetryableError(err) + } + + switch ec2err.Code() { + case "InvalidGroup.NotFound": + return nil + case "DependencyViolation": + // If it is a dependency violation, we want to retry + return resource.RetryableError(err) + default: + // Any other error, we want to quit the retry loop immediately + return resource.NonRetryableError(err) + } + } + + return nil + }) +} + +func resourceAwsSecurityGroupRuleHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%d-", m["from_port"].(int))) + buf.WriteString(fmt.Sprintf("%d-", m["to_port"].(int))) + p := protocolForValue(m["protocol"].(string)) + buf.WriteString(fmt.Sprintf("%s-", p)) + buf.WriteString(fmt.Sprintf("%t-", m["self"].(bool))) + + // We need to make sure to sort the strings below so that we always + // generate the same hash code no matter what is in the set. 
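+ // For example, ["10.0.0.0/8", "172.16.0.0/12"] and its reversal must hash + // identically, or Terraform would report a perpetual diff on the rule set.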
+ if v, ok := m["cidr_blocks"]; ok { + vs := v.([]interface{}) + s := make([]string, len(vs)) + for i, raw := range vs { + s[i] = raw.(string) + } + sort.Strings(s) + + for _, v := range s { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + } + if v, ok := m["ipv6_cidr_blocks"]; ok { + vs := v.([]interface{}) + s := make([]string, len(vs)) + for i, raw := range vs { + s[i] = raw.(string) + } + sort.Strings(s) + + for _, v := range s { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + } + if v, ok := m["prefix_list_ids"]; ok { + vs := v.([]interface{}) + s := make([]string, len(vs)) + for i, raw := range vs { + s[i] = raw.(string) + } + sort.Strings(s) + + for _, v := range s { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + } + if v, ok := m["security_groups"]; ok { + vs := v.(*schema.Set).List() + s := make([]string, len(vs)) + for i, raw := range vs { + s[i] = raw.(string) + } + sort.Strings(s) + + for _, v := range s { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + } + + return hashcode.String(buf.String()) +} + +func resourceAwsSecurityGroupIPPermGather(groupId string, permissions []*ec2.IpPermission, ownerId *string) []map[string]interface{} { + ruleMap := make(map[string]map[string]interface{}) + for _, perm := range permissions { + var fromPort, toPort int64 + if v := perm.FromPort; v != nil { + fromPort = *v + } + if v := perm.ToPort; v != nil { + toPort = *v + } + + k := fmt.Sprintf("%s-%d-%d", *perm.IpProtocol, fromPort, toPort) + m, ok := ruleMap[k] + if !ok { + m = make(map[string]interface{}) + ruleMap[k] = m + } + + m["from_port"] = fromPort + m["to_port"] = toPort + m["protocol"] = *perm.IpProtocol + + if len(perm.IpRanges) > 0 { + raw, ok := m["cidr_blocks"] + if !ok { + raw = make([]string, 0, len(perm.IpRanges)) + } + list := raw.([]string) + + for _, ip := range perm.IpRanges { + list = append(list, *ip.CidrIp) + } + + m["cidr_blocks"] = list + } + + if len(perm.Ipv6Ranges) > 0 { + raw, ok := m["ipv6_cidr_blocks"] + if !ok { + raw = make([]string, 0, len(perm.Ipv6Ranges)) + } + list := raw.([]string) + + for _, ip := range perm.Ipv6Ranges { + list = append(list, *ip.CidrIpv6) + } + + m["ipv6_cidr_blocks"] = list + } + + if len(perm.PrefixListIds) > 0 { + raw, ok := m["prefix_list_ids"] + if !ok { + raw = make([]string, 0, len(perm.PrefixListIds)) + } + list := raw.([]string) + + for _, pl := range perm.PrefixListIds { + list = append(list, *pl.PrefixListId) + } + + m["prefix_list_ids"] = list + } + + groups := flattenSecurityGroups(perm.UserIdGroupPairs, ownerId) + for i, g := range groups { + if *g.GroupId == groupId { + groups[i], groups = groups[len(groups)-1], groups[:len(groups)-1] + m["self"] = true + } + } + + if len(groups) > 0 { + raw, ok := m["security_groups"] + if !ok { + raw = schema.NewSet(schema.HashString, nil) + } + list := raw.(*schema.Set) + + for _, g := range groups { + if g.GroupName != nil { + list.Add(*g.GroupName) + } else { + list.Add(*g.GroupId) + } + } + + m["security_groups"] = list + } + } + rules := make([]map[string]interface{}, 0, len(ruleMap)) + for _, m := range ruleMap { + rules = append(rules, m) + } + + return rules +} + +func resourceAwsSecurityGroupUpdateRules( + d *schema.ResourceData, ruleset string, + meta interface{}, group *ec2.SecurityGroup) error { + + if d.HasChange(ruleset) { + o, n := d.GetChange(ruleset) + if o == nil { + o = new(schema.Set) + } + if n == nil { + n = new(schema.Set) + } + + os := o.(*schema.Set) + ns := n.(*schema.Set) + + remove, err := expandIPPerms(group, os.Difference(ns).List()) + if err != nil { + return 
err + } + add, err := expandIPPerms(group, ns.Difference(os).List()) + if err != nil { + return err + } + + // TODO: We need to handle partial state better in the in-between + // in this update. + + // TODO: It'd be nicer to authorize before removing, but then we have + // to deal with complicated unrolling to get individual CIDR blocks + // to avoid authorizing already authorized sources. Removing before + // adding is easier here, and Terraform should be fast enough to + // not have service issues. + + if len(remove) > 0 || len(add) > 0 { + conn := meta.(*AWSClient).ec2conn + + var err error + if len(remove) > 0 { + log.Printf("[DEBUG] Revoking security group %#v %s rule: %#v", + group, ruleset, remove) + + if ruleset == "egress" { + req := &ec2.RevokeSecurityGroupEgressInput{ + GroupId: group.GroupId, + IpPermissions: remove, + } + _, err = conn.RevokeSecurityGroupEgress(req) + } else { + req := &ec2.RevokeSecurityGroupIngressInput{ + GroupId: group.GroupId, + IpPermissions: remove, + } + if group.VpcId == nil || *group.VpcId == "" { + req.GroupId = nil + req.GroupName = group.GroupName + } + _, err = conn.RevokeSecurityGroupIngress(req) + } + + if err != nil { + return fmt.Errorf( + "Error revoking security group %s rules: %s", + ruleset, err) + } + } + + if len(add) > 0 { + log.Printf("[DEBUG] Authorizing security group %#v %s rule: %#v", + group, ruleset, add) + // Authorize the new rules + if ruleset == "egress" { + req := &ec2.AuthorizeSecurityGroupEgressInput{ + GroupId: group.GroupId, + IpPermissions: add, + } + _, err = conn.AuthorizeSecurityGroupEgress(req) + } else { + req := &ec2.AuthorizeSecurityGroupIngressInput{ + GroupId: group.GroupId, + IpPermissions: add, + } + if group.VpcId == nil || *group.VpcId == "" { + req.GroupId = nil + req.GroupName = group.GroupName + } + + _, err = conn.AuthorizeSecurityGroupIngress(req) + } + + if err != nil { + return fmt.Errorf( + "Error authorizing security group %s rules: %s", + ruleset, err) + } + } + } + } + return nil +} + +// SGStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch +// a security group. +func SGStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + req := &ec2.DescribeSecurityGroupsInput{ + GroupIds: []*string{aws.String(id)}, + } + resp, err := conn.DescribeSecurityGroups(req) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok { + if ec2err.Code() == "InvalidSecurityGroupID.NotFound" || + ec2err.Code() == "InvalidGroup.NotFound" { + resp = nil + err = nil + } + } + + if err != nil { + log.Printf("Error on SGStateRefresh: %s", err) + return nil, "", err + } + } + + if resp == nil { + return nil, "", nil + } + + group := resp.SecurityGroups[0] + return group, "exists", nil + } +} + +// matchRules receives the group id, type of rules, and the local / remote maps +// of rules. We iterate through the local set of rules trying to find a matching +// remote rule, which may be structured differently because of how AWS +// aggregates the rules under the to, from, and type. 
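+ // (A single remote rule for tcp/443 may, for example, carry several CIDR + // blocks that the local configuration declares as separate ingress blocks.)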
+// + // + // Matching rules are written to state, with their elements removed from the + // remote set + // + // If no match is found, we'll write the remote rule to state and let the graph + // sort things out + func matchRules(rType string, local []interface{}, remote []map[string]interface{}) []map[string]interface{} { + // For each local ip or security_group, we need to match against the remote + // ruleSet until all ips or security_groups are found + + // saves represents the rules that have been identified to be saved to state, + // in the appropriate d.Set("{ingress,egress}") call. + var saves []map[string]interface{} + for _, raw := range local { + l := raw.(map[string]interface{}) + + var selfVal bool + if v, ok := l["self"]; ok { + selfVal = v.(bool) + } + + // matching against self is required to detect rules that only include self + // as the rule. resourceAwsSecurityGroupIPPermGather parses the group out + // and replaces it with self if its ID is found + localHash := idHash(rType, l["protocol"].(string), int64(l["to_port"].(int)), int64(l["from_port"].(int)), selfVal) + + // loop remote rules, looking for a matching hash + for _, r := range remote { + var remoteSelfVal bool + if v, ok := r["self"]; ok { + remoteSelfVal = v.(bool) + } + + // hash this remote rule and compare it for a match consideration with the + // local rule we're examining + rHash := idHash(rType, r["protocol"].(string), r["to_port"].(int64), r["from_port"].(int64), remoteSelfVal) + if rHash == localHash { + var numExpectedCidrs, numExpectedIpv6Cidrs, numExpectedPrefixLists, numExpectedSGs, numRemoteCidrs, numRemoteIpv6Cidrs, numRemotePrefixLists, numRemoteSGs int + var matchingCidrs []string + var matchingIpv6Cidrs []string + var matchingSGs []string + var matchingPrefixLists []string + + // grab the local/remote cidr and sg groups, capturing the expected and + // actual counts + lcRaw, ok := l["cidr_blocks"] + if ok { + numExpectedCidrs = len(l["cidr_blocks"].([]interface{})) + } + liRaw, ok := l["ipv6_cidr_blocks"] + if ok { + numExpectedIpv6Cidrs = len(l["ipv6_cidr_blocks"].([]interface{})) + } + lpRaw, ok := l["prefix_list_ids"] + if ok { + numExpectedPrefixLists = len(l["prefix_list_ids"].([]interface{})) + } + lsRaw, ok := l["security_groups"] + if ok { + numExpectedSGs = len(l["security_groups"].(*schema.Set).List()) + } + + rcRaw, ok := r["cidr_blocks"] + if ok { + numRemoteCidrs = len(r["cidr_blocks"].([]string)) + } + riRaw, ok := r["ipv6_cidr_blocks"] + if ok { + numRemoteIpv6Cidrs = len(r["ipv6_cidr_blocks"].([]string)) + } + rpRaw, ok := r["prefix_list_ids"] + if ok { + numRemotePrefixLists = len(r["prefix_list_ids"].([]string)) + } + + rsRaw, ok := r["security_groups"] + if ok { + numRemoteSGs = len(r["security_groups"].(*schema.Set).List()) + } + + // check some early failures + if numExpectedCidrs > numRemoteCidrs { + log.Printf("[DEBUG] Local rule has more CIDR blocks, continuing (%d/%d)", numExpectedCidrs, numRemoteCidrs) + continue + } + if numExpectedIpv6Cidrs > numRemoteIpv6Cidrs { + log.Printf("[DEBUG] Local rule has more IPV6 CIDR blocks, continuing (%d/%d)", numExpectedIpv6Cidrs, numRemoteIpv6Cidrs) + continue + } + if numExpectedPrefixLists > numRemotePrefixLists { + log.Printf("[DEBUG] Local rule has more prefix lists, continuing (%d/%d)", numExpectedPrefixLists, numRemotePrefixLists) + continue + } + if numExpectedSGs > numRemoteSGs { + log.Printf("[DEBUG] Local rule has more Security Groups, continuing (%d/%d)", numExpectedSGs, numRemoteSGs) + continue + } + + // match CIDRs by 
converting both to sets, and using Set methods + var localCidrs []interface{} + if lcRaw != nil { + localCidrs = lcRaw.([]interface{}) + } + localCidrSet := schema.NewSet(schema.HashString, localCidrs) + + // remote cidrs are presented as a slice of strings, so we need to + // reformat them into a slice of interfaces to be used in creating the + // remote cidr set + var remoteCidrs []string + if rcRaw != nil { + remoteCidrs = rcRaw.([]string) + } + // convert remote cidrs to a set, for easy comparisons + var list []interface{} + for _, s := range remoteCidrs { + list = append(list, s) + } + remoteCidrSet := schema.NewSet(schema.HashString, list) + + // Build up a list of local cidrs that are found in the remote set + for _, s := range localCidrSet.List() { + if remoteCidrSet.Contains(s) { + matchingCidrs = append(matchingCidrs, s.(string)) + } + } + + //IPV6 CIDRs + var localIpv6Cidrs []interface{} + if liRaw != nil { + localIpv6Cidrs = liRaw.([]interface{}) + } + localIpv6CidrSet := schema.NewSet(schema.HashString, localIpv6Cidrs) + + var remoteIpv6Cidrs []string + if riRaw != nil { + remoteIpv6Cidrs = riRaw.([]string) + } + var listIpv6 []interface{} + for _, s := range remoteIpv6Cidrs { + listIpv6 = append(listIpv6, s) + } + remoteIpv6CidrSet := schema.NewSet(schema.HashString, listIpv6) + + for _, s := range localIpv6CidrSet.List() { + if remoteIpv6CidrSet.Contains(s) { + matchingIpv6Cidrs = append(matchingIpv6Cidrs, s.(string)) + } + } + + // match prefix lists by converting both to sets, and using Set methods + var localPrefixLists []interface{} + if lpRaw != nil { + localPrefixLists = lpRaw.([]interface{}) + } + localPrefixListsSet := schema.NewSet(schema.HashString, localPrefixLists) + + // remote prefix lists are presented as a slice of strings, so we need to + // reformat them into a slice of interfaces to be used in creating the + // remote prefix list set + var remotePrefixLists []string + if rpRaw != nil { + remotePrefixLists = rpRaw.([]string) + } + // convert remote prefix lists to a set, for easy comparison + list = nil + for _, s := range remotePrefixLists { + list = append(list, s) + } + remotePrefixListsSet := schema.NewSet(schema.HashString, list) + + // Build up a list of local prefix lists that are found in the remote set + for _, s := range localPrefixListsSet.List() { + if remotePrefixListsSet.Contains(s) { + matchingPrefixLists = append(matchingPrefixLists, s.(string)) + } + } + + // match SGs. Both local and remote are already sets + var localSGSet *schema.Set + if lsRaw == nil { + localSGSet = schema.NewSet(schema.HashString, nil) + } else { + localSGSet = lsRaw.(*schema.Set) + } + + var remoteSGSet *schema.Set + if rsRaw == nil { + remoteSGSet = schema.NewSet(schema.HashString, nil) + } else { + remoteSGSet = rsRaw.(*schema.Set) + } + + // Build up a list of local security groups that are found in the remote set + for _, s := range localSGSet.List() { + if remoteSGSet.Contains(s) { + matchingSGs = append(matchingSGs, s.(string)) + } + } + + // compare equalities for matches. 
+ // If we found the number of cidrs and number of sgs, we declare a + // match, and then remove those elements from the remote rule, so that + // this remote rule can still be considered by other local rules + if numExpectedCidrs == len(matchingCidrs) { + if numExpectedIpv6Cidrs == len(matchingIpv6Cidrs) { + if numExpectedPrefixLists == len(matchingPrefixLists) { + if numExpectedSGs == len(matchingSGs) { + // confirm that self references match + var lSelf bool + var rSelf bool + if _, ok := l["self"]; ok { + lSelf = l["self"].(bool) + } + if _, ok := r["self"]; ok { + rSelf = r["self"].(bool) + } + if rSelf == lSelf { + delete(r, "self") + // pop local cidrs from remote + diffCidr := remoteCidrSet.Difference(localCidrSet) + var newCidr []string + for _, cRaw := range diffCidr.List() { + newCidr = append(newCidr, cRaw.(string)) + } + + // reassigning + if len(newCidr) > 0 { + r["cidr_blocks"] = newCidr + } else { + delete(r, "cidr_blocks") + } + + //// IPV6 + //// Comparison + diffIpv6Cidr := remoteIpv6CidrSet.Difference(localIpv6CidrSet) + var newIpv6Cidr []string + for _, cRaw := range diffIpv6Cidr.List() { + newIpv6Cidr = append(newIpv6Cidr, cRaw.(string)) + } + + // reassigning + if len(newIpv6Cidr) > 0 { + r["ipv6_cidr_blocks"] = newIpv6Cidr + } else { + delete(r, "ipv6_cidr_blocks") + } + + // pop local prefix lists from remote + diffPrefixLists := remotePrefixListsSet.Difference(localPrefixListsSet) + var newPrefixLists []string + for _, pRaw := range diffPrefixLists.List() { + newPrefixLists = append(newPrefixLists, pRaw.(string)) + } + + // reassigning + if len(newPrefixLists) > 0 { + r["prefix_list_ids"] = newPrefixLists + } else { + delete(r, "prefix_list_ids") + } + + // pop local sgs from remote + diffSGs := remoteSGSet.Difference(localSGSet) + if len(diffSGs.List()) > 0 { + r["security_groups"] = diffSGs + } else { + delete(r, "security_groups") + } + + saves = append(saves, l) + } + } + } + + } + } + } + } + } + // Here we catch any remote rules that have not been stripped of all self, + // cidrs, and security groups. We'll add remote rules here that have not been + // matched locally, and let the graph sort things out. 
This will happen when + rules are added externally to Terraform + for _, r := range remote { + var lenCidr, lenIpv6Cidr, lenPrefixLists, lenSGs int + if rCidrs, ok := r["cidr_blocks"]; ok { + lenCidr = len(rCidrs.([]string)) + } + if rIpv6Cidrs, ok := r["ipv6_cidr_blocks"]; ok { + lenIpv6Cidr = len(rIpv6Cidrs.([]string)) + } + if rPrefixLists, ok := r["prefix_list_ids"]; ok { + lenPrefixLists = len(rPrefixLists.([]string)) + } + if rawSGs, ok := r["security_groups"]; ok { + lenSGs = len(rawSGs.(*schema.Set).List()) + } + + if _, ok := r["self"]; ok { + if r["self"].(bool) { + lenSGs++ + } + } + + if lenSGs+lenCidr+lenIpv6Cidr+lenPrefixLists > 0 { + log.Printf("[DEBUG] Found a remote Rule that wasn't empty: (%#v)", r) + saves = append(saves, r) + } + } + + return saves +} + +// Creates a unique hash for the type, ports, and protocol, used as a key in +// maps +func idHash(rType, protocol string, toPort, fromPort int64, self bool) string { + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("%s-", rType)) + buf.WriteString(fmt.Sprintf("%d-", toPort)) + buf.WriteString(fmt.Sprintf("%d-", fromPort)) + buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(protocol))) + buf.WriteString(fmt.Sprintf("%t-", self)) + + return fmt.Sprintf("rule-%d", hashcode.String(buf.String())) +} + +// protocolStateFunc ensures we only store a string in any protocol field +func protocolStateFunc(v interface{}) string { + switch v.(type) { + case string: + p := protocolForValue(v.(string)) + return p + default: + log.Printf("[WARN] Non String value given for Protocol: %#v", v) + return "" + } +} + +// protocolForValue converts a valid Internet Protocol number into its name +// representation. If a name is given, it validates that it's a proper protocol +// name. Names/numbers are as defined at +// https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml +func protocolForValue(v string) string { + // special case -1 + protocol := strings.ToLower(v) + if protocol == "-1" || protocol == "all" { + return "-1" + } + // if it's a name like tcp, return that + if _, ok := sgProtocolIntegers()[protocol]; ok { + return protocol + } + // convert to int, look for that value + p, err := strconv.Atoi(protocol) + if err != nil { + // we were unable to convert to int, suggesting a string name, but it wasn't + // found above + log.Printf("[WARN] Unable to determine valid protocol: %s", err) + return protocol + } + + for k, v := range sgProtocolIntegers() { + if p == v { + // guard against protocolIntegers sometime in the future not having lower + // case ids in the map + return strings.ToLower(k) + } + } + + // fall through + log.Printf("[WARN] Unable to determine valid protocol: no matching protocols found") + return protocol +} + +// a map of protocol names and their codes, defined at +// https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml, +// documented to be supported by AWS Security Groups +// http://docs.aws.amazon.com/fr_fr/AWSEC2/latest/APIReference/API_IpPermission.html +// Similar to protocolIntegers() used by Network ACLs, but explicitly only +// supports "tcp", "udp", "icmp", and "all" +func sgProtocolIntegers() map[string]int { + return map[string]int{ + "udp": 17, + "tcp": 6, + "icmp": 1, + "all": -1, + } +} + +// The AWS Lambda service creates ENIs behind the scenes and keeps these around for a while +// which would prevent SGs attached to such ENIs from being destroyed +func 
deleteLingeringLambdaENIs(conn *ec2.EC2, d *schema.ResourceData) error { + // Here we carefully find the offenders + params := &ec2.DescribeNetworkInterfacesInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("group-id"), + Values: []*string{aws.String(d.Id())}, + }, + { + Name: aws.String("description"), + Values: []*string{aws.String("AWS Lambda VPC ENI: *")}, + }, + }, + } + networkInterfaceResp, err := conn.DescribeNetworkInterfaces(params) + if err != nil { + return err + } + + // Then we detach and finally delete those + v := networkInterfaceResp.NetworkInterfaces + for _, eni := range v { + if eni.Attachment != nil { + detachNetworkInterfaceParams := &ec2.DetachNetworkInterfaceInput{ + AttachmentId: eni.Attachment.AttachmentId, + } + _, detachNetworkInterfaceErr := conn.DetachNetworkInterface(detachNetworkInterfaceParams) + + if detachNetworkInterfaceErr != nil { + return detachNetworkInterfaceErr + } + + log.Printf("[DEBUG] Waiting for ENI (%s) to become detached", *eni.NetworkInterfaceId) + stateConf := &resource.StateChangeConf{ + Pending: []string{"true"}, + Target: []string{"false"}, + Refresh: networkInterfaceAttachedRefreshFunc(conn, *eni.NetworkInterfaceId), + Timeout: 10 * time.Minute, + } + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf( + "Error waiting for ENI (%s) to become detached: %s", *eni.NetworkInterfaceId, err) + } + } + + deleteNetworkInterfaceParams := &ec2.DeleteNetworkInterfaceInput{ + NetworkInterfaceId: eni.NetworkInterfaceId, + } + _, deleteNetworkInterfaceErr := conn.DeleteNetworkInterface(deleteNetworkInterfaceParams) + + if deleteNetworkInterfaceErr != nil { + return deleteNetworkInterfaceErr + } + } + + return nil +} + +func networkInterfaceAttachedRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + describe_network_interfaces_request := &ec2.DescribeNetworkInterfacesInput{ + NetworkInterfaceIds: []*string{aws.String(id)}, + } + describeResp, err := conn.DescribeNetworkInterfaces(describe_network_interfaces_request) + + if err != nil { + log.Printf("[ERROR] Could not find network interface %s. 
%s", id, err) + return nil, "", err + } + + eni := describeResp.NetworkInterfaces[0] + hasAttachment := strconv.FormatBool(eni.Attachment != nil) + log.Printf("[DEBUG] ENI %s has attachment state %s", id, hasAttachment) + return eni, hasAttachment, nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group_rule.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group_rule.go new file mode 100644 index 000000000..1372bc83d --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group_rule.go @@ -0,0 +1,674 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "sort" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSecurityGroupRule() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSecurityGroupRuleCreate, + Read: resourceAwsSecurityGroupRuleRead, + Delete: resourceAwsSecurityGroupRuleDelete, + + SchemaVersion: 2, + MigrateState: resourceAwsSecurityGroupRuleMigrateState, + + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Type of rule, ingress (inbound) or egress (outbound).", + ValidateFunc: validateSecurityRuleType, + }, + + "from_port": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "to_port": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "protocol": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + StateFunc: protocolStateFunc, + }, + + "cidr_blocks": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateCIDRNetworkAddress, + }, + }, + + "ipv6_cidr_blocks": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateCIDRNetworkAddress, + }, + }, + + "prefix_list_ids": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "security_group_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "source_security_group_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + ConflictsWith: []string{"cidr_blocks", "self"}, + }, + + "self": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + ConflictsWith: []string{"cidr_blocks"}, + }, + }, + } +} + +func resourceAwsSecurityGroupRuleCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + sg_id := d.Get("security_group_id").(string) + + awsMutexKV.Lock(sg_id) + defer awsMutexKV.Unlock(sg_id) + + sg, err := findResourceSecurityGroup(conn, sg_id) + if err != nil { + return err + } + + perm, err := expandIPPerm(d, sg) + if err != nil { + return err + } + + // Verify that either 'cidr_blocks', 'self', or 'source_security_group_id' is set + // If they are not set the AWS API will silently fail. This causes TF to hit a timeout + // at 5-minutes waiting for the security group rule to appear, when it was never actually + // created. 
+ if err := validateAwsSecurityGroupRule(d); err != nil { + return err + } + + ruleType := d.Get("type").(string) + isVPC := sg.VpcId != nil && *sg.VpcId != "" + + var autherr error + switch ruleType { + case "ingress": + log.Printf("[DEBUG] Authorizing security group %s %s rule: %s", + sg_id, "Ingress", perm) + + req := &ec2.AuthorizeSecurityGroupIngressInput{ + GroupId: sg.GroupId, + IpPermissions: []*ec2.IpPermission{perm}, + } + + if !isVPC { + req.GroupId = nil + req.GroupName = sg.GroupName + } + + _, autherr = conn.AuthorizeSecurityGroupIngress(req) + + case "egress": + log.Printf("[DEBUG] Authorizing security group %s %s rule: %#v", + sg_id, "Egress", perm) + + req := &ec2.AuthorizeSecurityGroupEgressInput{ + GroupId: sg.GroupId, + IpPermissions: []*ec2.IpPermission{perm}, + } + + _, autherr = conn.AuthorizeSecurityGroupEgress(req) + + default: + return fmt.Errorf("Security Group Rule must be type 'ingress' or type 'egress'") + } + + if autherr != nil { + if awsErr, ok := autherr.(awserr.Error); ok { + if awsErr.Code() == "InvalidPermission.Duplicate" { + return fmt.Errorf(`[WARN] A duplicate Security Group rule was found on (%s). This may be +a side effect of a now-fixed Terraform issue causing two security groups with +identical attributes but different source_security_group_ids to overwrite each +other in the state. See https://github.com/hashicorp/terraform/pull/2376 for more +information and instructions for recovery. Error message: %s`, sg_id, awsErr.Message()) + } + } + + return fmt.Errorf( + "Error authorizing security group rule type %s: %s", + ruleType, autherr) + } + + id := ipPermissionIDHash(sg_id, ruleType, perm) + log.Printf("[DEBUG] Computed group rule ID %s", id) + + retErr := resource.Retry(5*time.Minute, func() *resource.RetryError { + sg, err := findResourceSecurityGroup(conn, sg_id) + + if err != nil { + log.Printf("[DEBUG] Error finding Security Group (%s) for Rule (%s): %s", sg_id, id, err) + return resource.NonRetryableError(err) + } + + var rules []*ec2.IpPermission + switch ruleType { + case "ingress": + rules = sg.IpPermissions + default: + rules = sg.IpPermissionsEgress + } + + rule := findRuleMatch(perm, rules, isVPC) + + if rule == nil { + log.Printf("[DEBUG] Unable to find matching %s Security Group Rule (%s) for Group %s", + ruleType, id, sg_id) + return resource.RetryableError(fmt.Errorf("No match found")) + } + + log.Printf("[DEBUG] Found rule for Security Group Rule (%s): %s", id, rule) + return nil + }) + + if retErr != nil { + return fmt.Errorf("Error finding matching %s Security Group Rule (%s) for Group %s", + ruleType, id, sg_id) + } + + d.SetId(id) + return nil +} + +func resourceAwsSecurityGroupRuleRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + sg_id := d.Get("security_group_id").(string) + sg, err := findResourceSecurityGroup(conn, sg_id) + if _, notFound := err.(securityGroupNotFound); notFound { + // The security group containing this rule no longer exists. 
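+		// Clearing the ID tells Terraform the rule is gone; a subsequent
+		// plan/apply can then recreate it alongside a new group.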
+ d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("Error finding security group (%s) for rule (%s): %s", sg_id, d.Id(), err) + } + + isVPC := sg.VpcId != nil && *sg.VpcId != "" + + var rule *ec2.IpPermission + var rules []*ec2.IpPermission + ruleType := d.Get("type").(string) + switch ruleType { + case "ingress": + rules = sg.IpPermissions + default: + rules = sg.IpPermissionsEgress + } + + p, err := expandIPPerm(d, sg) + if err != nil { + return err + } + + if len(rules) == 0 { + log.Printf("[WARN] No %s rules were found for Security Group (%s) looking for Security Group Rule (%s)", + ruleType, *sg.GroupName, d.Id()) + d.SetId("") + return nil + } + + rule = findRuleMatch(p, rules, isVPC) + + if rule == nil { + log.Printf("[DEBUG] Unable to find matching %s Security Group Rule (%s) for Group %s", + ruleType, d.Id(), sg_id) + d.SetId("") + return nil + } + + log.Printf("[DEBUG] Found rule for Security Group Rule (%s): %s", d.Id(), rule) + + d.Set("type", ruleType) + if err := setFromIPPerm(d, sg, p); err != nil { + return errwrap.Wrapf("Error setting IP Permission for Security Group Rule: {{err}}", err) + } + return nil +} + +func resourceAwsSecurityGroupRuleDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + sg_id := d.Get("security_group_id").(string) + + awsMutexKV.Lock(sg_id) + defer awsMutexKV.Unlock(sg_id) + + sg, err := findResourceSecurityGroup(conn, sg_id) + if err != nil { + return err + } + + perm, err := expandIPPerm(d, sg) + if err != nil { + return err + } + ruleType := d.Get("type").(string) + switch ruleType { + case "ingress": + log.Printf("[DEBUG] Revoking rule (%s) from security group %s:\n%s", + "ingress", sg_id, perm) + req := &ec2.RevokeSecurityGroupIngressInput{ + GroupId: sg.GroupId, + IpPermissions: []*ec2.IpPermission{perm}, + } + + _, err = conn.RevokeSecurityGroupIngress(req) + + if err != nil { + return fmt.Errorf( + "Error revoking security group %s rules: %s", + sg_id, err) + } + case "egress": + + log.Printf("[DEBUG] Revoking security group %#v %s rule: %#v", + sg_id, "egress", perm) + req := &ec2.RevokeSecurityGroupEgressInput{ + GroupId: sg.GroupId, + IpPermissions: []*ec2.IpPermission{perm}, + } + + _, err = conn.RevokeSecurityGroupEgress(req) + + if err != nil { + return fmt.Errorf( + "Error revoking security group %s rules: %s", + sg_id, err) + } + } + + d.SetId("") + + return nil +} + +func findResourceSecurityGroup(conn *ec2.EC2, id string) (*ec2.SecurityGroup, error) { + req := &ec2.DescribeSecurityGroupsInput{ + GroupIds: []*string{aws.String(id)}, + } + resp, err := conn.DescribeSecurityGroups(req) + if err, ok := err.(awserr.Error); ok && err.Code() == "InvalidGroup.NotFound" { + return nil, securityGroupNotFound{id, nil} + } + if err != nil { + return nil, err + } + if resp == nil { + return nil, securityGroupNotFound{id, nil} + } + if len(resp.SecurityGroups) != 1 || resp.SecurityGroups[0] == nil { + return nil, securityGroupNotFound{id, resp.SecurityGroups} + } + + return resp.SecurityGroups[0], nil +} + +type securityGroupNotFound struct { + id string + securityGroups []*ec2.SecurityGroup +} + +func (err securityGroupNotFound) Error() string { + if err.securityGroups == nil { + return fmt.Sprintf("No security group with ID %q", err.id) + } + return fmt.Sprintf("Expected to find one security group with ID %q, got: %#v", + err.id, err.securityGroups) +} + +// ByGroupPair implements sort.Interface for []*ec2.UserIDGroupPairs based on +// GroupID or GroupName field (only one should be 
set). +type ByGroupPair []*ec2.UserIdGroupPair + +func (b ByGroupPair) Len() int { return len(b) } +func (b ByGroupPair) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b ByGroupPair) Less(i, j int) bool { + if b[i].GroupId != nil && b[j].GroupId != nil { + return *b[i].GroupId < *b[j].GroupId + } + if b[i].GroupName != nil && b[j].GroupName != nil { + return *b[i].GroupName < *b[j].GroupName + } + + panic("mismatched security group rules, may be a terraform bug") +} + +func findRuleMatch(p *ec2.IpPermission, rules []*ec2.IpPermission, isVPC bool) *ec2.IpPermission { + var rule *ec2.IpPermission + for _, r := range rules { + if r.ToPort != nil && *p.ToPort != *r.ToPort { + continue + } + + if r.FromPort != nil && *p.FromPort != *r.FromPort { + continue + } + + if r.IpProtocol != nil && *p.IpProtocol != *r.IpProtocol { + continue + } + + remaining := len(p.IpRanges) + for _, ip := range p.IpRanges { + for _, rip := range r.IpRanges { + if *ip.CidrIp == *rip.CidrIp { + remaining-- + } + } + } + + if remaining > 0 { + continue + } + + remaining = len(p.Ipv6Ranges) + for _, ipv6 := range p.Ipv6Ranges { + for _, ipv6ip := range r.Ipv6Ranges { + if *ipv6.CidrIpv6 == *ipv6ip.CidrIpv6 { + remaining-- + } + } + } + + if remaining > 0 { + continue + } + + remaining = len(p.PrefixListIds) + for _, pl := range p.PrefixListIds { + for _, rpl := range r.PrefixListIds { + if *pl.PrefixListId == *rpl.PrefixListId { + remaining-- + } + } + } + + if remaining > 0 { + continue + } + + remaining = len(p.UserIdGroupPairs) + for _, ip := range p.UserIdGroupPairs { + for _, rip := range r.UserIdGroupPairs { + if isVPC { + if *ip.GroupId == *rip.GroupId { + remaining-- + } + } else { + if *ip.GroupName == *rip.GroupName { + remaining-- + } + } + } + } + + if remaining > 0 { + continue + } + + rule = r + } + return rule +} + +func ipPermissionIDHash(sg_id, ruleType string, ip *ec2.IpPermission) string { + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("%s-", sg_id)) + if ip.FromPort != nil && *ip.FromPort > 0 { + buf.WriteString(fmt.Sprintf("%d-", *ip.FromPort)) + } + if ip.ToPort != nil && *ip.ToPort > 0 { + buf.WriteString(fmt.Sprintf("%d-", *ip.ToPort)) + } + buf.WriteString(fmt.Sprintf("%s-", *ip.IpProtocol)) + buf.WriteString(fmt.Sprintf("%s-", ruleType)) + + // We need to make sure to sort the strings below so that we always + // generate the same hash code no matter what is in the set. 
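+	// For example (illustrative values), the CIDR lists
+	// ["10.0.0.0/16", "192.168.0.0/24"] and ["192.168.0.0/24", "10.0.0.0/16"]
+	// produce the same "sgrule-..." ID, because each list is sorted before it
+	// is written into the hash buffer.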
+ if len(ip.IpRanges) > 0 { + s := make([]string, len(ip.IpRanges)) + for i, r := range ip.IpRanges { + s[i] = *r.CidrIp + } + sort.Strings(s) + + for _, v := range s { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + } + + if len(ip.Ipv6Ranges) > 0 { + s := make([]string, len(ip.Ipv6Ranges)) + for i, r := range ip.Ipv6Ranges { + s[i] = *r.CidrIpv6 + } + sort.Strings(s) + + for _, v := range s { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + } + + if len(ip.PrefixListIds) > 0 { + s := make([]string, len(ip.PrefixListIds)) + for i, pl := range ip.PrefixListIds { + s[i] = *pl.PrefixListId + } + sort.Strings(s) + + for _, v := range s { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + } + + if len(ip.UserIdGroupPairs) > 0 { + sort.Sort(ByGroupPair(ip.UserIdGroupPairs)) + for _, pair := range ip.UserIdGroupPairs { + if pair.GroupId != nil { + buf.WriteString(fmt.Sprintf("%s-", *pair.GroupId)) + } else { + buf.WriteString("-") + } + if pair.GroupName != nil { + buf.WriteString(fmt.Sprintf("%s-", *pair.GroupName)) + } else { + buf.WriteString("-") + } + } + } + + return fmt.Sprintf("sgrule-%d", hashcode.String(buf.String())) +} + +func expandIPPerm(d *schema.ResourceData, sg *ec2.SecurityGroup) (*ec2.IpPermission, error) { + var perm ec2.IpPermission + + perm.FromPort = aws.Int64(int64(d.Get("from_port").(int))) + perm.ToPort = aws.Int64(int64(d.Get("to_port").(int))) + protocol := protocolForValue(d.Get("protocol").(string)) + perm.IpProtocol = aws.String(protocol) + + // build a group map that behaves like a set + groups := make(map[string]bool) + if raw, ok := d.GetOk("source_security_group_id"); ok { + groups[raw.(string)] = true + } + + if v, ok := d.GetOk("self"); ok && v.(bool) { + if sg.VpcId != nil && *sg.VpcId != "" { + groups[*sg.GroupId] = true + } else { + groups[*sg.GroupName] = true + } + } + + if len(groups) > 0 { + perm.UserIdGroupPairs = make([]*ec2.UserIdGroupPair, len(groups)) + // build string list of group name/ids + var gl []string + for k, _ := range groups { + gl = append(gl, k) + } + + for i, name := range gl { + ownerId, id := "", name + if items := strings.Split(id, "/"); len(items) > 1 { + ownerId, id = items[0], items[1] + } + + perm.UserIdGroupPairs[i] = &ec2.UserIdGroupPair{ + GroupId: aws.String(id), + UserId: aws.String(ownerId), + } + + if sg.VpcId == nil || *sg.VpcId == "" { + perm.UserIdGroupPairs[i].GroupId = nil + perm.UserIdGroupPairs[i].GroupName = aws.String(id) + perm.UserIdGroupPairs[i].UserId = nil + } + } + } + + if raw, ok := d.GetOk("cidr_blocks"); ok { + list := raw.([]interface{}) + perm.IpRanges = make([]*ec2.IpRange, len(list)) + for i, v := range list { + cidrIP, ok := v.(string) + if !ok { + return nil, fmt.Errorf("empty element found in cidr_blocks - consider using the compact function") + } + perm.IpRanges[i] = &ec2.IpRange{CidrIp: aws.String(cidrIP)} + } + } + + if raw, ok := d.GetOk("ipv6_cidr_blocks"); ok { + list := raw.([]interface{}) + perm.Ipv6Ranges = make([]*ec2.Ipv6Range, len(list)) + for i, v := range list { + cidrIP, ok := v.(string) + if !ok { + return nil, fmt.Errorf("empty element found in ipv6_cidr_blocks - consider using the compact function") + } + perm.Ipv6Ranges[i] = &ec2.Ipv6Range{CidrIpv6: aws.String(cidrIP)} + } + } + + if raw, ok := d.GetOk("prefix_list_ids"); ok { + list := raw.([]interface{}) + perm.PrefixListIds = make([]*ec2.PrefixListId, len(list)) + for i, v := range list { + prefixListID, ok := v.(string) + if !ok { + return nil, fmt.Errorf("empty element found in prefix_list_ids - consider using the compact 
function") + } + perm.PrefixListIds[i] = &ec2.PrefixListId{PrefixListId: aws.String(prefixListID)} + } + } + + return &perm, nil +} + +func setFromIPPerm(d *schema.ResourceData, sg *ec2.SecurityGroup, rule *ec2.IpPermission) error { + isVPC := sg.VpcId != nil && *sg.VpcId != "" + + d.Set("from_port", rule.FromPort) + d.Set("to_port", rule.ToPort) + d.Set("protocol", rule.IpProtocol) + + var cb []string + for _, c := range rule.IpRanges { + cb = append(cb, *c.CidrIp) + } + + d.Set("cidr_blocks", cb) + + var ipv6 []string + for _, ip := range rule.Ipv6Ranges { + ipv6 = append(ipv6, *ip.CidrIpv6) + } + d.Set("ipv6_cidr_blocks", ipv6) + + var pl []string + for _, p := range rule.PrefixListIds { + pl = append(pl, *p.PrefixListId) + } + d.Set("prefix_list_ids", pl) + + if len(rule.UserIdGroupPairs) > 0 { + s := rule.UserIdGroupPairs[0] + + if isVPC { + d.Set("source_security_group_id", *s.GroupId) + } else { + d.Set("source_security_group_id", *s.GroupName) + } + } + + return nil +} + +// Validates that either 'cidr_blocks', 'ipv6_cidr_blocks', 'self', or 'source_security_group_id' is set +func validateAwsSecurityGroupRule(d *schema.ResourceData) error { + _, blocksOk := d.GetOk("cidr_blocks") + _, ipv6Ok := d.GetOk("ipv6_cidr_blocks") + _, sourceOk := d.GetOk("source_security_group_id") + _, selfOk := d.GetOk("self") + _, prefixOk := d.GetOk("prefix_list_ids") + if !blocksOk && !sourceOk && !selfOk && !prefixOk && !ipv6Ok { + return fmt.Errorf( + "One of ['cidr_blocks', 'ipv6_cidr_blocks', 'self', 'source_security_group_id', 'prefix_list_ids'] must be set to create an AWS Security Group Rule") + } + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group_rule_migrate.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group_rule_migrate.go new file mode 100644 index 000000000..12788054e --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_security_group_rule_migrate.go @@ -0,0 +1,105 @@ +package aws + +import ( + "fmt" + "log" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/terraform" +) + +func resourceAwsSecurityGroupRuleMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + switch v { + case 0: + log.Println("[INFO] Found AWS Security Group State v0; migrating to v1") + return migrateSGRuleStateV0toV1(is) + case 1: + log.Println("[INFO] Found AWS Security Group State v1; migrating to v2") + // migrating to version 2 of the schema is the same as 0->1, since the + // method signature has changed now and will use the security group id in + // the hash + return migrateSGRuleStateV0toV1(is) + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateSGRuleStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + perm, err := migrateExpandIPPerm(is.Attributes) + + if err != nil { + return nil, fmt.Errorf("[WARN] Error making new IP Permission in Security Group migration") + } + + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + newID := ipPermissionIDHash(is.Attributes["security_group_id"], is.Attributes["type"], perm) + is.Attributes["id"] = newID + is.ID = newID + log.Printf("[DEBUG] Attributes after migration: %#v, new id: %s", 
is.Attributes, newID) + return is, nil +} + +func migrateExpandIPPerm(attrs map[string]string) (*ec2.IpPermission, error) { + var perm ec2.IpPermission + tp, err := strconv.Atoi(attrs["to_port"]) + if err != nil { + return nil, fmt.Errorf("Error converting to_port in Security Group migration") + } + + fp, err := strconv.Atoi(attrs["from_port"]) + if err != nil { + return nil, fmt.Errorf("Error converting from_port in Security Group migration") + } + + perm.ToPort = aws.Int64(int64(tp)) + perm.FromPort = aws.Int64(int64(fp)) + perm.IpProtocol = aws.String(attrs["protocol"]) + + groups := make(map[string]bool) + if attrs["self"] == "true" { + groups[attrs["security_group_id"]] = true + } + + if attrs["source_security_group_id"] != "" { + groups[attrs["source_security_group_id"]] = true + } + + if len(groups) > 0 { + perm.UserIdGroupPairs = make([]*ec2.UserIdGroupPair, len(groups)) + // build string list of group name/ids + var gl []string + for k, _ := range groups { + gl = append(gl, k) + } + + for i, name := range gl { + perm.UserIdGroupPairs[i] = &ec2.UserIdGroupPair{ + GroupId: aws.String(name), + } + } + } + + var cb []string + for k, v := range attrs { + if k != "cidr_blocks.#" && strings.HasPrefix(k, "cidr_blocks") { + cb = append(cb, v) + } + } + if len(cb) > 0 { + perm.IpRanges = make([]*ec2.IpRange, len(cb)) + for i, v := range cb { + perm.IpRanges[i] = &ec2.IpRange{CidrIp: aws.String(v)} + } + } + + return &perm, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_active_receipt_rule_set.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_active_receipt_rule_set.go new file mode 100644 index 000000000..854d645a6 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_active_receipt_rule_set.go @@ -0,0 +1,80 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ses" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSesActiveReceiptRuleSet() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSesActiveReceiptRuleSetUpdate, + Update: resourceAwsSesActiveReceiptRuleSetUpdate, + Read: resourceAwsSesActiveReceiptRuleSetRead, + Delete: resourceAwsSesActiveReceiptRuleSetDelete, + + Schema: map[string]*schema.Schema{ + "rule_set_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceAwsSesActiveReceiptRuleSetUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + ruleSetName := d.Get("rule_set_name").(string) + + createOpts := &ses.SetActiveReceiptRuleSetInput{ + RuleSetName: aws.String(ruleSetName), + } + + _, err := conn.SetActiveReceiptRuleSet(createOpts) + if err != nil { + return fmt.Errorf("Error setting active SES rule set: %s", err) + } + + d.SetId(ruleSetName) + + return resourceAwsSesActiveReceiptRuleSetRead(d, meta) +} + +func resourceAwsSesActiveReceiptRuleSetRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + describeOpts := &ses.DescribeActiveReceiptRuleSetInput{} + + response, err := conn.DescribeActiveReceiptRuleSet(describeOpts) + if err != nil { + return err + } + + if response.Metadata != nil { + d.Set("rule_set_name", response.Metadata.Name) + } else { + log.Print("[WARN] No active Receipt Rule Set found") + d.SetId("") + } + + return nil +} + +func resourceAwsSesActiveReceiptRuleSetDelete(d *schema.ResourceData, 
meta interface{}) error {
+	conn := meta.(*AWSClient).sesConn
+
+	deleteOpts := &ses.SetActiveReceiptRuleSetInput{
+		RuleSetName: nil,
+	}
+
+	_, err := conn.SetActiveReceiptRuleSet(deleteOpts)
+	if err != nil {
+		return fmt.Errorf("Error deleting active SES rule set: %s", err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_configuration_set.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_configuration_set.go
new file mode 100644
index 000000000..e631b887c
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_configuration_set.go
@@ -0,0 +1,110 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/ses"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsSesConfigurationSet() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsSesConfigurationSetCreate,
+		Read:   resourceAwsSesConfigurationSetRead,
+		Delete: resourceAwsSesConfigurationSetDelete,
+		Importer: &schema.ResourceImporter{
+			State: schema.ImportStatePassthrough,
+		},
+
+		Schema: map[string]*schema.Schema{
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+		},
+	}
+}
+
+func resourceAwsSesConfigurationSetCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).sesConn
+
+	configurationSetName := d.Get("name").(string)
+
+	createOpts := &ses.CreateConfigurationSetInput{
+		ConfigurationSet: &ses.ConfigurationSet{
+			Name: aws.String(configurationSetName),
+		},
+	}
+
+	_, err := conn.CreateConfigurationSet(createOpts)
+	if err != nil {
+		return fmt.Errorf("Error creating SES configuration set: %s", err)
+	}
+
+	d.SetId(configurationSetName)
+
+	return resourceAwsSesConfigurationSetRead(d, meta)
+}
+
+func resourceAwsSesConfigurationSetRead(d *schema.ResourceData, meta interface{}) error {
+	configurationSetExists, err := findConfigurationSet(d.Id(), nil, meta)
+
+	// Surface lookup errors before treating the set as gone, so a transient
+	// API failure does not remove the resource from state.
+	if err != nil {
+		return err
+	}
+
+	if !configurationSetExists {
+		log.Printf("[WARN] SES Configuration Set (%s) not found", d.Id())
+		d.SetId("")
+		return nil
+	}
+
+	d.Set("name", d.Id())
+
+	return nil
+}
+
+func resourceAwsSesConfigurationSetDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).sesConn
+
+	log.Printf("[DEBUG] SES Delete Configuration Rule Set: %s", d.Id())
+	_, err := conn.DeleteConfigurationSet(&ses.DeleteConfigurationSetInput{
+		ConfigurationSetName: aws.String(d.Id()),
+	})
+
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func findConfigurationSet(name string, token *string, meta interface{}) (bool, error) {
+	conn := meta.(*AWSClient).sesConn
+
+	configurationSetExists := false
+
+	listOpts := &ses.ListConfigurationSetsInput{
+		NextToken: token,
+	}
+
+	response, err := conn.ListConfigurationSets(listOpts)
+	// Check the error before touching the response: on failure the response
+	// may be nil, and ranging over its ConfigurationSets would panic.
+	if err != nil {
+		return false, err
+	}
+
+	for _, element := range response.ConfigurationSets {
+		if *element.Name == name {
+			configurationSetExists = true
+		}
+	}
+
+	// Keep paging until the set is found or the token runs out.
+	if !configurationSetExists && response.NextToken != nil {
+		configurationSetExists, err = findConfigurationSet(name, response.NextToken, meta)
+		if err != nil {
+			return false, err
+		}
+	}
+
+	return configurationSetExists, nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_domain_identity.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_domain_identity.go new
file mode 100644 index 000000000..734030cc7 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_domain_identity.go @@ -0,0 +1,103 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ses" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSesDomainIdentity() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSesDomainIdentityCreate, + Read: resourceAwsSesDomainIdentityRead, + Delete: resourceAwsSesDomainIdentityDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "domain": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "verification_token": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsSesDomainIdentityCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + domainName := d.Get("domain").(string) + + createOpts := &ses.VerifyDomainIdentityInput{ + Domain: aws.String(domainName), + } + + _, err := conn.VerifyDomainIdentity(createOpts) + if err != nil { + return fmt.Errorf("Error requesting SES domain identity verification: %s", err) + } + + d.SetId(domainName) + + return resourceAwsSesDomainIdentityRead(d, meta) +} + +func resourceAwsSesDomainIdentityRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + domainName := d.Id() + d.Set("domain", domainName) + + readOpts := &ses.GetIdentityVerificationAttributesInput{ + Identities: []*string{ + aws.String(domainName), + }, + } + + response, err := conn.GetIdentityVerificationAttributes(readOpts) + if err != nil { + log.Printf("[WARN] Error fetching identity verification attributes for %s: %s", d.Id(), err) + return err + } + + verificationAttrs, ok := response.VerificationAttributes[domainName] + if !ok { + log.Printf("[WARN] Domain not listed in response when fetching verification attributes for %s", d.Id()) + d.SetId("") + return nil + } + + d.Set("arn", fmt.Sprintf("arn:%s:ses:%s:%s:identity/%s", meta.(*AWSClient).partition, meta.(*AWSClient).region, meta.(*AWSClient).accountid, d.Id())) + d.Set("verification_token", verificationAttrs.VerificationToken) + return nil +} + +func resourceAwsSesDomainIdentityDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + domainName := d.Get("domain").(string) + + deleteOpts := &ses.DeleteIdentityInput{ + Identity: aws.String(domainName), + } + + _, err := conn.DeleteIdentity(deleteOpts) + if err != nil { + return fmt.Errorf("Error deleting SES domain identity: %s", err) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_event_destination.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_event_destination.go new file mode 100644 index 000000000..2dde76e5a --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_event_destination.go @@ -0,0 +1,214 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ses" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSesEventDestination() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSesEventDestinationCreate, + Read: resourceAwsSesEventDestinationRead, + 
Delete: resourceAwsSesEventDestinationDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "configuration_set_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "enabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + + "matching_types": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Set: schema.HashString, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateMatchingTypes, + }, + }, + + "cloudwatch_destination": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"kinesis_destination"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default_value": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "dimension_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "value_source": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validateDimensionValueSource, + }, + }, + }, + }, + + "kinesis_destination": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"cloudwatch_destination"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "stream_arn": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "role_arn": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + } +} + +func resourceAwsSesEventDestinationCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + configurationSetName := d.Get("configuration_set_name").(string) + eventDestinationName := d.Get("name").(string) + enabled := d.Get("enabled").(bool) + matchingEventTypes := d.Get("matching_types").(*schema.Set).List() + + createOpts := &ses.CreateConfigurationSetEventDestinationInput{ + ConfigurationSetName: aws.String(configurationSetName), + EventDestination: &ses.EventDestination{ + Name: aws.String(eventDestinationName), + Enabled: aws.Bool(enabled), + MatchingEventTypes: expandStringList(matchingEventTypes), + }, + } + + if v, ok := d.GetOk("cloudwatch_destination"); ok { + destination := v.(*schema.Set).List() + createOpts.EventDestination.CloudWatchDestination = &ses.CloudWatchDestination{ + DimensionConfigurations: generateCloudWatchDestination(destination), + } + log.Printf("[DEBUG] Creating cloudwatch destination: %#v", destination) + } + + if v, ok := d.GetOk("kinesis_destination"); ok { + destination := v.(*schema.Set).List() + if len(destination) > 1 { + return fmt.Errorf("You can only define a single kinesis destination per record") + } + kinesis := destination[0].(map[string]interface{}) + createOpts.EventDestination.KinesisFirehoseDestination = &ses.KinesisFirehoseDestination{ + DeliveryStreamARN: aws.String(kinesis["stream_arn"].(string)), + IAMRoleARN: aws.String(kinesis["role_arn"].(string)), + } + log.Printf("[DEBUG] Creating kinesis destination: %#v", kinesis) + } + + _, err := conn.CreateConfigurationSetEventDestination(createOpts) + if err != nil { + return fmt.Errorf("Error creating SES configuration set event destination: %s", err) + } + + d.SetId(eventDestinationName) + + log.Printf("[WARN] SES DONE") + return resourceAwsSesEventDestinationRead(d, meta) +} + +func resourceAwsSesEventDestinationRead(d *schema.ResourceData, 
meta interface{}) error { + + return nil +} + +func resourceAwsSesEventDestinationDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + log.Printf("[DEBUG] SES Delete Configuration Set Destination: %s", d.Id()) + _, err := conn.DeleteConfigurationSetEventDestination(&ses.DeleteConfigurationSetEventDestinationInput{ + ConfigurationSetName: aws.String(d.Get("configuration_set_name").(string)), + EventDestinationName: aws.String(d.Id()), + }) + + if err != nil { + return err + } + + return nil +} + +func validateMatchingTypes(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + matchingTypes := map[string]bool{ + "send": true, + "reject": true, + "bounce": true, + "complaint": true, + "delivery": true, + } + + if !matchingTypes[value] { + errors = append(errors, fmt.Errorf("%q must be a valid matching event type value: %q", k, value)) + } + return +} + +func validateDimensionValueSource(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + matchingSource := map[string]bool{ + "messageTag": true, + "emailHeader": true, + } + + if !matchingSource[value] { + errors = append(errors, fmt.Errorf("%q must be a valid dimension value: %q", k, value)) + } + return +} + +func generateCloudWatchDestination(v []interface{}) []*ses.CloudWatchDimensionConfiguration { + + b := make([]*ses.CloudWatchDimensionConfiguration, len(v)) + + for i, vI := range v { + cloudwatch := vI.(map[string]interface{}) + b[i] = &ses.CloudWatchDimensionConfiguration{ + DefaultDimensionValue: aws.String(cloudwatch["default_value"].(string)), + DimensionName: aws.String(cloudwatch["dimension_name"].(string)), + DimensionValueSource: aws.String(cloudwatch["value_source"].(string)), + } + } + + return b +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_receipt_filter.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_receipt_filter.go new file mode 100644 index 000000000..2242d7eca --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_receipt_filter.go @@ -0,0 +1,109 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ses" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSesReceiptFilter() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSesReceiptFilterCreate, + Read: resourceAwsSesReceiptFilterRead, + Delete: resourceAwsSesReceiptFilterDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "cidr": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "policy": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsSesReceiptFilterCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + name := d.Get("name").(string) + + createOpts := &ses.CreateReceiptFilterInput{ + Filter: &ses.ReceiptFilter{ + Name: aws.String(name), + IpFilter: &ses.ReceiptIpFilter{ + Cidr: aws.String(d.Get("cidr").(string)), + Policy: aws.String(d.Get("policy").(string)), + }, + }, + } + + _, err := conn.CreateReceiptFilter(createOpts) + if err != nil { + return fmt.Errorf("Error creating SES receipt filter: %s", err) + } + + 
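+	// The filter name doubles as the resource ID. As a hypothetical example
+	// (values not from this file): name "block-spammer", cidr "10.0.0.0/24",
+	// policy "Block" -- SES accepts the policy values "Allow" and "Block".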
d.SetId(name) + + return resourceAwsSesReceiptFilterRead(d, meta) +} + +func resourceAwsSesReceiptFilterRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + listOpts := &ses.ListReceiptFiltersInput{} + + response, err := conn.ListReceiptFilters(listOpts) + if err != nil { + return err + } + + found := false + for _, element := range response.Filters { + if *element.Name == d.Id() { + d.Set("cidr", element.IpFilter.Cidr) + d.Set("policy", element.IpFilter.Policy) + d.Set("name", element.Name) + found = true + } + } + + if !found { + log.Printf("[WARN] SES Receipt Filter (%s) not found", d.Id()) + d.SetId("") + } + + return nil +} + +func resourceAwsSesReceiptFilterDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + deleteOpts := &ses.DeleteReceiptFilterInput{ + FilterName: aws.String(d.Id()), + } + + _, err := conn.DeleteReceiptFilter(deleteOpts) + if err != nil { + return fmt.Errorf("Error deleting SES receipt filter: %s", err) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_receipt_rule.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_receipt_rule.go new file mode 100644 index 000000000..912620acd --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_receipt_rule.go @@ -0,0 +1,765 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "sort" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ses" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSesReceiptRule() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSesReceiptRuleCreate, + Update: resourceAwsSesReceiptRuleUpdate, + Read: resourceAwsSesReceiptRuleRead, + Delete: resourceAwsSesReceiptRuleDelete, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "rule_set_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "after": { + Type: schema.TypeString, + Optional: true, + }, + + "enabled": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "recipients": { + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Set: schema.HashString, + }, + + "scan_enabled": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "tls_policy": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "add_header_action": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "header_name": { + Type: schema.TypeString, + Required: true, + }, + + "header_value": { + Type: schema.TypeString, + Required: true, + }, + + "position": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["header_name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["header_value"].(string))) + buf.WriteString(fmt.Sprintf("%d-", m["position"].(int))) + + return hashcode.String(buf.String()) + }, + }, + + "bounce_action": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message": { + Type: schema.TypeString, + Required: true, + }, + + "sender": { + Type: 
schema.TypeString, + Required: true, + }, + + "smtp_reply_code": { + Type: schema.TypeString, + Required: true, + }, + + "status_code": { + Type: schema.TypeString, + Optional: true, + }, + + "topic_arn": { + Type: schema.TypeString, + Optional: true, + }, + + "position": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["message"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["sender"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["smtp_reply_code"].(string))) + + if _, ok := m["status_code"]; ok { + buf.WriteString(fmt.Sprintf("%s-", m["status_code"].(string))) + } + + if _, ok := m["topic_arn"]; ok { + buf.WriteString(fmt.Sprintf("%s-", m["topic_arn"].(string))) + } + + buf.WriteString(fmt.Sprintf("%d-", m["position"].(int))) + + return hashcode.String(buf.String()) + }, + }, + + "lambda_action": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "function_arn": { + Type: schema.TypeString, + Required: true, + }, + + "invocation_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "topic_arn": { + Type: schema.TypeString, + Optional: true, + }, + + "position": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["function_arn"].(string))) + + if _, ok := m["invocation_type"]; ok { + buf.WriteString(fmt.Sprintf("%s-", m["invocation_type"].(string))) + } + + if _, ok := m["topic_arn"]; ok { + buf.WriteString(fmt.Sprintf("%s-", m["topic_arn"].(string))) + } + + buf.WriteString(fmt.Sprintf("%d-", m["position"].(int))) + + return hashcode.String(buf.String()) + }, + }, + + "s3_action": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket_name": { + Type: schema.TypeString, + Required: true, + }, + + "kms_key_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, + }, + + "object_key_prefix": { + Type: schema.TypeString, + Optional: true, + }, + + "topic_arn": { + Type: schema.TypeString, + Optional: true, + }, + + "position": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["bucket_name"].(string))) + + if _, ok := m["kms_key_arn"]; ok { + buf.WriteString(fmt.Sprintf("%s-", m["kms_key_arn"].(string))) + } + + if _, ok := m["object_key_prefix"]; ok { + buf.WriteString(fmt.Sprintf("%s-", m["object_key_prefix"].(string))) + } + + if _, ok := m["topic_arn"]; ok { + buf.WriteString(fmt.Sprintf("%s-", m["topic_arn"].(string))) + } + + buf.WriteString(fmt.Sprintf("%d-", m["position"].(int))) + + return hashcode.String(buf.String()) + }, + }, + + "sns_action": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "topic_arn": { + Type: schema.TypeString, + Required: true, + }, + + "position": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["topic_arn"].(string))) + buf.WriteString(fmt.Sprintf("%d-", m["position"].(int))) + + return hashcode.String(buf.String()) + }, + }, + + "stop_action": { + Type: schema.TypeSet, + 
Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "scope": { + Type: schema.TypeString, + Required: true, + }, + + "topic_arn": { + Type: schema.TypeString, + Optional: true, + }, + + "position": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["scope"].(string))) + + if _, ok := m["topic_arn"]; ok { + buf.WriteString(fmt.Sprintf("%s-", m["topic_arn"].(string))) + } + + buf.WriteString(fmt.Sprintf("%d-", m["position"].(int))) + + return hashcode.String(buf.String()) + }, + }, + + "workmail_action": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "organization_arn": { + Type: schema.TypeString, + Required: true, + }, + + "topic_arn": { + Type: schema.TypeString, + Optional: true, + }, + + "position": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["organization_arn"].(string))) + + if _, ok := m["topic_arn"]; ok { + buf.WriteString(fmt.Sprintf("%s-", m["topic_arn"].(string))) + } + + buf.WriteString(fmt.Sprintf("%d-", m["position"].(int))) + + return hashcode.String(buf.String()) + }, + }, + }, + } +} + +func resourceAwsSesReceiptRuleCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + createOpts := &ses.CreateReceiptRuleInput{ + Rule: buildReceiptRule(d, meta), + RuleSetName: aws.String(d.Get("rule_set_name").(string)), + } + + if v, ok := d.GetOk("after"); ok { + createOpts.After = aws.String(v.(string)) + } + + _, err := conn.CreateReceiptRule(createOpts) + if err != nil { + return fmt.Errorf("Error creating SES rule: %s", err) + } + + d.SetId(d.Get("name").(string)) + + return resourceAwsSesReceiptRuleUpdate(d, meta) +} + +func resourceAwsSesReceiptRuleUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + updateOpts := &ses.UpdateReceiptRuleInput{ + Rule: buildReceiptRule(d, meta), + RuleSetName: aws.String(d.Get("rule_set_name").(string)), + } + + _, err := conn.UpdateReceiptRule(updateOpts) + if err != nil { + return fmt.Errorf("Error updating SES rule: %s", err) + } + + if d.HasChange("after") { + changePosOpts := &ses.SetReceiptRulePositionInput{ + After: aws.String(d.Get("after").(string)), + RuleName: aws.String(d.Get("name").(string)), + RuleSetName: aws.String(d.Get("rule_set_name").(string)), + } + + _, err := conn.SetReceiptRulePosition(changePosOpts) + if err != nil { + return fmt.Errorf("Error updating SES rule: %s", err) + } + } + + return resourceAwsSesReceiptRuleRead(d, meta) +} + +func resourceAwsSesReceiptRuleRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + describeOpts := &ses.DescribeReceiptRuleInput{ + RuleName: aws.String(d.Id()), + RuleSetName: aws.String(d.Get("rule_set_name").(string)), + } + + response, err := conn.DescribeReceiptRule(describeOpts) + if err != nil { + _, ok := err.(awserr.Error) + if ok && err.(awserr.Error).Code() == "RuleDoesNotExist" { + log.Printf("[WARN] SES Receipt Rule (%s) not found", d.Id()) + d.SetId("") + return nil + } else { + return err + } + } + + d.Set("enabled", *response.Rule.Enabled) + d.Set("recipients", flattenStringList(response.Rule.Recipients)) + d.Set("scan_enabled", *response.Rule.ScanEnabled) + d.Set("tls_policy", 
*response.Rule.TlsPolicy) + + addHeaderActionList := []map[string]interface{}{} + bounceActionList := []map[string]interface{}{} + lambdaActionList := []map[string]interface{}{} + s3ActionList := []map[string]interface{}{} + snsActionList := []map[string]interface{}{} + stopActionList := []map[string]interface{}{} + workmailActionList := []map[string]interface{}{} + + for i, element := range response.Rule.Actions { + if element.AddHeaderAction != nil { + addHeaderAction := map[string]interface{}{ + "header_name": *element.AddHeaderAction.HeaderName, + "header_value": *element.AddHeaderAction.HeaderValue, + "position": i + 1, + } + addHeaderActionList = append(addHeaderActionList, addHeaderAction) + } + + if element.BounceAction != nil { + bounceAction := map[string]interface{}{ + "message": *element.BounceAction.Message, + "sender": *element.BounceAction.Sender, + "smtp_reply_code": *element.BounceAction.SmtpReplyCode, + "position": i + 1, + } + + if element.BounceAction.StatusCode != nil { + bounceAction["status_code"] = *element.BounceAction.StatusCode + } + + if element.BounceAction.TopicArn != nil { + bounceAction["topic_arn"] = *element.BounceAction.TopicArn + } + + bounceActionList = append(bounceActionList, bounceAction) + } + + if element.LambdaAction != nil { + lambdaAction := map[string]interface{}{ + "function_arn": *element.LambdaAction.FunctionArn, + "position": i + 1, + } + + if element.LambdaAction.InvocationType != nil { + lambdaAction["invocation_type"] = *element.LambdaAction.InvocationType + } + + if element.LambdaAction.TopicArn != nil { + lambdaAction["topic_arn"] = *element.LambdaAction.TopicArn + } + + lambdaActionList = append(lambdaActionList, lambdaAction) + } + + if element.S3Action != nil { + s3Action := map[string]interface{}{ + "bucket_name": *element.S3Action.BucketName, + "position": i + 1, + } + + if element.S3Action.KmsKeyArn != nil { + s3Action["kms_key_arn"] = *element.S3Action.KmsKeyArn + } + + if element.S3Action.ObjectKeyPrefix != nil { + s3Action["object_key_prefix"] = *element.S3Action.ObjectKeyPrefix + } + + if element.S3Action.TopicArn != nil { + s3Action["topic_arn"] = *element.S3Action.TopicArn + } + + s3ActionList = append(s3ActionList, s3Action) + } + + if element.SNSAction != nil { + snsAction := map[string]interface{}{ + "topic_arn": *element.SNSAction.TopicArn, + "position": i + 1, + } + + snsActionList = append(snsActionList, snsAction) + } + + if element.StopAction != nil { + stopAction := map[string]interface{}{ + "scope": *element.StopAction.Scope, + "position": i + 1, + } + + if element.StopAction.TopicArn != nil { + stopAction["topic_arn"] = *element.StopAction.TopicArn + } + + stopActionList = append(stopActionList, stopAction) + } + + if element.WorkmailAction != nil { + workmailAction := map[string]interface{}{ + "organization_arn": *element.WorkmailAction.OrganizationArn, + "position": i + 1, + } + + if element.WorkmailAction.TopicArn != nil { + workmailAction["topic_arn"] = *element.WorkmailAction.TopicArn + } + + workmailActionList = append(workmailActionList, workmailAction) + } + + } + + err = d.Set("add_header_action", addHeaderActionList) + if err != nil { + return err + } + + err = d.Set("bounce_action", bounceActionList) + if err != nil { + return err + } + + err = d.Set("lambda_action", lambdaActionList) + if err != nil { + return err + } + + err = d.Set("s3_action", s3ActionList) + if err != nil { + return err + } + + err = d.Set("sns_action", snsActionList) + if err != nil { + return err + } + + err = 
d.Set("stop_action", stopActionList) + if err != nil { + return err + } + + err = d.Set("workmail_action", workmailActionList) + if err != nil { + return err + } + + return nil +} + +func resourceAwsSesReceiptRuleDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + deleteOpts := &ses.DeleteReceiptRuleInput{ + RuleName: aws.String(d.Id()), + RuleSetName: aws.String(d.Get("rule_set_name").(string)), + } + + _, err := conn.DeleteReceiptRule(deleteOpts) + if err != nil { + return fmt.Errorf("Error deleting SES receipt rule: %s", err) + } + + return nil +} + +func buildReceiptRule(d *schema.ResourceData, meta interface{}) *ses.ReceiptRule { + receiptRule := &ses.ReceiptRule{ + Name: aws.String(d.Get("name").(string)), + } + + if v, ok := d.GetOk("enabled"); ok { + receiptRule.Enabled = aws.Bool(v.(bool)) + } + + if v, ok := d.GetOk("recipients"); ok { + receiptRule.Recipients = expandStringList(v.(*schema.Set).List()) + } + + if v, ok := d.GetOk("scan_enabled"); ok { + receiptRule.ScanEnabled = aws.Bool(v.(bool)) + } + + if v, ok := d.GetOk("tls_policy"); ok { + receiptRule.TlsPolicy = aws.String(v.(string)) + } + + actions := make(map[int]*ses.ReceiptAction) + + if v, ok := d.GetOk("add_header_action"); ok { + for _, element := range v.(*schema.Set).List() { + elem := element.(map[string]interface{}) + + actions[elem["position"].(int)] = &ses.ReceiptAction{ + AddHeaderAction: &ses.AddHeaderAction{ + HeaderName: aws.String(elem["header_name"].(string)), + HeaderValue: aws.String(elem["header_value"].(string)), + }, + } + } + } + + if v, ok := d.GetOk("bounce_action"); ok { + for _, element := range v.(*schema.Set).List() { + elem := element.(map[string]interface{}) + + bounceAction := &ses.BounceAction{ + Message: aws.String(elem["message"].(string)), + Sender: aws.String(elem["sender"].(string)), + SmtpReplyCode: aws.String(elem["smtp_reply_code"].(string)), + } + + if elem["status_code"] != "" { + bounceAction.StatusCode = aws.String(elem["status_code"].(string)) + } + + if elem["topic_arn"] != "" { + bounceAction.TopicArn = aws.String(elem["topic_arn"].(string)) + } + + actions[elem["position"].(int)] = &ses.ReceiptAction{ + BounceAction: bounceAction, + } + } + } + + if v, ok := d.GetOk("lambda_action"); ok { + for _, element := range v.(*schema.Set).List() { + elem := element.(map[string]interface{}) + + lambdaAction := &ses.LambdaAction{ + FunctionArn: aws.String(elem["function_arn"].(string)), + } + + if elem["invocation_type"] != "" { + lambdaAction.InvocationType = aws.String(elem["invocation_type"].(string)) + } + + if elem["topic_arn"] != "" { + lambdaAction.TopicArn = aws.String(elem["topic_arn"].(string)) + } + + actions[elem["position"].(int)] = &ses.ReceiptAction{ + LambdaAction: lambdaAction, + } + } + } + + if v, ok := d.GetOk("s3_action"); ok { + for _, element := range v.(*schema.Set).List() { + elem := element.(map[string]interface{}) + + s3Action := &ses.S3Action{ + BucketName: aws.String(elem["bucket_name"].(string)), + KmsKeyArn: aws.String(elem["kms_key_arn"].(string)), + ObjectKeyPrefix: aws.String(elem["object_key_prefix"].(string)), + } + + if elem["topic_arn"] != "" { + s3Action.TopicArn = aws.String(elem["topic_arn"].(string)) + } + + actions[elem["position"].(int)] = &ses.ReceiptAction{ + S3Action: s3Action, + } + } + } + + if v, ok := d.GetOk("sns_action"); ok { + for _, element := range v.(*schema.Set).List() { + elem := element.(map[string]interface{}) + + snsAction := &ses.SNSAction{ + TopicArn: 
aws.String(elem["topic_arn"].(string)), + } + + actions[elem["position"].(int)] = &ses.ReceiptAction{ + SNSAction: snsAction, + } + } + } + + if v, ok := d.GetOk("stop_action"); ok { + for _, element := range v.(*schema.Set).List() { + elem := element.(map[string]interface{}) + + stopAction := &ses.StopAction{ + Scope: aws.String(elem["scope"].(string)), + } + + if elem["topic_arn"] != "" { + stopAction.TopicArn = aws.String(elem["topic_arn"].(string)) + } + + actions[elem["position"].(int)] = &ses.ReceiptAction{ + StopAction: stopAction, + } + } + } + + if v, ok := d.GetOk("workmail_action"); ok { + for _, element := range v.(*schema.Set).List() { + elem := element.(map[string]interface{}) + + workmailAction := &ses.WorkmailAction{ + OrganizationArn: aws.String(elem["organization_arn"].(string)), + } + + if elem["topic_arn"] != "" { + workmailAction.TopicArn = aws.String(elem["topic_arn"].(string)) + } + + actions[elem["position"].(int)] = &ses.ReceiptAction{ + WorkmailAction: workmailAction, + } + } + } + + var keys []int + for k := range actions { + keys = append(keys, k) + } + sort.Ints(keys) + + sortedActions := []*ses.ReceiptAction{} + for _, k := range keys { + sortedActions = append(sortedActions, actions[k]) + } + + receiptRule.Actions = sortedActions + + return receiptRule +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_receipt_rule_set.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_receipt_rule_set.go new file mode 100644 index 000000000..dfaf98cf8 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ses_receipt_rule_set.go @@ -0,0 +1,108 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ses" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSesReceiptRuleSet() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSesReceiptRuleSetCreate, + Read: resourceAwsSesReceiptRuleSetRead, + Delete: resourceAwsSesReceiptRuleSetDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "rule_set_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsSesReceiptRuleSetCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + ruleSetName := d.Get("rule_set_name").(string) + + createOpts := &ses.CreateReceiptRuleSetInput{ + RuleSetName: aws.String(ruleSetName), + } + + _, err := conn.CreateReceiptRuleSet(createOpts) + if err != nil { + return fmt.Errorf("Error creating SES rule set: %s", err) + } + + d.SetId(ruleSetName) + + return resourceAwsSesReceiptRuleSetRead(d, meta) +} + +func resourceAwsSesReceiptRuleSetRead(d *schema.ResourceData, meta interface{}) error { + ruleSetExists, err := findRuleSet(d.Id(), nil, meta) + + if !ruleSetExists { + log.Printf("[WARN] SES Receipt Rule Set (%s) not found", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return err + } + + d.Set("rule_set_name", d.Id()) + + return nil +} + +func resourceAwsSesReceiptRuleSetDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + log.Printf("[DEBUG] SES Delete Receipt Rule Set: %s", d.Id()) + _, err := conn.DeleteReceiptRuleSet(&ses.DeleteReceiptRuleSetInput{ + RuleSetName: aws.String(d.Id()), + }) + + if err != nil { + return err + } + + return nil +} + 
+func findRuleSet(name string, token *string, meta interface{}) (bool, error) {
+	conn := meta.(*AWSClient).sesConn
+
+	ruleSetExists := false
+
+	listOpts := &ses.ListReceiptRuleSetsInput{
+		NextToken: token,
+	}
+
+	response, err := conn.ListReceiptRuleSets(listOpts)
+	// Check the error before touching the response: on failure the response
+	// may be nil, and ranging over its RuleSets would panic.
+	if err != nil {
+		return false, err
+	}
+
+	for _, element := range response.RuleSets {
+		if *element.Name == name {
+			ruleSetExists = true
+		}
+	}
+
+	// Keep paging until the rule set is found or the token runs out.
+	if !ruleSetExists && response.NextToken != nil {
+		ruleSetExists, err = findRuleSet(name, response.NextToken, meta)
+		if err != nil {
+			return false, err
+		}
+	}
+
+	return ruleSetExists, nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sfn_activity.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sfn_activity.go
new file mode 100644
index 000000000..7ed65d1f4
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sfn_activity.go
@@ -0,0 +1,97 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/sfn"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsSfnActivity() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsSfnActivityCreate,
+		Read:   resourceAwsSfnActivityRead,
+		Delete: resourceAwsSfnActivityDelete,
+		Importer: &schema.ResourceImporter{
+			State: schema.ImportStatePassthrough,
+		},
+
+		Schema: map[string]*schema.Schema{
+			"name": {
+				Type:         schema.TypeString,
+				Required:     true,
+				ForceNew:     true,
+				ValidateFunc: validateSfnActivityName,
+			},
+
+			"creation_date": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceAwsSfnActivityCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).sfnconn
+	log.Print("[DEBUG] Creating Step Function Activity")
+
+	params := &sfn.CreateActivityInput{
+		Name: aws.String(d.Get("name").(string)),
+	}
+
+	activity, err := conn.CreateActivity(params)
+	if err != nil {
+		return fmt.Errorf("Error creating Step Function Activity: %s", err)
+	}
+
+	d.SetId(*activity.ActivityArn)
+
+	return resourceAwsSfnActivityRead(d, meta)
+}
+
+func resourceAwsSfnActivityRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).sfnconn
+	log.Printf("[DEBUG] Reading Step Function Activity: %s", d.Id())
+
+	sm, err := conn.DescribeActivity(&sfn.DescribeActivityInput{
+		ActivityArn: aws.String(d.Id()),
+	})
+	if err != nil {
+		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ActivityDoesNotExist" {
+			d.SetId("")
+			return nil
+		}
+		return err
+	}
+
+	d.Set("name", sm.Name)
+
+	if err := d.Set("creation_date", sm.CreationDate.Format(time.RFC3339)); err != nil {
+		log.Printf("[DEBUG] Error setting creation_date: %s", err)
+	}
+
+	return nil
+}
+
+func resourceAwsSfnActivityDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).sfnconn
+	log.Printf("[DEBUG] Deleting Step Functions Activity: %s", d.Id())
+
+	return resource.Retry(5*time.Minute, func() *resource.RetryError {
+		_, err := conn.DeleteActivity(&sfn.DeleteActivityInput{
+			ActivityArn: aws.String(d.Id()),
+		})
+
+		if err == nil {
+			return nil
+		}
+
+		return resource.NonRetryableError(err)
+	})
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sfn_state_machine.go
b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sfn_state_machine.go new file mode 100644 index 000000000..9d0fc4ca7 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sfn_state_machine.go @@ -0,0 +1,140 @@ +package aws + +import ( + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/sfn" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSfnStateMachine() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSfnStateMachineCreate, + Read: resourceAwsSfnStateMachineRead, + Delete: resourceAwsSfnStateMachineDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "definition": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateSfnStateMachineDefinition, + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateSfnStateMachineName, + }, + + "role_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + + "creation_date": { + Type: schema.TypeString, + Computed: true, + }, + + "status": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsSfnStateMachineCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sfnconn + log.Print("[DEBUG] Creating Step Function State Machine") + + params := &sfn.CreateStateMachineInput{ + Definition: aws.String(d.Get("definition").(string)), + Name: aws.String(d.Get("name").(string)), + RoleArn: aws.String(d.Get("role_arn").(string)), + } + + var activity *sfn.CreateStateMachineOutput + + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + var err error + activity, err = conn.CreateStateMachine(params) + + if err != nil { + // Note: the instance may be in a deleting mode, hence the retry + // when creating the step function. This can happen when we are + // updating the resource (since there is no update API call). 
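+			// Deletion on the AWS side is asynchronous: the old machine can
+			// linger in a DELETING state for a while, so keep retrying until
+			// it is fully gone.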
+ if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "StateMachineDeleting" { + return resource.RetryableError(err) + } + + return resource.NonRetryableError(err) + } + + return nil + }) + + if err != nil { + return errwrap.Wrapf("Error creating Step Function State Machine: {{err}}", err) + } + + d.SetId(*activity.StateMachineArn) + + return resourceAwsSfnStateMachineRead(d, meta) +} + +func resourceAwsSfnStateMachineRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sfnconn + log.Printf("[DEBUG] Reading Step Function State Machine: %s", d.Id()) + + sm, err := conn.DescribeStateMachine(&sfn.DescribeStateMachineInput{ + StateMachineArn: aws.String(d.Id()), + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" { + d.SetId("") + return nil + } + return err + } + + d.Set("definition", sm.Definition) + d.Set("name", sm.Name) + d.Set("role_arn", sm.RoleArn) + d.Set("status", sm.Status) + + if err := d.Set("creation_date", sm.CreationDate.Format(time.RFC3339)); err != nil { + log.Printf("[DEBUG] Error setting creation_date: %s", err) + } + + return nil +} + +func resourceAwsSfnStateMachineDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sfnconn + log.Printf("[DEBUG] Deleting Step Function State Machine: %s", d.Id()) + + return resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteStateMachine(&sfn.DeleteStateMachineInput{ + StateMachineArn: aws.String(d.Id()), + }) + + if err == nil { + return nil + } + + return resource.NonRetryableError(err) + }) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_simpledb_domain.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_simpledb_domain.go new file mode 100644 index 000000000..8450342e3 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_simpledb_domain.go @@ -0,0 +1,84 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/simpledb" +) + +func resourceAwsSimpleDBDomain() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSimpleDBDomainCreate, + Read: resourceAwsSimpleDBDomainRead, + Delete: resourceAwsSimpleDBDomainDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsSimpleDBDomainCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).simpledbconn + + name := d.Get("name").(string) + input := &simpledb.CreateDomainInput{ + DomainName: aws.String(name), + } + _, err := conn.CreateDomain(input) + if err != nil { + return fmt.Errorf("Create SimpleDB Domain failed: %s", err) + } + + d.SetId(name) + return nil +} + +func resourceAwsSimpleDBDomainRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).simpledbconn + + input := &simpledb.DomainMetadataInput{ + DomainName: aws.String(d.Id()), + } + _, err := conn.DomainMetadata(input) + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "NoSuchDomain" { + log.Printf("[WARN] Removing SimpleDB domain %q because it's gone.", d.Id()) + d.SetId("") + return nil + } + } + if err != nil { + return err + } + + 
d.Set("name", d.Id()) + return nil +} + +func resourceAwsSimpleDBDomainDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).simpledbconn + + input := &simpledb.DeleteDomainInput{ + DomainName: aws.String(d.Id()), + } + _, err := conn.DeleteDomain(input) + if err != nil { + return fmt.Errorf("Delete SimpleDB Domain failed: %s", err) + } + + d.SetId("") + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_snapshot_create_volume_permission.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_snapshot_create_volume_permission.go new file mode 100644 index 000000000..6a7fd40a1 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_snapshot_create_volume_permission.go @@ -0,0 +1,152 @@ +package aws + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSnapshotCreateVolumePermission() *schema.Resource { + return &schema.Resource{ + Exists: resourceAwsSnapshotCreateVolumePermissionExists, + Create: resourceAwsSnapshotCreateVolumePermissionCreate, + Read: resourceAwsSnapshotCreateVolumePermissionRead, + Delete: resourceAwsSnapshotCreateVolumePermissionDelete, + + Schema: map[string]*schema.Schema{ + "snapshot_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "account_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsSnapshotCreateVolumePermissionExists(d *schema.ResourceData, meta interface{}) (bool, error) { + conn := meta.(*AWSClient).ec2conn + + snapshot_id := d.Get("snapshot_id").(string) + account_id := d.Get("account_id").(string) + return hasCreateVolumePermission(conn, snapshot_id, account_id) +} + +func resourceAwsSnapshotCreateVolumePermissionCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + snapshot_id := d.Get("snapshot_id").(string) + account_id := d.Get("account_id").(string) + + _, err := conn.ModifySnapshotAttribute(&ec2.ModifySnapshotAttributeInput{ + SnapshotId: aws.String(snapshot_id), + Attribute: aws.String("createVolumePermission"), + CreateVolumePermission: &ec2.CreateVolumePermissionModifications{ + Add: []*ec2.CreateVolumePermission{ + &ec2.CreateVolumePermission{UserId: aws.String(account_id)}, + }, + }, + }) + if err != nil { + return fmt.Errorf("Error adding snapshot createVolumePermission: %s", err) + } + + d.SetId(fmt.Sprintf("%s-%s", snapshot_id, account_id)) + + // Wait for the account to appear in the permission list + stateConf := &resource.StateChangeConf{ + Pending: []string{"denied"}, + Target: []string{"granted"}, + Refresh: resourceAwsSnapshotCreateVolumePermissionStateRefreshFunc(conn, snapshot_id, account_id), + Timeout: 5 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf( + "Error waiting for snapshot createVolumePermission (%s) to be added: %s", + d.Id(), err) + } + + return nil +} + +func resourceAwsSnapshotCreateVolumePermissionRead(d *schema.ResourceData, meta interface{}) error { + return nil +} + +func resourceAwsSnapshotCreateVolumePermissionDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + snapshot_id := 
d.Get("snapshot_id").(string) + account_id := d.Get("account_id").(string) + + _, err := conn.ModifySnapshotAttribute(&ec2.ModifySnapshotAttributeInput{ + SnapshotId: aws.String(snapshot_id), + Attribute: aws.String("createVolumePermission"), + CreateVolumePermission: &ec2.CreateVolumePermissionModifications{ + Remove: []*ec2.CreateVolumePermission{ + &ec2.CreateVolumePermission{UserId: aws.String(account_id)}, + }, + }, + }) + if err != nil { + return fmt.Errorf("Error removing snapshot createVolumePermission: %s", err) + } + + // Wait for the account to disappear from the permission list + stateConf := &resource.StateChangeConf{ + Pending: []string{"granted"}, + Target: []string{"denied"}, + Refresh: resourceAwsSnapshotCreateVolumePermissionStateRefreshFunc(conn, snapshot_id, account_id), + Timeout: 5 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf( + "Error waiting for snapshot createVolumePermission (%s) to be removed: %s", + d.Id(), err) + } + + return nil +} + +func hasCreateVolumePermission(conn *ec2.EC2, snapshot_id string, account_id string) (bool, error) { + _, state, err := resourceAwsSnapshotCreateVolumePermissionStateRefreshFunc(conn, snapshot_id, account_id)() + if err != nil { + return false, err + } + if state == "granted" { + return true, nil + } else { + return false, nil + } +} + +func resourceAwsSnapshotCreateVolumePermissionStateRefreshFunc(conn *ec2.EC2, snapshot_id string, account_id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + attrs, err := conn.DescribeSnapshotAttribute(&ec2.DescribeSnapshotAttributeInput{ + SnapshotId: aws.String(snapshot_id), + Attribute: aws.String("createVolumePermission"), + }) + if err != nil { + return nil, "", fmt.Errorf("Error refreshing snapshot createVolumePermission state: %s", err) + } + + for _, vp := range attrs.CreateVolumePermissions { + if *vp.UserId == account_id { + return attrs, "granted", nil + } + } + return attrs, "denied", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_topic.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_topic.go new file mode 100644 index 000000000..63d308518 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_topic.go @@ -0,0 +1,227 @@ +package aws + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/sns" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +// Mutable attributes +var SNSAttributeMap = map[string]string{ + "arn": "TopicArn", + "display_name": "DisplayName", + "policy": "Policy", + "delivery_policy": "DeliveryPolicy", +} + +func resourceAwsSnsTopic() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSnsTopicCreate, + Read: resourceAwsSnsTopicRead, + Update: resourceAwsSnsTopicUpdate, + Delete: resourceAwsSnsTopicDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "display_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "policy": &schema.Schema{ + Type: schema.TypeString, + 
Optional: true, + Computed: true, + ValidateFunc: validateJsonString, + DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, + StateFunc: func(v interface{}) string { + json, _ := normalizeJsonString(v) + return json + }, + }, + "delivery_policy": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + ValidateFunc: validateJsonString, + DiffSuppressFunc: suppressEquivalentJsonDiffs, + StateFunc: func(v interface{}) string { + json, _ := normalizeJsonString(v) + return json + }, + }, + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsSnsTopicCreate(d *schema.ResourceData, meta interface{}) error { + snsconn := meta.(*AWSClient).snsconn + + name := d.Get("name").(string) + + log.Printf("[DEBUG] SNS create topic: %s", name) + + req := &sns.CreateTopicInput{ + Name: aws.String(name), + } + + output, err := snsconn.CreateTopic(req) + if err != nil { + return fmt.Errorf("Error creating SNS topic: %s", err) + } + + d.SetId(*output.TopicArn) + + // Write the ARN to the 'arn' field for export + d.Set("arn", *output.TopicArn) + + return resourceAwsSnsTopicUpdate(d, meta) +} + +func resourceAwsSnsTopicUpdate(d *schema.ResourceData, meta interface{}) error { + r := *resourceAwsSnsTopic() + + for k, _ := range r.Schema { + if attrKey, ok := SNSAttributeMap[k]; ok { + if d.HasChange(k) { + log.Printf("[DEBUG] Updating %s", attrKey) + _, n := d.GetChange(k) + // Ignore an empty policy + if !(k == "policy" && n == "") { + // Make API call to update attributes + req := sns.SetTopicAttributesInput{ + TopicArn: aws.String(d.Id()), + AttributeName: aws.String(attrKey), + AttributeValue: aws.String(n.(string)), + } + + // Retry the update in the event of an eventually consistent style of + // error, where say an IAM resource is successfully created but not + // actually available. 
See https://github.com/hashicorp/terraform/issues/3660
+				log.Printf("[DEBUG] Updating SNS Topic (%s) attributes request: %s", d.Id(), req)
+				stateConf := &resource.StateChangeConf{
+					Pending:    []string{"retrying"},
+					Target:     []string{"success"},
+					Refresh:    resourceAwsSNSUpdateRefreshFunc(meta, req),
+					Timeout:    1 * time.Minute,
+					MinTimeout: 3 * time.Second,
+				}
+				_, err := stateConf.WaitForState()
+				if err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	return resourceAwsSnsTopicRead(d, meta)
+}
+
+func resourceAwsSNSUpdateRefreshFunc(
+	meta interface{}, params sns.SetTopicAttributesInput) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		snsconn := meta.(*AWSClient).snsconn
+		if _, err := snsconn.SetTopicAttributes(&params); err != nil {
+			log.Printf("[WARN] Error updating topic attributes: %s", err)
+			if awsErr, ok := err.(awserr.Error); ok {
+				// if the error contains the PrincipalNotFound message, we can retry
+				if strings.Contains(awsErr.Message(), "PrincipalNotFound") {
+					log.Printf("[DEBUG] Retrying AWS SNS Topic Update: %s", params)
+					return nil, "retrying", nil
+				}
+			}
+			return nil, "failed", err
+		}
+		return 42, "success", nil
+	}
+}
+
+func resourceAwsSnsTopicRead(d *schema.ResourceData, meta interface{}) error {
+	snsconn := meta.(*AWSClient).snsconn
+
+	attributeOutput, err := snsconn.GetTopicAttributes(&sns.GetTopicAttributesInput{
+		TopicArn: aws.String(d.Id()),
+	})
+	if err != nil {
+		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFound" {
+			log.Printf("[WARN] SNS Topic (%s) not found, error code (404)", d.Id())
+			d.SetId("")
+			return nil
+		}
+
+		return err
+	}
+
+	if attributeOutput.Attributes != nil && len(attributeOutput.Attributes) > 0 {
+		attrmap := attributeOutput.Attributes
+		resource := *resourceAwsSnsTopic()
+		// iKey = internal struct key, oKey = AWS Attribute Map key
+		for iKey, oKey := range SNSAttributeMap {
+			log.Printf("[DEBUG] Reading %s => %s", iKey, oKey)
+
+			if attrmap[oKey] != nil {
+				// Some of the fetched attributes are stateful properties such as
+				// the number of subscriptions, the owner, etc.; skip those
+				if resource.Schema[iKey] != nil {
+					var value string
+					if iKey == "policy" {
+						value, err = normalizeJsonString(*attrmap[oKey])
+						if err != nil {
+							return errwrap.Wrapf("policy contains invalid JSON: {{err}}", err)
+						}
+					} else {
+						value = *attrmap[oKey]
+					}
+					log.Printf("[DEBUG] Reading %s => %s -> %s", iKey, oKey, value)
+					d.Set(iKey, value)
+				}
+			}
+		}
+	}
+
+	// If we have no name set (import) then determine it from the ARN.
+	// This is a bit of a heuristic for now since AWS provides no other
+	// way to get it.
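+	// e.g. "arn:aws:sns:us-west-2:123456789012:my-topic" yields "my-topic".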
+ if _, ok := d.GetOk("name"); !ok { + arn := d.Get("arn").(string) + idx := strings.LastIndex(arn, ":") + if idx > -1 { + d.Set("name", arn[idx+1:]) + } + } + + return nil +} + +func resourceAwsSnsTopicDelete(d *schema.ResourceData, meta interface{}) error { + snsconn := meta.(*AWSClient).snsconn + + log.Printf("[DEBUG] SNS Delete Topic: %s", d.Id()) + _, err := snsconn.DeleteTopic(&sns.DeleteTopicInput{ + TopicArn: aws.String(d.Id()), + }) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_topic_policy.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_topic_policy.go new file mode 100644 index 000000000..288a9a449 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_topic_policy.go @@ -0,0 +1,179 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/sns" +) + +func resourceAwsSnsTopicPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSnsTopicPolicyUpsert, + Read: resourceAwsSnsTopicPolicyRead, + Update: resourceAwsSnsTopicPolicyUpsert, + Delete: resourceAwsSnsTopicPolicyDelete, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "policy": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateJsonString, + DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, + }, + }, + } +} + +func resourceAwsSnsTopicPolicyUpsert(d *schema.ResourceData, meta interface{}) error { + arn := d.Get("arn").(string) + req := sns.SetTopicAttributesInput{ + TopicArn: aws.String(arn), + AttributeName: aws.String("Policy"), + AttributeValue: aws.String(d.Get("policy").(string)), + } + + d.SetId(arn) + + // Retry the update in the event of an eventually consistent style of + // error, where say an IAM resource is successfully created but not + // actually available. 
See https://github.com/hashicorp/terraform/issues/3660 + log.Printf("[DEBUG] Updating SNS Topic Policy: %s", req) + stateConf := &resource.StateChangeConf{ + Pending: []string{"retrying"}, + Target: []string{"success"}, + Refresh: resourceAwsSNSUpdateRefreshFunc(meta, req), + Timeout: 3 * time.Minute, + MinTimeout: 3 * time.Second, + } + _, err := stateConf.WaitForState() + if err != nil { + return err + } + + return resourceAwsSnsTopicPolicyRead(d, meta) +} + +func resourceAwsSnsTopicPolicyRead(d *schema.ResourceData, meta interface{}) error { + snsconn := meta.(*AWSClient).snsconn + + attributeOutput, err := snsconn.GetTopicAttributes(&sns.GetTopicAttributesInput{ + TopicArn: aws.String(d.Id()), + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFound" { + log.Printf("[WARN] SNS Topic (%s) not found, error code (404)", d.Id()) + d.SetId("") + return nil + } + + return err + } + + if attributeOutput.Attributes == nil { + log.Printf("[WARN] SNS Topic (%q) attributes not found (nil)", d.Id()) + d.SetId("") + return nil + } + attrmap := attributeOutput.Attributes + + policy, ok := attrmap["Policy"] + if !ok { + log.Printf("[WARN] SNS Topic (%q) policy not found in attributes", d.Id()) + d.SetId("") + return nil + } + + d.Set("policy", policy) + + return nil +} + +func resourceAwsSnsTopicPolicyDelete(d *schema.ResourceData, meta interface{}) error { + accountId, err := getAccountIdFromSnsTopicArn(d.Id(), meta.(*AWSClient).partition) + if err != nil { + return err + } + + req := sns.SetTopicAttributesInput{ + TopicArn: aws.String(d.Id()), + AttributeName: aws.String("Policy"), + // It is impossible to delete a policy or set to empty + // (confirmed by AWS Support representative) + // so we instead set it back to the default one + AttributeValue: aws.String(buildDefaultSnsTopicPolicy(d.Id(), accountId)), + } + + // Retry the update in the event of an eventually consistent style of + // error, where say an IAM resource is successfully created but not + // actually available. 
See https://github.com/hashicorp/terraform/issues/3660 + log.Printf("[DEBUG] Resetting SNS Topic Policy to default: %s", req) + stateConf := &resource.StateChangeConf{ + Pending: []string{"retrying"}, + Target: []string{"success"}, + Refresh: resourceAwsSNSUpdateRefreshFunc(meta, req), + Timeout: 3 * time.Minute, + MinTimeout: 3 * time.Second, + } + _, err = stateConf.WaitForState() + if err != nil { + return err + } + return nil +} + +func getAccountIdFromSnsTopicArn(arn, partition string) (string, error) { + // arn:aws:sns:us-west-2:123456789012:test-new + // arn:aws-us-gov:sns:us-west-2:123456789012:test-new + re := regexp.MustCompile(fmt.Sprintf("^arn:%s:sns:[^:]+:([0-9]{12}):.+", partition)) + matches := re.FindStringSubmatch(arn) + if len(matches) != 2 { + return "", fmt.Errorf("Unable to get account ID from ARN (%q)", arn) + } + return matches[1], nil +} + +func buildDefaultSnsTopicPolicy(topicArn, accountId string) string { + return fmt.Sprintf(`{ + "Version": "2008-10-17", + "Id": "__default_policy_ID", + "Statement": [ + { + "Sid": "__default_statement_ID", + "Effect": "Allow", + "Principal": { + "AWS": "*" + }, + "Action": [ + "SNS:GetTopicAttributes", + "SNS:SetTopicAttributes", + "SNS:AddPermission", + "SNS:RemovePermission", + "SNS:DeleteTopic", + "SNS:Subscribe", + "SNS:ListSubscriptionsByTopic", + "SNS:Publish", + "SNS:Receive" + ], + "Resource": "%s", + "Condition": { + "StringEquals": { + "AWS:SourceOwner": "%s" + } + } + } + ] +}`, topicArn, accountId) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_topic_subscription.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_topic_subscription.go new file mode 100644 index 000000000..0259230a4 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sns_topic_subscription.go @@ -0,0 +1,298 @@ +package aws + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/sns" +) + +const awsSNSPendingConfirmationMessage = "pending confirmation" +const awsSNSPendingConfirmationMessageWithoutSpaces = "pendingconfirmation" + +var SNSSubscriptionAttributeMap = map[string]string{ + "topic_arn": "TopicArn", + "endpoint": "Endpoint", + "protocol": "Protocol", + "raw_message_delivery": "RawMessageDelivery", +} + +func resourceAwsSnsTopicSubscription() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSnsTopicSubscriptionCreate, + Read: resourceAwsSnsTopicSubscriptionRead, + Update: resourceAwsSnsTopicSubscriptionUpdate, + Delete: resourceAwsSnsTopicSubscriptionDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "protocol": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + ValidateFunc: validateSNSSubscriptionProtocol, + }, + "endpoint": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "endpoint_auto_confirms": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "confirmation_timeout_in_minutes": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 1, + }, + "topic_arn": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "delivery_policy": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + 
"raw_message_delivery": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsSnsTopicSubscriptionCreate(d *schema.ResourceData, meta interface{}) error { + snsconn := meta.(*AWSClient).snsconn + + output, err := subscribeToSNSTopic(d, snsconn) + + if err != nil { + return err + } + + if subscriptionHasPendingConfirmation(output.SubscriptionArn) { + log.Printf("[WARN] Invalid SNS Subscription, received a \"%s\" ARN", awsSNSPendingConfirmationMessage) + return nil + } + + log.Printf("New subscription ARN: %s", *output.SubscriptionArn) + d.SetId(*output.SubscriptionArn) + + // Write the ARN to the 'arn' field for export + d.Set("arn", *output.SubscriptionArn) + + return resourceAwsSnsTopicSubscriptionUpdate(d, meta) +} + +func resourceAwsSnsTopicSubscriptionUpdate(d *schema.ResourceData, meta interface{}) error { + snsconn := meta.(*AWSClient).snsconn + + // If any changes happened, un-subscribe and re-subscribe + if d.HasChange("protocol") || d.HasChange("endpoint") || d.HasChange("topic_arn") { + log.Printf("[DEBUG] Updating subscription %s", d.Id()) + // Unsubscribe + _, err := snsconn.Unsubscribe(&sns.UnsubscribeInput{ + SubscriptionArn: aws.String(d.Id()), + }) + + if err != nil { + return fmt.Errorf("Error unsubscribing from SNS topic: %s", err) + } + + // Re-subscribe and set id + output, err := subscribeToSNSTopic(d, snsconn) + d.SetId(*output.SubscriptionArn) + d.Set("arn", *output.SubscriptionArn) + } + + if d.HasChange("raw_message_delivery") { + _, n := d.GetChange("raw_message_delivery") + + attrValue := "false" + + if n.(bool) { + attrValue = "true" + } + + req := &sns.SetSubscriptionAttributesInput{ + SubscriptionArn: aws.String(d.Id()), + AttributeName: aws.String("RawMessageDelivery"), + AttributeValue: aws.String(attrValue), + } + _, err := snsconn.SetSubscriptionAttributes(req) + + if err != nil { + return fmt.Errorf("Unable to set raw message delivery attribute on subscription") + } + } + + return resourceAwsSnsTopicSubscriptionRead(d, meta) +} + +func resourceAwsSnsTopicSubscriptionRead(d *schema.ResourceData, meta interface{}) error { + snsconn := meta.(*AWSClient).snsconn + + log.Printf("[DEBUG] Loading subscription %s", d.Id()) + + attributeOutput, err := snsconn.GetSubscriptionAttributes(&sns.GetSubscriptionAttributesInput{ + SubscriptionArn: aws.String(d.Id()), + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFound" { + log.Printf("[WARN] SNS Topic Subscription (%s) not found, error code (404)", d.Id()) + d.SetId("") + return nil + } + + return err + } + + if attributeOutput.Attributes != nil && len(attributeOutput.Attributes) > 0 { + attrHash := attributeOutput.Attributes + resource := *resourceAwsSnsTopicSubscription() + + for iKey, oKey := range SNSSubscriptionAttributeMap { + log.Printf("[DEBUG] Reading %s => %s", iKey, oKey) + + if attrHash[oKey] != nil { + if resource.Schema[iKey] != nil { + var value string + value = *attrHash[oKey] + log.Printf("[DEBUG] Reading %s => %s -> %s", iKey, oKey, value) + d.Set(iKey, value) + } + } + } + } + + return nil +} + +func resourceAwsSnsTopicSubscriptionDelete(d *schema.ResourceData, meta interface{}) error { + snsconn := meta.(*AWSClient).snsconn + + log.Printf("[DEBUG] SNS delete topic subscription: %s", d.Id()) + _, err := snsconn.Unsubscribe(&sns.UnsubscribeInput{ + SubscriptionArn: aws.String(d.Id()), + }) + if err != nil { + return err + } + 
+	return nil
+}
+
+func subscribeToSNSTopic(d *schema.ResourceData, snsconn *sns.SNS) (output *sns.SubscribeOutput, err error) {
+	protocol := d.Get("protocol").(string)
+	endpoint := d.Get("endpoint").(string)
+	topic_arn := d.Get("topic_arn").(string)
+	endpoint_auto_confirms := d.Get("endpoint_auto_confirms").(bool)
+	confirmation_timeout_in_minutes := d.Get("confirmation_timeout_in_minutes").(int)
+
+	if strings.Contains(protocol, "http") && !endpoint_auto_confirms {
+		return nil, fmt.Errorf("Protocol http/https is only supported for endpoints which auto confirm")
+	}
+
+	log.Printf("[DEBUG] SNS create topic subscription: %s (%s) @ '%s'", endpoint, protocol, topic_arn)
+
+	req := &sns.SubscribeInput{
+		Protocol: aws.String(protocol),
+		Endpoint: aws.String(endpoint),
+		TopicArn: aws.String(topic_arn),
+	}
+
+	output, err = snsconn.Subscribe(req)
+	if err != nil {
+		return nil, fmt.Errorf("Error creating SNS topic subscription: %s", err)
+	}
+
+	log.Printf("[DEBUG] Finished subscribing to topic %s with subscription arn %s", topic_arn, *output.SubscriptionArn)
+
+	if strings.Contains(protocol, "http") && subscriptionHasPendingConfirmation(output.SubscriptionArn) {
+
+		log.Printf("[DEBUG] SNS create topic subscription is pending so fetching the subscription list for topic : %s (%s) @ '%s'", endpoint, protocol, topic_arn)
+
+		err = resource.Retry(time.Duration(confirmation_timeout_in_minutes)*time.Minute, func() *resource.RetryError {
+
+			subscription, err := findSubscriptionByNonID(d, snsconn)
+
+			if subscription != nil {
+				output.SubscriptionArn = subscription.SubscriptionArn
+				return nil
+			}
+
+			if err != nil {
+				return resource.RetryableError(
+					fmt.Errorf("Error fetching subscriptions for SNS topic %s: %s", topic_arn, err))
+			}
+
+			return resource.RetryableError(
+				fmt.Errorf("Endpoint (%s) did not autoconfirm the subscription for topic %s", endpoint, topic_arn))
+		})
+
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	log.Printf("[DEBUG] Created new subscription: %s", *output.SubscriptionArn)
%s", *output.SubscriptionArn) + return output, nil +} + +// finds a subscription using protocol, endpoint and topic_arn (which is a key in sns subscription) +func findSubscriptionByNonID(d *schema.ResourceData, snsconn *sns.SNS) (*sns.Subscription, error) { + protocol := d.Get("protocol").(string) + endpoint := d.Get("endpoint").(string) + topic_arn := d.Get("topic_arn").(string) + + req := &sns.ListSubscriptionsByTopicInput{ + TopicArn: aws.String(topic_arn), + } + + for { + + res, err := snsconn.ListSubscriptionsByTopic(req) + + if err != nil { + return nil, fmt.Errorf("Error fetching subscripitions for topic %s : %s", topic_arn, err) + } + + for _, subscription := range res.Subscriptions { + log.Printf("[DEBUG] check subscription with EndPoint %s, Protocol %s, topicARN %s and SubscriptionARN %s", *subscription.Endpoint, *subscription.Protocol, *subscription.TopicArn, *subscription.SubscriptionArn) + if *subscription.Endpoint == endpoint && *subscription.Protocol == protocol && *subscription.TopicArn == topic_arn && !subscriptionHasPendingConfirmation(subscription.SubscriptionArn) { + return subscription, nil + } + } + + // if there are more than 100 subscriptions then go to the next 100 otherwise return nil + if res.NextToken != nil { + req.NextToken = res.NextToken + } else { + return nil, nil + } + } +} + +// returns true if arn is nil or has both pending and confirmation words in the arn +func subscriptionHasPendingConfirmation(arn *string) bool { + if arn != nil && !strings.Contains(strings.Replace(strings.ToLower(*arn), " ", "", -1), awsSNSPendingConfirmationMessageWithoutSpaces) { + return false + } + + return true +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_datafeed_subscription.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_datafeed_subscription.go new file mode 100644 index 000000000..2e3322710 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_datafeed_subscription.go @@ -0,0 +1,93 @@ +package aws + +import ( + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSpotDataFeedSubscription() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSpotDataFeedSubscriptionCreate, + Read: resourceAwsSpotDataFeedSubscriptionRead, + Delete: resourceAwsSpotDataFeedSubscriptionDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "bucket": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsSpotDataFeedSubscriptionCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + params := &ec2.CreateSpotDatafeedSubscriptionInput{ + Bucket: aws.String(d.Get("bucket").(string)), + } + + if v, ok := d.GetOk("prefix"); ok { + params.Prefix = aws.String(v.(string)) + } + + log.Printf("[INFO] Creating Spot Datafeed Subscription") + _, err := conn.CreateSpotDatafeedSubscription(params) + if err != nil { + return errwrap.Wrapf("Error Creating Spot Datafeed Subscription: {{err}}", err) + } + + d.SetId("spot-datafeed-subscription") + + return resourceAwsSpotDataFeedSubscriptionRead(d, meta) +} +func 
+func resourceAwsSpotDataFeedSubscriptionRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	resp, err := conn.DescribeSpotDatafeedSubscription(&ec2.DescribeSpotDatafeedSubscriptionInput{})
+	if err != nil {
+		awsErr, ok := err.(awserr.Error)
+		if ok && awsErr.Code() == "InvalidSpotDatafeed.NotFound" {
+			log.Printf("[WARN] Spot Datafeed Subscription not found, removing from state")
+			d.SetId("")
+			return nil
+		}
+		return errwrap.Wrapf("Error Describing Spot Datafeed Subscription: {{err}}", err)
+	}
+
+	if resp == nil {
+		log.Printf("[WARN] Spot Datafeed Subscription not found, removing from state")
+		d.SetId("")
+		return nil
+	}
+
+	subscription := *resp.SpotDatafeedSubscription
+	d.Set("bucket", subscription.Bucket)
+	d.Set("prefix", subscription.Prefix)
+
+	return nil
+}
+
+func resourceAwsSpotDataFeedSubscriptionDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	log.Printf("[INFO] Deleting Spot Datafeed Subscription")
+	_, err := conn.DeleteSpotDatafeedSubscription(&ec2.DeleteSpotDatafeedSubscriptionInput{})
+	if err != nil {
+		return errwrap.Wrapf("Error deleting Spot Datafeed Subscription: {{err}}", err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_fleet_request.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_fleet_request.go
new file mode 100644
index 000000000..cf1a21987
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_fleet_request.go
@@ -0,0 +1,1041 @@
+package aws
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/hashicorp/terraform/helper/hashcode"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsSpotFleetRequest() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsSpotFleetRequestCreate,
+		Read:   resourceAwsSpotFleetRequestRead,
+		Delete: resourceAwsSpotFleetRequestDelete,
+		Update: resourceAwsSpotFleetRequestUpdate,
+
+		SchemaVersion: 1,
+		MigrateState:  resourceAwsSpotFleetRequestMigrateState,
+
+		Schema: map[string]*schema.Schema{
+			"iam_fleet_role": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"replace_unhealthy_instances": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				ForceNew: true,
+				Default:  false,
+			},
+			// http://docs.aws.amazon.com/sdk-for-go/api/service/ec2.html#type-SpotFleetLaunchSpecification
+			// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SpotFleetLaunchSpecification.html
+			"launch_specification": {
+				Type:     schema.TypeSet,
+				Required: true,
+				ForceNew: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"vpc_security_group_ids": {
+							Type:     schema.TypeSet,
+							Optional: true,
+							Computed: true,
+							Elem:     &schema.Schema{Type: schema.TypeString},
+							Set:      schema.HashString,
+						},
+						"associate_public_ip_address": {
+							Type:     schema.TypeBool,
+							Optional: true,
+							Default:  false,
+						},
+						"ebs_block_device": {
+							Type:     schema.TypeSet,
+							Optional: true,
+							Computed: true,
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"delete_on_termination": {
+										Type:     schema.TypeBool,
+										Optional: true,
+										Default:  true,
+										ForceNew: true,
+									},
+									"device_name": {
+										Type:     schema.TypeString,
+										Required: true,
+										ForceNew: true,
+									},
+									"encrypted": {
+
Type: schema.TypeBool, + Optional: true, + Computed: true, + ForceNew: true, + }, + "iops": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + "snapshot_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "volume_size": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + "volume_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + }, + Set: hashEbsBlockDevice, + }, + "ephemeral_block_device": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "device_name": { + Type: schema.TypeString, + Required: true, + }, + "virtual_name": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + Set: hashEphemeralBlockDevice, + }, + "root_block_device": { + // TODO: This is a set because we don't support singleton + // sub-resources today. We'll enforce that the set only ever has + // length zero or one below. When TF gains support for + // sub-resources this can be converted. + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + // "You can only modify the volume size, volume type, and Delete on + // Termination flag on the block device mapping entry for the root + // device volume." - bit.ly/ec2bdmap + Schema: map[string]*schema.Schema{ + "delete_on_termination": { + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + "iops": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + "volume_size": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + "volume_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + }, + Set: hashRootBlockDevice, + }, + "ebs_optimized": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "iam_instance_profile": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + "ami": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "instance_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "key_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + ValidateFunc: validateSpotFleetRequestKeyName, + }, + "monitoring": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "placement_group": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "placement_tenancy": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "spot_price": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "user_data": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + StateFunc: func(v interface{}) string { + switch v.(type) { + case string: + return userDataHashSum(v.(string)) + default: + return "" + } + }, + }, + "weighted_capacity": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "subnet_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "availability_zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + }, + Set: hashLaunchSpecification, + }, + // Everything on a spot fleet is ForceNew except target_capacity + "target_capacity": { + Type: schema.TypeInt, + Required: true, + ForceNew: false, + }, + "allocation_strategy": { + Type: 
schema.TypeString, + Optional: true, + Default: "lowestPrice", + ForceNew: true, + }, + "excess_capacity_termination_policy": { + Type: schema.TypeString, + Optional: true, + Default: "Default", + ForceNew: false, + }, + "spot_price": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "terminate_instances_with_expiration": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + "valid_from": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "valid_until": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "spot_request_state": { + Type: schema.TypeString, + Computed: true, + }, + "client_token": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func buildSpotFleetLaunchSpecification(d map[string]interface{}, meta interface{}) (*ec2.SpotFleetLaunchSpecification, error) { + conn := meta.(*AWSClient).ec2conn + + opts := &ec2.SpotFleetLaunchSpecification{ + ImageId: aws.String(d["ami"].(string)), + InstanceType: aws.String(d["instance_type"].(string)), + SpotPrice: aws.String(d["spot_price"].(string)), + } + + placement := new(ec2.SpotPlacement) + if v, ok := d["availability_zone"]; ok { + placement.AvailabilityZone = aws.String(v.(string)) + opts.Placement = placement + } + + if v, ok := d["placement_tenancy"]; ok { + placement.Tenancy = aws.String(v.(string)) + opts.Placement = placement + } + + if v, ok := d["ebs_optimized"]; ok { + opts.EbsOptimized = aws.Bool(v.(bool)) + } + + if v, ok := d["monitoring"]; ok { + opts.Monitoring = &ec2.SpotFleetMonitoring{ + Enabled: aws.Bool(v.(bool)), + } + } + + if v, ok := d["iam_instance_profile"]; ok { + opts.IamInstanceProfile = &ec2.IamInstanceProfileSpecification{ + Name: aws.String(v.(string)), + } + } + + if v, ok := d["user_data"]; ok { + opts.UserData = aws.String(base64Encode([]byte(v.(string)))) + } + + if v, ok := d["key_name"]; ok { + opts.KeyName = aws.String(v.(string)) + } + + if v, ok := d["weighted_capacity"]; ok && v != "" { + wc, err := strconv.ParseFloat(v.(string), 64) + if err != nil { + return nil, err + } + opts.WeightedCapacity = aws.Float64(wc) + } + + var securityGroupIds []*string + if v, ok := d["vpc_security_group_ids"]; ok { + if s := v.(*schema.Set); s.Len() > 0 { + for _, v := range s.List() { + securityGroupIds = append(securityGroupIds, aws.String(v.(string))) + } + } + } + + subnetId, hasSubnetId := d["subnet_id"] + if hasSubnetId { + opts.SubnetId = aws.String(subnetId.(string)) + } + + associatePublicIpAddress, hasPublicIpAddress := d["associate_public_ip_address"] + if hasPublicIpAddress && associatePublicIpAddress.(bool) == true && hasSubnetId { + + // If we have a non-default VPC / Subnet specified, we can flag + // AssociatePublicIpAddress to get a Public IP assigned. By default these are not provided. 
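+		// (This mirrors what the EC2 console does for instances launched
+		// into a subnet with auto-assign public IP enabled.)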
+ // You cannot specify both SubnetId and the NetworkInterface.0.* parameters though, otherwise + // you get: Network interfaces and an instance-level subnet ID may not be specified on the same request + // You also need to attach Security Groups to the NetworkInterface instead of the instance, + // to avoid: Network interfaces and an instance-level security groups may not be specified on + // the same request + ni := &ec2.InstanceNetworkInterfaceSpecification{ + AssociatePublicIpAddress: aws.Bool(true), + DeleteOnTermination: aws.Bool(true), + DeviceIndex: aws.Int64(int64(0)), + SubnetId: aws.String(subnetId.(string)), + Groups: securityGroupIds, + } + + opts.NetworkInterfaces = []*ec2.InstanceNetworkInterfaceSpecification{ni} + opts.SubnetId = aws.String("") + } else { + for _, id := range securityGroupIds { + opts.SecurityGroups = append(opts.SecurityGroups, &ec2.GroupIdentifier{GroupId: id}) + } + } + + blockDevices, err := readSpotFleetBlockDeviceMappingsFromConfig(d, conn) + if err != nil { + return nil, err + } + if len(blockDevices) > 0 { + opts.BlockDeviceMappings = blockDevices + } + + return opts, nil +} + +func validateSpotFleetRequestKeyName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if value == "" { + errors = append(errors, fmt.Errorf("Key name cannot be empty.")) + } + + return +} + +func readSpotFleetBlockDeviceMappingsFromConfig( + d map[string]interface{}, conn *ec2.EC2) ([]*ec2.BlockDeviceMapping, error) { + blockDevices := make([]*ec2.BlockDeviceMapping, 0) + + if v, ok := d["ebs_block_device"]; ok { + vL := v.(*schema.Set).List() + for _, v := range vL { + bd := v.(map[string]interface{}) + ebs := &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)), + } + + if v, ok := bd["snapshot_id"].(string); ok && v != "" { + ebs.SnapshotId = aws.String(v) + } + + if v, ok := bd["encrypted"].(bool); ok && v { + ebs.Encrypted = aws.Bool(v) + } + + if v, ok := bd["volume_size"].(int); ok && v != 0 { + ebs.VolumeSize = aws.Int64(int64(v)) + } + + if v, ok := bd["volume_type"].(string); ok && v != "" { + ebs.VolumeType = aws.String(v) + } + + if v, ok := bd["iops"].(int); ok && v > 0 { + ebs.Iops = aws.Int64(int64(v)) + } + + blockDevices = append(blockDevices, &ec2.BlockDeviceMapping{ + DeviceName: aws.String(bd["device_name"].(string)), + Ebs: ebs, + }) + } + } + + if v, ok := d["ephemeral_block_device"]; ok { + vL := v.(*schema.Set).List() + for _, v := range vL { + bd := v.(map[string]interface{}) + blockDevices = append(blockDevices, &ec2.BlockDeviceMapping{ + DeviceName: aws.String(bd["device_name"].(string)), + VirtualName: aws.String(bd["virtual_name"].(string)), + }) + } + } + + if v, ok := d["root_block_device"]; ok { + vL := v.(*schema.Set).List() + if len(vL) > 1 { + return nil, fmt.Errorf("Cannot specify more than one root_block_device.") + } + for _, v := range vL { + bd := v.(map[string]interface{}) + ebs := &ec2.EbsBlockDevice{ + DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)), + } + + if v, ok := bd["volume_size"].(int); ok && v != 0 { + ebs.VolumeSize = aws.Int64(int64(v)) + } + + if v, ok := bd["volume_type"].(string); ok && v != "" { + ebs.VolumeType = aws.String(v) + } + + if v, ok := bd["iops"].(int); ok && v > 0 { + ebs.Iops = aws.Int64(int64(v)) + } + + if dn, err := fetchRootDeviceName(d["ami"].(string), conn); err == nil { + if dn == nil { + return nil, fmt.Errorf( + "Expected 1 AMI for ID: %s, got none", + d["ami"].(string)) + } + + blockDevices = 
append(blockDevices, &ec2.BlockDeviceMapping{ + DeviceName: dn, + Ebs: ebs, + }) + } else { + return nil, err + } + } + } + + return blockDevices, nil +} + +func buildAwsSpotFleetLaunchSpecifications( + d *schema.ResourceData, meta interface{}) ([]*ec2.SpotFleetLaunchSpecification, error) { + + user_specs := d.Get("launch_specification").(*schema.Set).List() + specs := make([]*ec2.SpotFleetLaunchSpecification, len(user_specs)) + for i, user_spec := range user_specs { + user_spec_map := user_spec.(map[string]interface{}) + // panic: interface conversion: interface {} is map[string]interface {}, not *schema.ResourceData + opts, err := buildSpotFleetLaunchSpecification(user_spec_map, meta) + if err != nil { + return nil, err + } + specs[i] = opts + } + + return specs, nil +} + +func resourceAwsSpotFleetRequestCreate(d *schema.ResourceData, meta interface{}) error { + // http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RequestSpotFleet.html + conn := meta.(*AWSClient).ec2conn + + launch_specs, err := buildAwsSpotFleetLaunchSpecifications(d, meta) + if err != nil { + return err + } + + // http://docs.aws.amazon.com/sdk-for-go/api/service/ec2.html#type-SpotFleetRequestConfigData + spotFleetConfig := &ec2.SpotFleetRequestConfigData{ + IamFleetRole: aws.String(d.Get("iam_fleet_role").(string)), + LaunchSpecifications: launch_specs, + SpotPrice: aws.String(d.Get("spot_price").(string)), + TargetCapacity: aws.Int64(int64(d.Get("target_capacity").(int))), + ClientToken: aws.String(resource.UniqueId()), + TerminateInstancesWithExpiration: aws.Bool(d.Get("terminate_instances_with_expiration").(bool)), + ReplaceUnhealthyInstances: aws.Bool(d.Get("replace_unhealthy_instances").(bool)), + } + + if v, ok := d.GetOk("excess_capacity_termination_policy"); ok { + spotFleetConfig.ExcessCapacityTerminationPolicy = aws.String(v.(string)) + } + + if v, ok := d.GetOk("allocation_strategy"); ok { + spotFleetConfig.AllocationStrategy = aws.String(v.(string)) + } else { + spotFleetConfig.AllocationStrategy = aws.String("lowestPrice") + } + + if v, ok := d.GetOk("valid_from"); ok { + valid_from, err := time.Parse(awsAutoscalingScheduleTimeLayout, v.(string)) + if err != nil { + return err + } + spotFleetConfig.ValidFrom = &valid_from + } + + if v, ok := d.GetOk("valid_until"); ok { + valid_until, err := time.Parse(awsAutoscalingScheduleTimeLayout, v.(string)) + if err != nil { + return err + } + spotFleetConfig.ValidUntil = &valid_until + } else { + valid_until := time.Now().Add(24 * time.Hour) + spotFleetConfig.ValidUntil = &valid_until + } + + // http://docs.aws.amazon.com/sdk-for-go/api/service/ec2.html#type-RequestSpotFleetInput + spotFleetOpts := &ec2.RequestSpotFleetInput{ + SpotFleetRequestConfig: spotFleetConfig, + DryRun: aws.Bool(false), + } + + log.Printf("[DEBUG] Requesting spot fleet with these opts: %+v", spotFleetOpts) + + // Since IAM is eventually consistent, we retry creation as a newly created role may not + // take effect immediately, resulting in an InvalidSpotFleetRequestConfig error + var resp *ec2.RequestSpotFleetOutput + err = resource.Retry(1*time.Minute, func() *resource.RetryError { + var err error + resp, err = conn.RequestSpotFleet(spotFleetOpts) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + // IAM is eventually consistent :/ + if awsErr.Code() == "InvalidSpotFleetRequestConfig" { + return resource.RetryableError( + fmt.Errorf("[WARN] Error creating Spot fleet request, retrying: %s", err)) + } + } + return resource.NonRetryableError(err) + } + return nil + }) 
+
+	if err != nil {
+		return fmt.Errorf("Error requesting spot fleet: %s", err)
+	}
+
+	d.SetId(*resp.SpotFleetRequestId)
+
+	log.Printf("[INFO] Spot Fleet Request ID: %s", d.Id())
+	log.Println("[INFO] Waiting for Spot Fleet Request to be active")
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"submitted"},
+		Target:     []string{"active"},
+		Refresh:    resourceAwsSpotFleetRequestStateRefreshFunc(d, meta),
+		Timeout:    10 * time.Minute,
+		MinTimeout: 10 * time.Second,
+		Delay:      30 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return err
+	}
+
+	return resourceAwsSpotFleetRequestRead(d, meta)
+}
+
+func resourceAwsSpotFleetRequestStateRefreshFunc(d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		conn := meta.(*AWSClient).ec2conn
+		req := &ec2.DescribeSpotFleetRequestsInput{
+			SpotFleetRequestIds: []*string{aws.String(d.Id())},
+		}
+		resp, err := conn.DescribeSpotFleetRequests(req)
+
+		if err != nil {
+			log.Printf("[WARN] Error retrieving Spot Fleet Request while waiting: %s", err)
+			return nil, "", nil
+		}
+
+		if resp == nil {
+			return nil, "", nil
+		}
+
+		if len(resp.SpotFleetRequestConfigs) == 0 {
+			return nil, "", nil
+		}
+
+		spotFleetRequest := resp.SpotFleetRequestConfigs[0]
+
+		return spotFleetRequest, *spotFleetRequest.SpotFleetRequestState, nil
+	}
+}
+
+func resourceAwsSpotFleetRequestRead(d *schema.ResourceData, meta interface{}) error {
+	// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSpotFleetRequests.html
+	conn := meta.(*AWSClient).ec2conn
+
+	req := &ec2.DescribeSpotFleetRequestsInput{
+		SpotFleetRequestIds: []*string{aws.String(d.Id())},
+	}
+	resp, err := conn.DescribeSpotFleetRequests(req)
+
+	if err != nil {
+		// If the spot request was not found, return nil so that we can show
+		// that it is gone.
+ ec2err, ok := err.(awserr.Error) + if ok && ec2err.Code() == "InvalidSpotFleetRequestId.NotFound" { + d.SetId("") + return nil + } + + // Some other error, report it + return err + } + + sfr := resp.SpotFleetRequestConfigs[0] + + // if the request is cancelled, then it is gone + cancelledStates := map[string]bool{ + "cancelled": true, + "cancelled_running": true, + "cancelled_terminating": true, + } + if _, ok := cancelledStates[*sfr.SpotFleetRequestState]; ok { + d.SetId("") + return nil + } + + d.SetId(*sfr.SpotFleetRequestId) + d.Set("spot_request_state", aws.StringValue(sfr.SpotFleetRequestState)) + + config := sfr.SpotFleetRequestConfig + + if config.AllocationStrategy != nil { + d.Set("allocation_strategy", aws.StringValue(config.AllocationStrategy)) + } + + if config.ClientToken != nil { + d.Set("client_token", aws.StringValue(config.ClientToken)) + } + + if config.ExcessCapacityTerminationPolicy != nil { + d.Set("excess_capacity_termination_policy", + aws.StringValue(config.ExcessCapacityTerminationPolicy)) + } + + if config.IamFleetRole != nil { + d.Set("iam_fleet_role", aws.StringValue(config.IamFleetRole)) + } + + if config.SpotPrice != nil { + d.Set("spot_price", aws.StringValue(config.SpotPrice)) + } + + if config.TargetCapacity != nil { + d.Set("target_capacity", aws.Int64Value(config.TargetCapacity)) + } + + if config.TerminateInstancesWithExpiration != nil { + d.Set("terminate_instances_with_expiration", + aws.BoolValue(config.TerminateInstancesWithExpiration)) + } + + if config.ValidFrom != nil { + d.Set("valid_from", + aws.TimeValue(config.ValidFrom).Format(awsAutoscalingScheduleTimeLayout)) + } + + if config.ValidUntil != nil { + d.Set("valid_until", + aws.TimeValue(config.ValidUntil).Format(awsAutoscalingScheduleTimeLayout)) + } + + d.Set("replace_unhealthy_instances", config.ReplaceUnhealthyInstances) + d.Set("launch_specification", launchSpecsToSet(config.LaunchSpecifications, conn)) + + return nil +} + +func launchSpecsToSet(launchSpecs []*ec2.SpotFleetLaunchSpecification, conn *ec2.EC2) *schema.Set { + specSet := &schema.Set{F: hashLaunchSpecification} + for _, spec := range launchSpecs { + rootDeviceName, err := fetchRootDeviceName(aws.StringValue(spec.ImageId), conn) + if err != nil { + log.Panic(err) + } + + specSet.Add(launchSpecToMap(spec, rootDeviceName)) + } + return specSet +} + +func launchSpecToMap(l *ec2.SpotFleetLaunchSpecification, rootDevName *string) map[string]interface{} { + m := make(map[string]interface{}) + + m["root_block_device"] = rootBlockDeviceToSet(l.BlockDeviceMappings, rootDevName) + m["ebs_block_device"] = ebsBlockDevicesToSet(l.BlockDeviceMappings, rootDevName) + m["ephemeral_block_device"] = ephemeralBlockDevicesToSet(l.BlockDeviceMappings) + + if l.ImageId != nil { + m["ami"] = aws.StringValue(l.ImageId) + } + + if l.InstanceType != nil { + m["instance_type"] = aws.StringValue(l.InstanceType) + } + + if l.SpotPrice != nil { + m["spot_price"] = aws.StringValue(l.SpotPrice) + } + + if l.EbsOptimized != nil { + m["ebs_optimized"] = aws.BoolValue(l.EbsOptimized) + } + + if l.Monitoring != nil && l.Monitoring.Enabled != nil { + m["monitoring"] = aws.BoolValue(l.Monitoring.Enabled) + } + + if l.IamInstanceProfile != nil && l.IamInstanceProfile.Name != nil { + m["iam_instance_profile"] = aws.StringValue(l.IamInstanceProfile.Name) + } + + if l.UserData != nil { + m["user_data"] = userDataHashSum(aws.StringValue(l.UserData)) + } + + if l.KeyName != nil { + m["key_name"] = aws.StringValue(l.KeyName) + } + + if l.Placement != nil { + 
m["availability_zone"] = aws.StringValue(l.Placement.AvailabilityZone) + } + + if l.SubnetId != nil { + m["subnet_id"] = aws.StringValue(l.SubnetId) + } + + securityGroupIds := &schema.Set{F: schema.HashString} + if len(l.NetworkInterfaces) > 0 { + m["associate_public_ip_address"] = aws.BoolValue(l.NetworkInterfaces[0].AssociatePublicIpAddress) + m["subnet_id"] = aws.StringValue(l.NetworkInterfaces[0].SubnetId) + + for _, group := range l.NetworkInterfaces[0].Groups { + securityGroupIds.Add(aws.StringValue(group)) + } + } else { + for _, group := range l.SecurityGroups { + securityGroupIds.Add(aws.StringValue(group.GroupId)) + } + } + m["vpc_security_group_ids"] = securityGroupIds + + if l.WeightedCapacity != nil { + m["weighted_capacity"] = strconv.FormatFloat(*l.WeightedCapacity, 'f', 0, 64) + } + + return m +} + +func ebsBlockDevicesToSet(bdm []*ec2.BlockDeviceMapping, rootDevName *string) *schema.Set { + set := &schema.Set{F: hashEbsBlockDevice} + + for _, val := range bdm { + if val.Ebs != nil { + m := make(map[string]interface{}) + + ebs := val.Ebs + + if val.DeviceName != nil { + if aws.StringValue(rootDevName) == aws.StringValue(val.DeviceName) { + continue + } + + m["device_name"] = aws.StringValue(val.DeviceName) + } + + if ebs.DeleteOnTermination != nil { + m["delete_on_termination"] = aws.BoolValue(ebs.DeleteOnTermination) + } + + if ebs.SnapshotId != nil { + m["snapshot_id"] = aws.StringValue(ebs.SnapshotId) + } + + if ebs.Encrypted != nil { + m["encrypted"] = aws.BoolValue(ebs.Encrypted) + } + + if ebs.VolumeSize != nil { + m["volume_size"] = aws.Int64Value(ebs.VolumeSize) + } + + if ebs.VolumeType != nil { + m["volume_type"] = aws.StringValue(ebs.VolumeType) + } + + if ebs.Iops != nil { + m["iops"] = aws.Int64Value(ebs.Iops) + } + + set.Add(m) + } + } + + return set +} + +func ephemeralBlockDevicesToSet(bdm []*ec2.BlockDeviceMapping) *schema.Set { + set := &schema.Set{F: hashEphemeralBlockDevice} + + for _, val := range bdm { + if val.VirtualName != nil { + m := make(map[string]interface{}) + m["virtual_name"] = aws.StringValue(val.VirtualName) + + if val.DeviceName != nil { + m["device_name"] = aws.StringValue(val.DeviceName) + } + + set.Add(m) + } + } + + return set +} + +func rootBlockDeviceToSet( + bdm []*ec2.BlockDeviceMapping, + rootDevName *string, +) *schema.Set { + set := &schema.Set{F: hashRootBlockDevice} + + if rootDevName != nil { + for _, val := range bdm { + if aws.StringValue(val.DeviceName) == aws.StringValue(rootDevName) { + m := make(map[string]interface{}) + if val.Ebs.DeleteOnTermination != nil { + m["delete_on_termination"] = aws.BoolValue(val.Ebs.DeleteOnTermination) + } + + if val.Ebs.VolumeSize != nil { + m["volume_size"] = aws.Int64Value(val.Ebs.VolumeSize) + } + + if val.Ebs.VolumeType != nil { + m["volume_type"] = aws.StringValue(val.Ebs.VolumeType) + } + + if val.Ebs.Iops != nil { + m["iops"] = aws.Int64Value(val.Ebs.Iops) + } + + set.Add(m) + } + } + } + + return set +} + +func resourceAwsSpotFleetRequestUpdate(d *schema.ResourceData, meta interface{}) error { + // http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifySpotFleetRequest.html + conn := meta.(*AWSClient).ec2conn + + d.Partial(true) + + req := &ec2.ModifySpotFleetRequestInput{ + SpotFleetRequestId: aws.String(d.Id()), + } + + if val, ok := d.GetOk("target_capacity"); ok { + req.TargetCapacity = aws.Int64(int64(val.(int))) + } + + if val, ok := d.GetOk("excess_capacity_termination_policy"); ok { + req.ExcessCapacityTerminationPolicy = aws.String(val.(string)) + } + + resp, 
err := conn.ModifySpotFleetRequest(req)
+ if err != nil {
+ return fmt.Errorf("Error updating spot fleet request (%s): %s", d.Id(), err)
+ }
+ if !aws.BoolValue(resp.Return) {
+ // TODO: rollback to old values?
+ log.Printf("[WARN] Modify request for spot fleet (%s) was not accepted", d.Id())
+ }
+
+ d.Partial(false)
+
+ return nil
+}
+
+func resourceAwsSpotFleetRequestDelete(d *schema.ResourceData, meta interface{}) error {
+ // http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CancelSpotFleetRequests.html
+ conn := meta.(*AWSClient).ec2conn
+ terminateInstances := d.Get("terminate_instances_with_expiration").(bool)
+
+ log.Printf("[INFO] Cancelling spot fleet request: %s", d.Id())
+ resp, err := conn.CancelSpotFleetRequests(&ec2.CancelSpotFleetRequestsInput{
+ SpotFleetRequestIds: []*string{aws.String(d.Id())},
+ TerminateInstances: aws.Bool(terminateInstances),
+ })
+
+ if err != nil {
+ return fmt.Errorf("Error cancelling spot request (%s): %s", d.Id(), err)
+ }
+
+ // check response successfulFleetRequestSet to make sure our request was canceled
+ var found bool
+ for _, s := range resp.SuccessfulFleetRequests {
+ if *s.SpotFleetRequestId == d.Id() {
+ found = true
+ }
+ }
+
+ if !found {
+ return fmt.Errorf("[ERR] Spot Fleet request (%s) was not found to be successfully canceled, dangling resources may exist", d.Id())
+ }
+
+ // Only wait for instance termination if requested
+ if !terminateInstances {
+ return nil
+ }
+
+ return resource.Retry(5*time.Minute, func() *resource.RetryError {
+ resp, err := conn.DescribeSpotFleetInstances(&ec2.DescribeSpotFleetInstancesInput{
+ SpotFleetRequestId: aws.String(d.Id()),
+ })
+ if err != nil {
+ return resource.NonRetryableError(err)
+ }
+
+ if len(resp.ActiveInstances) == 0 {
+ log.Printf("[DEBUG] Active instance count is 0 for Spot Fleet Request (%s), removing", d.Id())
+ return nil
+ }
+
+ log.Printf("[DEBUG] Active instance count in Spot Fleet Request (%s): %d", d.Id(), len(resp.ActiveInstances))
+
+ return resource.RetryableError(
+ fmt.Errorf("fleet still has (%d) running instances", len(resp.ActiveInstances)))
+ })
+}
+
+func hashEphemeralBlockDevice(v interface{}) int {
+ var buf bytes.Buffer
+ m := v.(map[string]interface{})
+ buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string)))
+ buf.WriteString(fmt.Sprintf("%s-", m["virtual_name"].(string)))
+ return hashcode.String(buf.String())
+}
+
+func hashRootBlockDevice(v interface{}) int {
+ // there can be only one root device; no need to hash anything
+ return 0
+}
+
+func hashLaunchSpecification(v interface{}) int {
+ var buf bytes.Buffer
+ m := v.(map[string]interface{})
+ buf.WriteString(fmt.Sprintf("%s-", m["ami"].(string)))
+ if m["availability_zone"] != "" {
+ buf.WriteString(fmt.Sprintf("%s-", m["availability_zone"].(string)))
+ }
+ if m["subnet_id"] != "" {
+ buf.WriteString(fmt.Sprintf("%s-", m["subnet_id"].(string)))
+ }
+ buf.WriteString(fmt.Sprintf("%s-", m["instance_type"].(string)))
+ buf.WriteString(fmt.Sprintf("%s-", m["spot_price"].(string)))
+ return hashcode.String(buf.String())
+}
+
+func hashEbsBlockDevice(v interface{}) int {
+ var buf bytes.Buffer
+ m := v.(map[string]interface{})
+ if name, ok := m["device_name"]; ok {
+ buf.WriteString(fmt.Sprintf("%s-", name.(string)))
+ }
+ if id, ok := m["snapshot_id"]; ok {
+ buf.WriteString(fmt.Sprintf("%s-", id.(string)))
+ }
+ return hashcode.String(buf.String())
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_fleet_request_migrate.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_fleet_request_migrate.go
new file mode 100644
index 000000000..dea0a32e8
--- /dev/null
+++ 
b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_fleet_request_migrate.go @@ -0,0 +1,33 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/terraform" +) + +func resourceAwsSpotFleetRequestMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + switch v { + case 0: + log.Println("[INFO] Found AWS Spot Fleet Request State v0; migrating to v1") + return migrateSpotFleetRequestV0toV1(is) + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateSpotFleetRequestV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] Empty Spot Fleet Request State; nothing to migrate.") + return is, nil + } + + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + is.Attributes["associate_public_ip_address"] = "false" + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_instance_request.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_instance_request.go new file mode 100644 index 000000000..147c88f6d --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_spot_instance_request.go @@ -0,0 +1,373 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSpotInstanceRequest() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSpotInstanceRequestCreate, + Read: resourceAwsSpotInstanceRequestRead, + Delete: resourceAwsSpotInstanceRequestDelete, + Update: resourceAwsSpotInstanceRequestUpdate, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: func() map[string]*schema.Schema { + // The Spot Instance Request Schema is based on the AWS Instance schema. 
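+ // For illustration, a minimal configuration reuses the aws_instance
+ // arguments and only adds the bidding fields declared below (values
+ // here are placeholders, not real IDs or prices):
+ //
+ //   resource "aws_spot_instance_request" "example" {
+ //     ami           = "ami-12345678"
+ //     instance_type = "t2.micro"
+ //     spot_price    = "0.03"
+ //   }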
+ s := resourceAwsInstance().Schema + + // Everything on a spot instance is ForceNew except tags + for k, v := range s { + if k == "tags" { + continue + } + v.ForceNew = true + } + + s["volume_tags"] = &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + } + + s["spot_price"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + } + s["spot_type"] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "persistent", + } + s["wait_for_fulfillment"] = &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + } + s["spot_bid_status"] = &schema.Schema{ + Type: schema.TypeString, + Computed: true, + } + s["spot_request_state"] = &schema.Schema{ + Type: schema.TypeString, + Computed: true, + } + s["spot_instance_id"] = &schema.Schema{ + Type: schema.TypeString, + Computed: true, + } + s["block_duration_minutes"] = &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + } + + return s + }(), + } +} + +func resourceAwsSpotInstanceRequestCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + instanceOpts, err := buildAwsInstanceOpts(d, meta) + if err != nil { + return err + } + + spotOpts := &ec2.RequestSpotInstancesInput{ + SpotPrice: aws.String(d.Get("spot_price").(string)), + Type: aws.String(d.Get("spot_type").(string)), + + // Though the AWS API supports creating spot instance requests for multiple + // instances, for TF purposes we fix this to one instance per request. + // Users can get equivalent behavior out of TF's "count" meta-parameter. + InstanceCount: aws.Int64(1), + + LaunchSpecification: &ec2.RequestSpotLaunchSpecification{ + BlockDeviceMappings: instanceOpts.BlockDeviceMappings, + EbsOptimized: instanceOpts.EBSOptimized, + Monitoring: instanceOpts.Monitoring, + IamInstanceProfile: instanceOpts.IAMInstanceProfile, + ImageId: instanceOpts.ImageID, + InstanceType: instanceOpts.InstanceType, + KeyName: instanceOpts.KeyName, + Placement: instanceOpts.SpotPlacement, + SecurityGroupIds: instanceOpts.SecurityGroupIDs, + SecurityGroups: instanceOpts.SecurityGroups, + SubnetId: instanceOpts.SubnetID, + UserData: instanceOpts.UserData64, + }, + } + + if v, ok := d.GetOk("block_duration_minutes"); ok { + spotOpts.BlockDurationMinutes = aws.Int64(int64(v.(int))) + } + + // If the instance is configured with a Network Interface (a subnet, has + // public IP, etc), then the instanceOpts.SecurityGroupIds and SubnetId will + // be nil + if len(instanceOpts.NetworkInterfaces) > 0 { + spotOpts.LaunchSpecification.SecurityGroupIds = instanceOpts.NetworkInterfaces[0].Groups + spotOpts.LaunchSpecification.SubnetId = instanceOpts.NetworkInterfaces[0].SubnetId + } + + // Make the spot instance request + log.Printf("[DEBUG] Requesting spot bid opts: %s", spotOpts) + + var resp *ec2.RequestSpotInstancesOutput + err = resource.Retry(15*time.Second, func() *resource.RetryError { + var err error + resp, err = conn.RequestSpotInstances(spotOpts) + // IAM instance profiles can take ~10 seconds to propagate in AWS: + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console + if isAWSErr(err, "InvalidParameterValue", "Invalid IAM Instance Profile") { + log.Printf("[DEBUG] Invalid IAM Instance Profile referenced, retrying...") + return resource.RetryableError(err) + } + // IAM roles can also take time to propagate in AWS: + if isAWSErr(err, "InvalidParameterValue", " has no associated IAM Roles") { + 
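+ // Same eventual-consistency window as the instance profile case
+ // above, so the request is retried rather than failed outright.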
log.Printf("[DEBUG] IAM Instance Profile appears to have no IAM roles, retrying...") + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + }) + + if err != nil { + return fmt.Errorf("Error requesting spot instances: %s", err) + } + if len(resp.SpotInstanceRequests) != 1 { + return fmt.Errorf( + "Expected response with length 1, got: %s", resp) + } + + sir := *resp.SpotInstanceRequests[0] + d.SetId(*sir.SpotInstanceRequestId) + + if d.Get("wait_for_fulfillment").(bool) { + spotStateConf := &resource.StateChangeConf{ + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html + Pending: []string{"start", "pending-evaluation", "pending-fulfillment"}, + Target: []string{"fulfilled"}, + Refresh: SpotInstanceStateRefreshFunc(conn, sir), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + log.Printf("[DEBUG] waiting for spot bid to resolve... this may take several minutes.") + _, err = spotStateConf.WaitForState() + + if err != nil { + return fmt.Errorf("Error while waiting for spot request (%s) to resolve: %s", sir, err) + } + } + + return resourceAwsSpotInstanceRequestUpdate(d, meta) +} + +// Update spot state, etc +func resourceAwsSpotInstanceRequestRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + req := &ec2.DescribeSpotInstanceRequestsInput{ + SpotInstanceRequestIds: []*string{aws.String(d.Id())}, + } + resp, err := conn.DescribeSpotInstanceRequests(req) + + if err != nil { + // If the spot request was not found, return nil so that we can show + // that it is gone. + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidSpotInstanceRequestID.NotFound" { + d.SetId("") + return nil + } + + // Some other error, report it + return err + } + + // If nothing was found, then return no state + if len(resp.SpotInstanceRequests) == 0 { + d.SetId("") + return nil + } + + request := resp.SpotInstanceRequests[0] + + // if the request is cancelled, then it is gone + if *request.State == "cancelled" { + d.SetId("") + return nil + } + + d.Set("spot_bid_status", *request.Status.Code) + // Instance ID is not set if the request is still pending + if request.InstanceId != nil { + d.Set("spot_instance_id", *request.InstanceId) + // Read the instance data, setting up connection information + if err := readInstance(d, meta); err != nil { + return fmt.Errorf("[ERR] Error reading Spot Instance Data: %s", err) + } + } + + d.Set("spot_request_state", request.State) + d.Set("block_duration_minutes", request.BlockDurationMinutes) + d.Set("tags", tagsToMap(request.Tags)) + + return nil +} + +func readInstance(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + resp, err := conn.DescribeInstances(&ec2.DescribeInstancesInput{ + InstanceIds: []*string{aws.String(d.Get("spot_instance_id").(string))}, + }) + if err != nil { + // If the instance was not found, return nil so that we can show + // that the instance is gone. 
+ if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidInstanceID.NotFound" { + return fmt.Errorf("no instance found") + } + + // Some other error, report it + return err + } + + // If nothing was found, then return no state + if len(resp.Reservations) == 0 { + return fmt.Errorf("no instances found") + } + + instance := resp.Reservations[0].Instances[0] + + // Set these fields for connection information + if instance != nil { + d.Set("public_dns", instance.PublicDnsName) + d.Set("public_ip", instance.PublicIpAddress) + d.Set("private_dns", instance.PrivateDnsName) + d.Set("private_ip", instance.PrivateIpAddress) + + // set connection information + if instance.PublicIpAddress != nil { + d.SetConnInfo(map[string]string{ + "type": "ssh", + "host": *instance.PublicIpAddress, + }) + } else if instance.PrivateIpAddress != nil { + d.SetConnInfo(map[string]string{ + "type": "ssh", + "host": *instance.PrivateIpAddress, + }) + } + if err := readBlockDevices(d, instance, conn); err != nil { + return err + } + + var ipv6Addresses []string + if len(instance.NetworkInterfaces) > 0 { + for _, ni := range instance.NetworkInterfaces { + if *ni.Attachment.DeviceIndex == 0 { + d.Set("subnet_id", ni.SubnetId) + d.Set("network_interface_id", ni.NetworkInterfaceId) + d.Set("associate_public_ip_address", ni.Association != nil) + d.Set("ipv6_address_count", len(ni.Ipv6Addresses)) + + for _, address := range ni.Ipv6Addresses { + ipv6Addresses = append(ipv6Addresses, *address.Ipv6Address) + } + } + } + } else { + d.Set("subnet_id", instance.SubnetId) + d.Set("network_interface_id", "") + } + + if err := d.Set("ipv6_addresses", ipv6Addresses); err != nil { + log.Printf("[WARN] Error setting ipv6_addresses for AWS Spot Instance (%s): %s", d.Id(), err) + } + } + + return nil +} + +func resourceAwsSpotInstanceRequestUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + d.Partial(true) + if err := setTags(conn, d); err != nil { + return err + } else { + d.SetPartial("tags") + } + + d.Partial(false) + + return resourceAwsSpotInstanceRequestRead(d, meta) +} + +func resourceAwsSpotInstanceRequestDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + log.Printf("[INFO] Cancelling spot request: %s", d.Id()) + _, err := conn.CancelSpotInstanceRequests(&ec2.CancelSpotInstanceRequestsInput{ + SpotInstanceRequestIds: []*string{aws.String(d.Id())}, + }) + + if err != nil { + return fmt.Errorf("Error cancelling spot request (%s): %s", d.Id(), err) + } + + if instanceId := d.Get("spot_instance_id").(string); instanceId != "" { + log.Printf("[INFO] Terminating instance: %s", instanceId) + if err := awsTerminateInstance(conn, instanceId, d); err != nil { + return fmt.Errorf("Error terminating spot instance: %s", err) + } + } + + return nil +} + +// SpotInstanceStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch +// an EC2 spot instance request +func SpotInstanceStateRefreshFunc( + conn *ec2.EC2, sir ec2.SpotInstanceRequest) resource.StateRefreshFunc { + + return func() (interface{}, string, error) { + resp, err := conn.DescribeSpotInstanceRequests(&ec2.DescribeSpotInstanceRequestsInput{ + SpotInstanceRequestIds: []*string{sir.SpotInstanceRequestId}, + }) + + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidSpotInstanceRequestID.NotFound" { + // Set this to nil as if we didn't find anything. 
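+ // A nil response falls through to the empty-state return below, which
+ // the StateChangeConf poller treats as "not yet found" and retries.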
+ resp = nil + } else { + log.Printf("Error on StateRefresh: %s", err) + return nil, "", err + } + } + + if resp == nil || len(resp.SpotInstanceRequests) == 0 { + // Sometimes AWS just has consistency issues and doesn't see + // our request yet. Return an empty state. + return nil, "", nil + } + + req := resp.SpotInstanceRequests[0] + return req, *req.Status.Code, nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sqs_queue.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sqs_queue.go new file mode 100644 index 000000000..b7ce4c52b --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sqs_queue.go @@ -0,0 +1,297 @@ +package aws + +import ( + "fmt" + "log" + "net/url" + "strconv" + + "github.com/hashicorp/terraform/helper/schema" + + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/sqs" +) + +var AttributeMap = map[string]string{ + "delay_seconds": "DelaySeconds", + "max_message_size": "MaximumMessageSize", + "message_retention_seconds": "MessageRetentionPeriod", + "receive_wait_time_seconds": "ReceiveMessageWaitTimeSeconds", + "visibility_timeout_seconds": "VisibilityTimeout", + "policy": "Policy", + "redrive_policy": "RedrivePolicy", + "arn": "QueueArn", + "fifo_queue": "FifoQueue", + "content_based_deduplication": "ContentBasedDeduplication", +} + +// A number of these are marked as computed because if you don't +// provide a value, SQS will provide you with defaults (which are the +// default values specified below) +func resourceAwsSqsQueue() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSqsQueueCreate, + Read: resourceAwsSqsQueueRead, + Update: resourceAwsSqsQueueUpdate, + Delete: resourceAwsSqsQueueDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "delay_seconds": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + }, + "max_message_size": { + Type: schema.TypeInt, + Optional: true, + Default: 262144, + }, + "message_retention_seconds": { + Type: schema.TypeInt, + Optional: true, + Default: 345600, + }, + "receive_wait_time_seconds": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + }, + "visibility_timeout_seconds": { + Type: schema.TypeInt, + Optional: true, + Default: 30, + }, + "policy": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateJsonString, + DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, + }, + "redrive_policy": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateJsonString, + StateFunc: func(v interface{}) string { + json, _ := normalizeJsonString(v) + return json + }, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "fifo_queue": { + Type: schema.TypeBool, + Default: false, + ForceNew: true, + Optional: true, + }, + "content_based_deduplication": { + Type: schema.TypeBool, + Default: false, + Optional: true, + }, + }, + } +} + +func resourceAwsSqsQueueCreate(d *schema.ResourceData, meta interface{}) error { + sqsconn := meta.(*AWSClient).sqsconn + + name := d.Get("name").(string) + fq := d.Get("fifo_queue").(bool) + cbd := d.Get("content_based_deduplication").(bool) + + if fq { + if errors := validateSQSFifoQueueName(name, "name"); len(errors) > 0 { + return fmt.Errorf("Error validating the 
FIFO queue name: %v", errors) + } + } else { + if errors := validateSQSQueueName(name, "name"); len(errors) > 0 { + return fmt.Errorf("Error validating SQS queue name: %v", errors) + } + } + + if !fq && cbd { + return fmt.Errorf("Content based deduplication can only be set with FIFO queues") + } + + log.Printf("[DEBUG] SQS queue create: %s", name) + + req := &sqs.CreateQueueInput{ + QueueName: aws.String(name), + } + + attributes := make(map[string]*string) + + resource := *resourceAwsSqsQueue() + + for k, s := range resource.Schema { + if attrKey, ok := AttributeMap[k]; ok { + if value, ok := d.GetOk(k); ok { + switch s.Type { + case schema.TypeInt: + attributes[attrKey] = aws.String(strconv.Itoa(value.(int))) + case schema.TypeBool: + attributes[attrKey] = aws.String(strconv.FormatBool(value.(bool))) + default: + attributes[attrKey] = aws.String(value.(string)) + } + } + + } + } + + if len(attributes) > 0 { + req.Attributes = attributes + } + + output, err := sqsconn.CreateQueue(req) + if err != nil { + return fmt.Errorf("Error creating SQS queue: %s", err) + } + + d.SetId(*output.QueueUrl) + + return resourceAwsSqsQueueUpdate(d, meta) +} + +func resourceAwsSqsQueueUpdate(d *schema.ResourceData, meta interface{}) error { + sqsconn := meta.(*AWSClient).sqsconn + attributes := make(map[string]*string) + + resource := *resourceAwsSqsQueue() + + for k, s := range resource.Schema { + if attrKey, ok := AttributeMap[k]; ok { + if d.HasChange(k) { + log.Printf("[DEBUG] Updating %s", attrKey) + _, n := d.GetChange(k) + switch s.Type { + case schema.TypeInt: + attributes[attrKey] = aws.String(strconv.Itoa(n.(int))) + case schema.TypeBool: + attributes[attrKey] = aws.String(strconv.FormatBool(n.(bool))) + default: + attributes[attrKey] = aws.String(n.(string)) + } + } + } + } + + if len(attributes) > 0 { + req := &sqs.SetQueueAttributesInput{ + QueueUrl: aws.String(d.Id()), + Attributes: attributes, + } + if _, err := sqsconn.SetQueueAttributes(req); err != nil { + return fmt.Errorf("[ERR] Error updating SQS attributes: %s", err) + } + } + + return resourceAwsSqsQueueRead(d, meta) +} + +func resourceAwsSqsQueueRead(d *schema.ResourceData, meta interface{}) error { + sqsconn := meta.(*AWSClient).sqsconn + + attributeOutput, err := sqsconn.GetQueueAttributes(&sqs.GetQueueAttributesInput{ + QueueUrl: aws.String(d.Id()), + AttributeNames: []*string{aws.String("All")}, + }) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + log.Printf("ERROR Found %s", awsErr.Code()) + if "AWS.SimpleQueueService.NonExistentQueue" == awsErr.Code() { + d.SetId("") + log.Printf("[DEBUG] SQS Queue (%s) not found", d.Get("name").(string)) + return nil + } + } + return err + } + + name, err := extractNameFromSqsQueueUrl(d.Id()) + if err != nil { + return err + } + d.Set("name", name) + + if attributeOutput.Attributes != nil && len(attributeOutput.Attributes) > 0 { + attrmap := attributeOutput.Attributes + resource := *resourceAwsSqsQueue() + // iKey = internal struct key, oKey = AWS Attribute Map key + for iKey, oKey := range AttributeMap { + if attrmap[oKey] != nil { + switch resource.Schema[iKey].Type { + case schema.TypeInt: + value, err := strconv.Atoi(*attrmap[oKey]) + if err != nil { + return err + } + d.Set(iKey, value) + log.Printf("[DEBUG] Reading %s => %s -> %d", iKey, oKey, value) + case schema.TypeBool: + value, err := strconv.ParseBool(*attrmap[oKey]) + if err != nil { + return err + } + d.Set(iKey, value) + log.Printf("[DEBUG] Reading %s => %s -> %t", iKey, oKey, value) + default: + 
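+ // Attributes that are neither ints nor bools in the schema (policy,
+ // redrive_policy, arn) pass through as raw strings.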
log.Printf("[DEBUG] Reading %s => %s -> %s", iKey, oKey, *attrmap[oKey]) + d.Set(iKey, *attrmap[oKey]) + } + } + } + } + + // Since AWS does not send the FifoQueue attribute back when the queue + // is a standard one (even to false), this enforces the queue to be set + // to the correct value. + d.Set("fifo_queue", d.Get("fifo_queue").(bool)) + d.Set("content_based_deduplication", d.Get("content_based_deduplication").(bool)) + + return nil +} + +func resourceAwsSqsQueueDelete(d *schema.ResourceData, meta interface{}) error { + sqsconn := meta.(*AWSClient).sqsconn + + log.Printf("[DEBUG] SQS Delete Queue: %s", d.Id()) + _, err := sqsconn.DeleteQueue(&sqs.DeleteQueueInput{ + QueueUrl: aws.String(d.Id()), + }) + if err != nil { + return err + } + return nil +} + +func extractNameFromSqsQueueUrl(queue string) (string, error) { + //http://sqs.us-west-2.amazonaws.com/123456789012/queueName + u, err := url.Parse(queue) + if err != nil { + return "", err + } + segments := strings.Split(u.Path, "/") + if len(segments) != 3 { + return "", fmt.Errorf("SQS Url not parsed correctly") + } + + return segments[2], nil + +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sqs_queue_policy.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sqs_queue_policy.go new file mode 100644 index 000000000..343249799 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_sqs_queue_policy.go @@ -0,0 +1,100 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/sqs" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSqsQueuePolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSqsQueuePolicyUpsert, + Read: resourceAwsSqsQueuePolicyRead, + Update: resourceAwsSqsQueuePolicyUpsert, + Delete: resourceAwsSqsQueuePolicyDelete, + + Schema: map[string]*schema.Schema{ + "queue_url": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "policy": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validateJsonString, + DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, + }, + }, + } +} + +func resourceAwsSqsQueuePolicyUpsert(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sqsconn + url := d.Get("queue_url").(string) + + _, err := conn.SetQueueAttributes(&sqs.SetQueueAttributesInput{ + QueueUrl: aws.String(url), + Attributes: aws.StringMap(map[string]string{ + "Policy": d.Get("policy").(string), + }), + }) + if err != nil { + return fmt.Errorf("Error updating SQS attributes: %s", err) + } + + d.SetId("sqs-policy-" + url) + + return resourceAwsSqsQueuePolicyRead(d, meta) +} + +func resourceAwsSqsQueuePolicyRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sqsconn + url := d.Get("queue_url").(string) + out, err := conn.GetQueueAttributes(&sqs.GetQueueAttributesInput{ + QueueUrl: aws.String(url), + AttributeNames: []*string{aws.String("Policy")}, + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "AWS.SimpleQueueService.NonExistentQueue" { + log.Printf("[WARN] SQS Queue (%s) not found", d.Id()) + d.SetId("") + return nil + } + return err + } + if out == nil { + return fmt.Errorf("Received empty response for SQS queue %s", d.Id()) + } + + policy, ok := out.Attributes["Policy"] + if !ok { + return fmt.Errorf("SQS Queue 
policy not found for %s", d.Id())
+ }
+
+ d.Set("policy", policy)
+
+ return nil
+}
+
+func resourceAwsSqsQueuePolicyDelete(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).sqsconn
+
+ url := d.Get("queue_url").(string)
+ log.Printf("[DEBUG] Deleting SQS Queue Policy of %s", url)
+ _, err := conn.SetQueueAttributes(&sqs.SetQueueAttributesInput{
+ QueueUrl: aws.String(url),
+ Attributes: aws.StringMap(map[string]string{
+ "Policy": "",
+ }),
+ })
+ if err != nil {
+ return fmt.Errorf("Error deleting SQS Queue policy: %s", err)
+ }
+ return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_activation.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_activation.go
new file mode 100644
index 000000000..9cceda4ae
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_activation.go
@@ -0,0 +1,168 @@
+package aws
+
+import (
+ "fmt"
+ "log"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/ssm"
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsSsmActivation() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceAwsSsmActivationCreate,
+ Read: resourceAwsSsmActivationRead,
+ Delete: resourceAwsSsmActivationDelete,
+
+ Schema: map[string]*schema.Schema{
+ "name": {
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ "description": {
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ "expired": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "expiration_date": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ "iam_role": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "registration_limit": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ "registration_count": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ },
+ },
+ }
+}
+
+func resourceAwsSsmActivationCreate(d *schema.ResourceData, meta interface{}) error {
+ ssmconn := meta.(*AWSClient).ssmconn
+
+ log.Printf("[DEBUG] SSM activation create: %s", d.Id())
+
+ // iam_role is required, so it is always present
+ activationInput := &ssm.CreateActivationInput{
+ IamRole: aws.String(d.Get("iam_role").(string)),
+ }
+
+ if _, ok := d.GetOk("name"); ok {
+ activationInput.DefaultInstanceName = aws.String(d.Get("name").(string))
+ }
+
+ if _, ok := d.GetOk("description"); ok {
+ activationInput.Description = aws.String(d.Get("description").(string))
+ }
+
+ if v, ok := d.GetOk("expiration_date"); ok {
+ // expiration_date is a string in the schema, so parse it (RFC3339 is
+ // assumed as the input format) instead of asserting a time.Time.
+ expirationDate, err := time.Parse(time.RFC3339, v.(string))
+ if err != nil {
+ return fmt.Errorf("Error parsing expiration_date: %s", err)
+ }
+ activationInput.ExpirationDate = aws.Time(expirationDate)
+ }
+
+ if _, ok := d.GetOk("registration_limit"); ok {
+ activationInput.RegistrationLimit = aws.Int64(int64(d.Get("registration_limit").(int)))
+ }
+
+ // Retry to allow iam_role to be created and policy attachment to take place
+ var resp *ssm.CreateActivationOutput
+ err := resource.Retry(30*time.Second, func() *resource.RetryError {
+ var err error
+
+ resp, err = ssmconn.CreateActivation(activationInput)
+
+ if err != nil {
+ return resource.RetryableError(err)
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ return errwrap.Wrapf("[ERROR] Error creating SSM activation: {{err}}", err)
+ }
+
+ if resp.ActivationId 
== nil { + return fmt.Errorf("[ERROR] ActivationId was nil") + } + d.SetId(*resp.ActivationId) + + return resourceAwsSsmActivationRead(d, meta) +} + +func resourceAwsSsmActivationRead(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + log.Printf("[DEBUG] Reading SSM Activation: %s", d.Id()) + + params := &ssm.DescribeActivationsInput{ + Filters: []*ssm.DescribeActivationsFilter{ + { + FilterKey: aws.String("ActivationIds"), + FilterValues: []*string{ + aws.String(d.Id()), + }, + }, + }, + MaxResults: aws.Int64(1), + } + + resp, err := ssmconn.DescribeActivations(params) + + if err != nil { + return errwrap.Wrapf("[ERROR] Error reading SSM activation: {{err}}", err) + } + if resp.ActivationList == nil || len(resp.ActivationList) == 0 { + return fmt.Errorf("[ERROR] ActivationList was nil or empty") + } + + activation := resp.ActivationList[0] // Only 1 result as MaxResults is 1 above + d.Set("name", activation.DefaultInstanceName) + d.Set("description", activation.Description) + d.Set("expiration_date", activation.ExpirationDate) + d.Set("expired", activation.Expired) + d.Set("iam_role", activation.IamRole) + d.Set("registration_limit", activation.RegistrationLimit) + d.Set("registration_count", activation.RegistrationsCount) + + return nil +} + +func resourceAwsSsmActivationDelete(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + log.Printf("[DEBUG] Deleting SSM Activation: %s", d.Id()) + + params := &ssm.DeleteActivationInput{ + ActivationId: aws.String(d.Id()), + } + + _, err := ssmconn.DeleteActivation(params) + + if err != nil { + return errwrap.Wrapf("[ERROR] Error deleting SSM activation: {{err}}", err) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_association.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_association.go new file mode 100644 index 000000000..be83ce767 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_association.go @@ -0,0 +1,158 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssm" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSsmAssociation() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSsmAssociationCreate, + Read: resourceAwsSsmAssociationRead, + Delete: resourceAwsSsmAssociationDelete, + + Schema: map[string]*schema.Schema{ + "association_id": { + Type: schema.TypeString, + Computed: true, + }, + "instance_id": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + "name": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "parameters": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Computed: true, + }, + "targets": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + }, + "values": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + } +} + +func resourceAwsSsmAssociationCreate(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + log.Printf("[DEBUG] SSM association create: %s", d.Id()) + + assosciationInput := &ssm.CreateAssociationInput{ + Name: 
aws.String(d.Get("name").(string)),
+ }
+
+ if v, ok := d.GetOk("instance_id"); ok {
+ assosciationInput.InstanceId = aws.String(v.(string))
+ }
+
+ if v, ok := d.GetOk("parameters"); ok {
+ assosciationInput.Parameters = expandSSMDocumentParameters(v.(map[string]interface{}))
+ }
+
+ if _, ok := d.GetOk("targets"); ok {
+ assosciationInput.Targets = expandAwsSsmTargets(d)
+ }
+
+ resp, err := ssmconn.CreateAssociation(assosciationInput)
+ if err != nil {
+ return errwrap.Wrapf("[ERROR] Error creating SSM association: {{err}}", err)
+ }
+
+ if resp.AssociationDescription == nil {
+ return fmt.Errorf("[ERROR] AssociationDescription was nil")
+ }
+
+ d.SetId(*resp.AssociationDescription.Name)
+ d.Set("association_id", resp.AssociationDescription.AssociationId)
+
+ return resourceAwsSsmAssociationRead(d, meta)
+}
+
+func resourceAwsSsmAssociationRead(d *schema.ResourceData, meta interface{}) error {
+ ssmconn := meta.(*AWSClient).ssmconn
+
+ log.Printf("[DEBUG] Reading SSM Association: %s", d.Id())
+
+ params := &ssm.DescribeAssociationInput{
+ AssociationId: aws.String(d.Get("association_id").(string)),
+ }
+
+ resp, err := ssmconn.DescribeAssociation(params)
+
+ if err != nil {
+ return errwrap.Wrapf("[ERROR] Error reading SSM association: {{err}}", err)
+ }
+ if resp.AssociationDescription == nil {
+ return fmt.Errorf("[ERROR] AssociationDescription was nil")
+ }
+
+ association := resp.AssociationDescription
+ d.Set("instance_id", association.InstanceId)
+ d.Set("name", association.Name)
+ d.Set("parameters", association.Parameters)
+ d.Set("association_id", association.AssociationId)
+
+ if err := d.Set("targets", flattenAwsSsmTargets(association.Targets)); err != nil {
+ return fmt.Errorf("Error setting targets: %#v", err)
+ }
+
+ return nil
+}
+
+func resourceAwsSsmAssociationDelete(d *schema.ResourceData, meta interface{}) error {
+ ssmconn := meta.(*AWSClient).ssmconn
+
+ log.Printf("[DEBUG] Deleting SSM Association: %s", d.Id())
+
+ params := &ssm.DeleteAssociationInput{
+ AssociationId: aws.String(d.Get("association_id").(string)),
+ }
+
+ _, err := ssmconn.DeleteAssociation(params)
+
+ if err != nil {
+ return errwrap.Wrapf("[ERROR] Error deleting SSM association: {{err}}", err)
+ }
+
+ return nil
+}
+
+func expandSSMDocumentParameters(params map[string]interface{}) map[string][]*string {
+ var docParams = make(map[string][]*string)
+ for k, v := range params {
+ values := make([]*string, 1)
+ values[0] = aws.String(v.(string))
+ docParams[k] = values
+ }
+
+ return docParams
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_document.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_document.go
new file mode 100644
index 000000000..ad266d2bf
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_document.go
@@ -0,0 +1,481 @@
+package aws
+
+import (
+ "fmt"
+ "log"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/ssm"
+ "github.com/hashicorp/errwrap"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+const (
+ MINIMUM_VERSIONED_SCHEMA = 2.0
+)
+
+func resourceAwsSsmDocument() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceAwsSsmDocumentCreate,
+ Read: resourceAwsSsmDocumentRead,
+ Update: resourceAwsSsmDocumentUpdate,
+ Delete: resourceAwsSsmDocumentDelete,
+
+ Schema: 
map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "content": { + Type: schema.TypeString, + Required: true, + }, + "document_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAwsSSMDocumentType, + }, + "schema_version": { + Type: schema.TypeString, + Computed: true, + }, + "created_date": { + Type: schema.TypeString, + Computed: true, + }, + "default_version": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "hash": { + Type: schema.TypeString, + Computed: true, + }, + "hash_type": { + Type: schema.TypeString, + Computed: true, + }, + "latest_version": { + Type: schema.TypeString, + Computed: true, + }, + "owner": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "platform_types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "parameter": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + }, + "default_value": { + Type: schema.TypeString, + Optional: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "type": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "permissions": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + }, + "account_ids": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + } +} + +func resourceAwsSsmDocumentCreate(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + log.Printf("[INFO] Creating SSM Document: %s", d.Get("name").(string)) + + docInput := &ssm.CreateDocumentInput{ + Name: aws.String(d.Get("name").(string)), + Content: aws.String(d.Get("content").(string)), + DocumentType: aws.String(d.Get("document_type").(string)), + } + + log.Printf("[DEBUG] Waiting for SSM Document %q to be created", d.Get("name").(string)) + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + resp, err := ssmconn.CreateDocument(docInput) + + if err != nil { + return resource.NonRetryableError(err) + } + + d.SetId(*resp.DocumentDescription.Name) + return nil + }) + + if err != nil { + return errwrap.Wrapf("[ERROR] Error creating SSM document: {{err}}", err) + } + + if v, ok := d.GetOk("permissions"); ok && v != nil { + if err := setDocumentPermissions(d, meta); err != nil { + return err + } + } else { + log.Printf("[DEBUG] Not setting permissions for %q", d.Id()) + } + + return resourceAwsSsmDocumentRead(d, meta) +} + +func resourceAwsSsmDocumentRead(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + log.Printf("[DEBUG] Reading SSM Document: %s", d.Id()) + + docInput := &ssm.DescribeDocumentInput{ + Name: aws.String(d.Get("name").(string)), + } + + resp, err := ssmconn.DescribeDocument(docInput) + if err != nil { + if ssmErr, ok := err.(awserr.Error); ok && ssmErr.Code() == "InvalidDocument" { + log.Printf("[WARN] SSM Document not found so removing from state") + d.SetId("") + return nil + } + return errwrap.Wrapf("[ERROR] Error describing SSM document: {{err}}", err) + } + + doc := resp.Document + d.Set("created_date", doc.CreatedDate) + d.Set("default_version", 
doc.DefaultVersion)
+ d.Set("description", doc.Description)
+ d.Set("schema_version", doc.SchemaVersion)
+
+ if _, ok := d.GetOk("document_type"); ok {
+ d.Set("document_type", doc.DocumentType)
+ }
+
+ d.Set("document_version", doc.DocumentVersion)
+ d.Set("hash", doc.Hash)
+ d.Set("hash_type", doc.HashType)
+ d.Set("latest_version", doc.LatestVersion)
+ d.Set("name", doc.Name)
+ d.Set("owner", doc.Owner)
+ d.Set("platform_types", flattenStringList(doc.PlatformTypes))
+ if err := d.Set("arn", flattenAwsSsmDocumentArn(meta, doc.Name)); err != nil {
+ return fmt.Errorf("Error setting arn: %#v", err)
+ }
+
+ d.Set("status", doc.Status)
+
+ gp, err := getDocumentPermissions(d, meta)
+
+ if err != nil {
+ return errwrap.Wrapf("[ERROR] Error reading SSM document permissions: {{err}}", err)
+ }
+
+ d.Set("permissions", gp)
+
+ params := make([]map[string]interface{}, 0)
+ for i := 0; i < len(doc.Parameters); i++ {
+
+ dp := doc.Parameters[i]
+ param := make(map[string]interface{})
+
+ if dp.DefaultValue != nil {
+ param["default_value"] = *dp.DefaultValue
+ }
+ if dp.Description != nil {
+ param["description"] = *dp.Description
+ }
+ if dp.Name != nil {
+ param["name"] = *dp.Name
+ }
+ if dp.Type != nil {
+ param["type"] = *dp.Type
+ }
+ params = append(params, param)
+ }
+
+ if len(params) == 0 {
+ params = make([]map[string]interface{}, 1)
+ }
+
+ if err := d.Set("parameter", params); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func flattenAwsSsmDocumentArn(meta interface{}, docName *string) string {
+ region := meta.(*AWSClient).region
+
+ return fmt.Sprintf("arn:aws:ssm:%s::document/%s", region, *docName)
+}
+
+func resourceAwsSsmDocumentUpdate(d *schema.ResourceData, meta interface{}) error {
+
+ if _, ok := d.GetOk("permissions"); ok {
+ if err := setDocumentPermissions(d, meta); err != nil {
+ return err
+ }
+ } else {
+ log.Printf("[DEBUG] Not setting document permissions on %q", d.Id())
+ }
+
+ if !d.HasChange("content") {
+ return nil
+ }
+
+ if schemaVersion, ok := d.GetOk("schema_version"); ok {
+ schemaNumber, err := strconv.ParseFloat(schemaVersion.(string), 64)
+ if err != nil {
+ return fmt.Errorf("Error parsing schema_version %q: %s", schemaVersion.(string), err)
+ }
+
+ if schemaNumber < MINIMUM_VERSIONED_SCHEMA {
+ log.Printf("[DEBUG] Skipping document update because schema version %s is below 2.0: %q", schemaVersion.(string), d.Id())
+ return nil
+ }
+ }
+
+ if err := updateAwsSSMDocument(d, meta); err != nil {
+ return err
+ }
+
+ return resourceAwsSsmDocumentRead(d, meta)
+}
+
+func resourceAwsSsmDocumentDelete(d *schema.ResourceData, meta interface{}) error {
+ ssmconn := meta.(*AWSClient).ssmconn
+
+ if err := deleteDocumentPermissions(d, meta); err != nil {
+ return err
+ }
+
+ log.Printf("[INFO] Deleting SSM Document: %s", d.Id())
+
+ params := &ssm.DeleteDocumentInput{
+ Name: aws.String(d.Get("name").(string)),
+ }
+
+ _, err := ssmconn.DeleteDocument(params)
+ if err != nil {
+ return err
+ }
+
+ log.Printf("[DEBUG] Waiting for SSM Document %q to be deleted", d.Get("name").(string))
+ err = resource.Retry(10*time.Minute, func() *resource.RetryError {
+ _, err := ssmconn.DescribeDocument(&ssm.DescribeDocumentInput{
+ Name: aws.String(d.Get("name").(string)),
+ })
+
+ if err != nil {
+ awsErr, ok := err.(awserr.Error)
+ if !ok {
+ return resource.NonRetryableError(err)
+ }
+
+ if awsErr.Code() == "InvalidDocument" {
+ return nil
+ }
+
+ return resource.NonRetryableError(err)
+ }
+
+ return resource.RetryableError(
+ fmt.Errorf("%q: Timeout while waiting for the document to be deleted", d.Id()))
+ })
+ if err != nil {
+ return err
+ }
+
+ d.SetId("")
+
+ return nil
+}
+
+func 
setDocumentPermissions(d *schema.ResourceData, meta interface{}) error {
+ ssmconn := meta.(*AWSClient).ssmconn
+
+ log.Printf("[INFO] Setting permissions for document: %s", d.Id())
+ permission := d.Get("permissions").(map[string]interface{})
+
+ ids := aws.StringSlice([]string{permission["account_ids"].(string)})
+
+ if strings.Contains(permission["account_ids"].(string), ",") {
+ ids = aws.StringSlice(strings.Split(permission["account_ids"].(string), ","))
+ }
+
+ permInput := &ssm.ModifyDocumentPermissionInput{
+ Name: aws.String(d.Get("name").(string)),
+ PermissionType: aws.String(permission["type"].(string)),
+ AccountIdsToAdd: ids,
+ }
+
+ _, err := ssmconn.ModifyDocumentPermission(permInput)
+
+ if err != nil {
+ return errwrap.Wrapf("[ERROR] Error setting permissions for SSM document: {{err}}", err)
+ }
+
+ return nil
+}
+
+func getDocumentPermissions(d *schema.ResourceData, meta interface{}) (map[string]interface{}, error) {
+ ssmconn := meta.(*AWSClient).ssmconn
+
+ log.Printf("[INFO] Getting permissions for document: %s", d.Id())
+
+ // The permission type cannot be read back out of the nested schema here,
+ // so "Share" (the only type the API accepts) is assumed.
+ permissionType := "Share"
+
+ permInput := &ssm.DescribeDocumentPermissionInput{
+ Name: aws.String(d.Get("name").(string)),
+ PermissionType: aws.String(permissionType),
+ }
+
+ resp, err := ssmconn.DescribeDocumentPermission(permInput)
+
+ if err != nil {
+ return nil, errwrap.Wrapf("[ERROR] Error reading permissions for SSM document: {{err}}", err)
+ }
+
+ accountIds := make([]string, len(resp.AccountIds))
+ for i := 0; i < len(resp.AccountIds); i++ {
+ accountIds[i] = *resp.AccountIds[i]
+ }
+
+ ids := strings.Join(accountIds, ",")
+
+ if ids == "" {
+ return nil, nil
+ }
+
+ perms := make(map[string]interface{})
+ perms["type"] = permissionType
+ perms["account_ids"] = ids
+
+ return perms, nil
+}
+
+func deleteDocumentPermissions(d *schema.ResourceData, meta interface{}) error {
+ ssmconn := meta.(*AWSClient).ssmconn
+
+ log.Printf("[INFO] Removing permissions from document: %s", d.Id())
+
+ permInput := &ssm.ModifyDocumentPermissionInput{
+ Name: aws.String(d.Get("name").(string)),
+ PermissionType: aws.String("Share"),
+ AccountIdsToRemove: aws.StringSlice([]string{"all"}),
+ }
+
+ _, err := ssmconn.ModifyDocumentPermission(permInput)
+
+ if err != nil {
+ return errwrap.Wrapf("[ERROR] Error removing permissions for SSM document: {{err}}", err)
+ }
+
+ return nil
+}
+
+func updateAwsSSMDocument(d *schema.ResourceData, meta interface{}) error {
+ log.Printf("[INFO] Updating SSM Document: %s", d.Id())
+
+ name := d.Get("name").(string)
+
+ updateDocInput := &ssm.UpdateDocumentInput{
+ Name: aws.String(name),
+ Content: aws.String(d.Get("content").(string)),
+ DocumentVersion: aws.String(d.Get("default_version").(string)),
+ }
+
+ newDefaultVersion := d.Get("default_version").(string)
+
+ ssmconn := meta.(*AWSClient).ssmconn
+ updated, err := ssmconn.UpdateDocument(updateDocInput)
+
+ if isAWSErr(err, "DuplicateDocumentContent", "") {
+ log.Printf("[DEBUG] Content is a duplicate of the latest version so update is not necessary: %s", d.Id())
+ newDefaultVersion = d.Get("latest_version").(string)
+ log.Printf("[INFO] Updating the default version to the latest version %s: %s", newDefaultVersion, d.Id())
+ } else if err != nil {
+ return errwrap.Wrapf("Error updating SSM document: {{err}}", err)
+ } else {
+ log.Printf("[INFO] Updating the default version to 
the new version %s: %s", newDefaultVersion, d.Id()) + newDefaultVersion = *updated.DocumentDescription.DocumentVersion + } + + updateDefaultInput := &ssm.UpdateDocumentDefaultVersionInput{ + Name: aws.String(name), + DocumentVersion: aws.String(newDefaultVersion), + } + + _, err = ssmconn.UpdateDocumentDefaultVersion(updateDefaultInput) + + if err != nil { + return errwrap.Wrapf("Error updating the default document version to that of the updated document: {{err}}", err) + } + return nil +} + +func validateAwsSSMDocumentType(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + types := map[string]bool{ + "Command": true, + "Policy": true, + "Automation": true, + } + + if !types[value] { + errors = append(errors, fmt.Errorf("Document type %s is invalid. Valid types are Command, Policy or Automation", value)) + } + return +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_maintenance_window.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_maintenance_window.go new file mode 100644 index 000000000..5ce566778 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_maintenance_window.go @@ -0,0 +1,150 @@ +package aws + +import ( + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssm" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSsmMaintenanceWindow() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSsmMaintenanceWindowCreate, + Read: resourceAwsSsmMaintenanceWindowRead, + Update: resourceAwsSsmMaintenanceWindowUpdate, + Delete: resourceAwsSsmMaintenanceWindowDelete, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + + "schedule": { + Type: schema.TypeString, + Required: true, + }, + + "duration": { + Type: schema.TypeInt, + Required: true, + }, + + "cutoff": { + Type: schema.TypeInt, + Required: true, + }, + + "allow_unassociated_targets": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "enabled": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + }, + } +} + +func resourceAwsSsmMaintenanceWindowCreate(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + params := &ssm.CreateMaintenanceWindowInput{ + Name: aws.String(d.Get("name").(string)), + Schedule: aws.String(d.Get("schedule").(string)), + Duration: aws.Int64(int64(d.Get("duration").(int))), + Cutoff: aws.Int64(int64(d.Get("cutoff").(int))), + AllowUnassociatedTargets: aws.Bool(d.Get("allow_unassociated_targets").(bool)), + } + + resp, err := ssmconn.CreateMaintenanceWindow(params) + if err != nil { + return err + } + + d.SetId(*resp.WindowId) + return resourceAwsSsmMaintenanceWindowRead(d, meta) +} + +func resourceAwsSsmMaintenanceWindowUpdate(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + params := &ssm.UpdateMaintenanceWindowInput{ + WindowId: aws.String(d.Id()), + } + + if d.HasChange("name") { + params.Name = aws.String(d.Get("name").(string)) + } + + if d.HasChange("schedule") { + params.Schedule = aws.String(d.Get("schedule").(string)) + } + + if d.HasChange("duration") { + params.Duration = aws.Int64(int64(d.Get("duration").(int))) + } + + if d.HasChange("cutoff") { + params.Cutoff = aws.Int64(int64(d.Get("cutoff").(int))) + } + + if d.HasChange("allow_unassociated_targets") { + params.AllowUnassociatedTargets = 
aws.Bool(d.Get("allow_unassociated_targets").(bool)) + } + + if d.HasChange("enabled") { + params.Enabled = aws.Bool(d.Get("enabled").(bool)) + } + + _, err := ssmconn.UpdateMaintenanceWindow(params) + if err != nil { + return err + } + + return resourceAwsSsmMaintenanceWindowRead(d, meta) +} + +func resourceAwsSsmMaintenanceWindowRead(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + params := &ssm.GetMaintenanceWindowInput{ + WindowId: aws.String(d.Id()), + } + + resp, err := ssmconn.GetMaintenanceWindow(params) + if err != nil { + return err + } + + d.Set("name", resp.Name) + d.Set("cutoff", resp.Cutoff) + d.Set("duration", resp.Duration) + d.Set("enabled", resp.Enabled) + d.Set("allow_unassociated_targets", resp.AllowUnassociatedTargets) + d.Set("schedule", resp.Schedule) + + return nil +} + +func resourceAwsSsmMaintenanceWindowDelete(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + log.Printf("[INFO] Deleting SSM Maintenance Window: %s", d.Id()) + + params := &ssm.DeleteMaintenanceWindowInput{ + WindowId: aws.String(d.Id()), + } + + _, err := ssmconn.DeleteMaintenanceWindow(params) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_maintenance_window_target.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_maintenance_window_target.go new file mode 100644 index 000000000..c460ca6e3 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_maintenance_window_target.go @@ -0,0 +1,143 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssm" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSsmMaintenanceWindowTarget() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSsmMaintenanceWindowTargetCreate, + Read: resourceAwsSsmMaintenanceWindowTargetRead, + Delete: resourceAwsSsmMaintenanceWindowTargetDelete, + + Schema: map[string]*schema.Schema{ + "window_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "resource_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "targets": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + }, + "values": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + + "owner_information": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + }, + } +} + +func resourceAwsSsmMaintenanceWindowTargetCreate(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + log.Printf("[INFO] Registering SSM Maintenance Window Target") + + params := &ssm.RegisterTargetWithMaintenanceWindowInput{ + WindowId: aws.String(d.Get("window_id").(string)), + ResourceType: aws.String(d.Get("resource_type").(string)), + Targets: expandAwsSsmTargets(d), + } + + if v, ok := d.GetOk("owner_information"); ok { + params.OwnerInformation = aws.String(v.(string)) + } + + resp, err := ssmconn.RegisterTargetWithMaintenanceWindow(params) + if err != nil { + return err + } + + d.SetId(*resp.WindowTargetId) + + return resourceAwsSsmMaintenanceWindowTargetRead(d, meta) +} + +func resourceAwsSsmMaintenanceWindowTargetRead(d 
*schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + params := &ssm.DescribeMaintenanceWindowTargetsInput{ + WindowId: aws.String(d.Get("window_id").(string)), + Filters: []*ssm.MaintenanceWindowFilter{ + { + Key: aws.String("WindowTargetId"), + Values: []*string{aws.String(d.Id())}, + }, + }, + } + + resp, err := ssmconn.DescribeMaintenanceWindowTargets(params) + if err != nil { + return err + } + + found := false + for _, t := range resp.Targets { + if *t.WindowTargetId == d.Id() { + found = true + + d.Set("owner_information", t.OwnerInformation) + d.Set("window_id", t.WindowId) + d.Set("resource_type", t.ResourceType) + + if err := d.Set("targets", flattenAwsSsmTargets(t.Targets)); err != nil { + return fmt.Errorf("[DEBUG] Error setting targets error: %#v", err) + } + } + } + + if !found { + log.Printf("[INFO] Maintenance Window Target not found. Removing from state") + d.SetId("") + return nil + } + + return nil +} + +func resourceAwsSsmMaintenanceWindowTargetDelete(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + log.Printf("[INFO] Deregistering SSM Maintenance Window Target: %s", d.Id()) + + params := &ssm.DeregisterTargetFromMaintenanceWindowInput{ + WindowId: aws.String(d.Get("window_id").(string)), + WindowTargetId: aws.String(d.Id()), + } + + _, err := ssmconn.DeregisterTargetFromMaintenanceWindow(params) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_maintenance_window_task.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_maintenance_window_task.go new file mode 100644 index 000000000..1931d385a --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_maintenance_window_task.go @@ -0,0 +1,283 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssm" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSsmMaintenanceWindowTask() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSsmMaintenanceWindowTaskCreate, + Read: resourceAwsSsmMaintenanceWindowTaskRead, + Delete: resourceAwsSsmMaintenanceWindowTaskDelete, + + Schema: map[string]*schema.Schema{ + "window_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "max_concurrency": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "max_errors": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "task_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "task_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "service_role_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "targets": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + }, + "values": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + + "priority": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "logging_info": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "s3_bucket_name": { + Type: schema.TypeString, + Required: true, + }, + "s3_region": { + 
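+ // Region of the logging bucket; mapped onto ssm.LoggingInfo.S3Region
+ // by expandAwsSsmMaintenanceWindowLoggingInfo below.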
Type: schema.TypeString, + Required: true, + }, + "s3_bucket_prefix": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + "task_parameters": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "values": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + } +} + +func expandAwsSsmMaintenanceWindowLoggingInfo(config []interface{}) *ssm.LoggingInfo { + + loggingConfig := config[0].(map[string]interface{}) + + loggingInfo := &ssm.LoggingInfo{ + S3BucketName: aws.String(loggingConfig["s3_bucket_name"].(string)), + S3Region: aws.String(loggingConfig["s3_region"].(string)), + } + + if s := loggingConfig["s3_bucket_prefix"].(string); s != "" { + loggingInfo.S3KeyPrefix = aws.String(s) + } + + return loggingInfo +} + +func flattenAwsSsmMaintenanceWindowLoggingInfo(loggingInfo *ssm.LoggingInfo) []interface{} { + + result := make(map[string]interface{}) + result["s3_bucket_name"] = *loggingInfo.S3BucketName + result["s3_region"] = *loggingInfo.S3Region + + if loggingInfo.S3KeyPrefix != nil { + result["s3_bucket_prefix"] = *loggingInfo.S3KeyPrefix + } + + return []interface{}{result} +} + +func expandAwsSsmTaskParameters(config []interface{}) map[string]*ssm.MaintenanceWindowTaskParameterValueExpression { + params := make(map[string]*ssm.MaintenanceWindowTaskParameterValueExpression) + for _, v := range config { + paramConfig := v.(map[string]interface{}) + params[paramConfig["name"].(string)] = &ssm.MaintenanceWindowTaskParameterValueExpression{ + Values: expandStringList(paramConfig["values"].([]interface{})), + } + } + return params +} + +func flattenAwsSsmTaskParameters(taskParameters map[string]*ssm.MaintenanceWindowTaskParameterValueExpression) []interface{} { + result := make([]interface{}, 0, len(taskParameters)) + for k, v := range taskParameters { + taskParam := map[string]interface{}{ + "name": k, + "values": flattenStringList(v.Values), + } + result = append(result, taskParam) + } + + return result +} + +func resourceAwsSsmMaintenanceWindowTaskCreate(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + log.Printf("[INFO] Registering SSM Maintenance Window Task") + + params := &ssm.RegisterTaskWithMaintenanceWindowInput{ + WindowId: aws.String(d.Get("window_id").(string)), + MaxConcurrency: aws.String(d.Get("max_concurrency").(string)), + MaxErrors: aws.String(d.Get("max_errors").(string)), + TaskType: aws.String(d.Get("task_type").(string)), + ServiceRoleArn: aws.String(d.Get("service_role_arn").(string)), + TaskArn: aws.String(d.Get("task_arn").(string)), + Targets: expandAwsSsmTargets(d), + } + + if v, ok := d.GetOk("priority"); ok { + params.Priority = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("logging_info"); ok { + params.LoggingInfo = expandAwsSsmMaintenanceWindowLoggingInfo(v.([]interface{})) + } + + if v, ok := d.GetOk("task_parameters"); ok { + params.TaskParameters = expandAwsSsmTaskParameters(v.([]interface{})) + } + + resp, err := ssmconn.RegisterTaskWithMaintenanceWindow(params) + if err != nil { + return err + } + + d.SetId(*resp.WindowTaskId) + + return resourceAwsSsmMaintenanceWindowTaskRead(d, meta) +} + +func resourceAwsSsmMaintenanceWindowTaskRead(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + params := &ssm.DescribeMaintenanceWindowTasksInput{ + 
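// Note: the task is located by listing every task registered with the window and matching on WindowTaskId in the loop below. +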
WindowId: aws.String(d.Get("window_id").(string)), + } + + resp, err := ssmconn.DescribeMaintenanceWindowTasks(params) + if err != nil { + return err + } + + found := false + for _, t := range resp.Tasks { + if *t.WindowTaskId == d.Id() { + found = true + + d.Set("window_id", t.WindowId) + d.Set("max_concurrency", t.MaxConcurrency) + d.Set("max_errors", t.MaxErrors) + d.Set("task_type", t.Type) + d.Set("service_role_arn", t.ServiceRoleArn) + d.Set("task_arn", t.TaskArn) + d.Set("priority", t.Priority) + + if t.LoggingInfo != nil { + if err := d.Set("logging_info", flattenAwsSsmMaintenanceWindowLoggingInfo(t.LoggingInfo)); err != nil { + return fmt.Errorf("[DEBUG] Error setting logging_info: %#v", err) + } + } + + if t.TaskParameters != nil { + if err := d.Set("task_parameters", flattenAwsSsmTaskParameters(t.TaskParameters)); err != nil { + return fmt.Errorf("[DEBUG] Error setting task_parameters: %#v", err) + } + } + + if err := d.Set("targets", flattenAwsSsmTargets(t.Targets)); err != nil { + return fmt.Errorf("[DEBUG] Error setting targets: %#v", err) + } + } + } + + if !found { + log.Printf("[INFO] Maintenance Window Task not found. Removing from state") + d.SetId("") + return nil + } + + return nil +} + +func resourceAwsSsmMaintenanceWindowTaskDelete(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + log.Printf("[INFO] Deregistering SSM Maintenance Window Task: %s", d.Id()) + + params := &ssm.DeregisterTaskFromMaintenanceWindowInput{ + WindowId: aws.String(d.Get("window_id").(string)), + WindowTaskId: aws.String(d.Id()), + } + + _, err := ssmconn.DeregisterTaskFromMaintenanceWindow(params) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_parameter.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_parameter.go new file mode 100644 index 000000000..16b44bebd --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_parameter.go @@ -0,0 +1,128 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssm" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSsmParameter() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSsmParameterCreate, + Read: resourceAwsSsmParameterRead, + Update: resourceAwsSsmParameterUpdate, + Delete: resourceAwsSsmParameterDelete, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateSsmParameterType, + }, + "value": { + Type: schema.TypeString, + Required: true, + Sensitive: true, + }, + "key_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsSsmParameterCreate(d *schema.ResourceData, meta interface{}) error { + return putAwsSSMParameter(d, meta) +} + +func resourceAwsSsmParameterRead(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + log.Printf("[DEBUG] Reading SSM Parameter: %s", d.Id()) + + paramInput := &ssm.GetParametersInput{ + Names: []*string{ + aws.String(d.Get("name").(string)), + }, + WithDecryption: aws.Bool(true), + } + + resp, err := ssmconn.GetParameters(paramInput) + + if err != nil { + return errwrap.Wrapf("[ERROR] Error describing SSM
parameter: {{err}}", err) + } + + if len(resp.InvalidParameters) > 0 { + return fmt.Errorf("[ERROR] SSM Parameter %s is invalid", d.Id()) + } + + param := resp.Parameters[0] + d.Set("name", param.Name) + d.Set("type", param.Type) + d.Set("value", param.Value) + + return nil +} + +func resourceAwsSsmParameterUpdate(d *schema.ResourceData, meta interface{}) error { + return putAwsSSMParameter(d, meta) +} + +func resourceAwsSsmParameterDelete(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + log.Printf("[INFO] Deleting SSM Parameter: %s", d.Id()) + + paramInput := &ssm.DeleteParameterInput{ + Name: aws.String(d.Get("name").(string)), + } + + _, err := ssmconn.DeleteParameter(paramInput) + if err != nil { + return err + } + + d.SetId("") + + return nil +} + +func putAwsSSMParameter(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + log.Printf("[INFO] Creating SSM Parameter: %s", d.Get("name").(string)) + + paramInput := &ssm.PutParameterInput{ + Name: aws.String(d.Get("name").(string)), + Type: aws.String(d.Get("type").(string)), + Value: aws.String(d.Get("value").(string)), + Overwrite: aws.Bool(!d.IsNewResource()), + } + if keyID, ok := d.GetOk("key_id"); ok { + log.Printf("[DEBUG] Setting key_id for SSM Parameter %s: %s", d.Get("name").(string), keyID.(string)) + paramInput.SetKeyId(keyID.(string)) + } + + log.Printf("[DEBUG] Waiting for SSM Parameter %q to be updated", d.Get("name").(string)) + _, err := ssmconn.PutParameter(paramInput) + + if err != nil { + return errwrap.Wrapf("[ERROR] Error creating SSM parameter: {{err}}", err) + } + + d.SetId(d.Get("name").(string)) + + return resourceAwsSsmParameterRead(d, meta) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_patch_baseline.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_patch_baseline.go new file mode 100644 index 000000000..4109c5083 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_patch_baseline.go @@ -0,0 +1,277 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssm" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSsmPatchBaseline() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSsmPatchBaselineCreate, + Read: resourceAwsSsmPatchBaselineRead, + Delete: resourceAwsSsmPatchBaselineDelete, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "global_filter": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 4, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + }, + "values": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + + "approval_rule": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "approve_after_days": { + Type: schema.TypeInt, + Required: true, + }, + + "patch_filter": { + Type: schema.TypeList, + Required: true, + MaxItems: 10, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + }, + "values": { + Type: schema.TypeList, + Required: 
true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + }, + }, + + "approved_patches": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "rejected_patches": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } +} + +func resourceAwsSsmPatchBaselineCreate(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + params := &ssm.CreatePatchBaselineInput{ + Name: aws.String(d.Get("name").(string)), + } + + if v, ok := d.GetOk("description"); ok { + params.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("approved_patches"); ok && v.(*schema.Set).Len() > 0 { + params.ApprovedPatches = expandStringList(v.(*schema.Set).List()) + } + + if v, ok := d.GetOk("rejected_patches"); ok && v.(*schema.Set).Len() > 0 { + params.RejectedPatches = expandStringList(v.(*schema.Set).List()) + } + + if _, ok := d.GetOk("global_filter"); ok { + params.GlobalFilters = expandAwsSsmPatchFilterGroup(d) + } + + if _, ok := d.GetOk("approval_rule"); ok { + params.ApprovalRules = expandAwsSsmPatchRuleGroup(d) + } + + resp, err := ssmconn.CreatePatchBaseline(params) + if err != nil { + return err + } + + d.SetId(*resp.BaselineId) + return resourceAwsSsmPatchBaselineRead(d, meta) +} + +func resourceAwsSsmPatchBaselineRead(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + params := &ssm.GetPatchBaselineInput{ + BaselineId: aws.String(d.Id()), + } + + resp, err := ssmconn.GetPatchBaseline(params) + if err != nil { + return err + } + + d.Set("name", resp.Name) + d.Set("description", resp.Description) + d.Set("approved_patches", flattenStringList(resp.ApprovedPatches)) + d.Set("rejected_patches", flattenStringList(resp.RejectedPatches)) + + if err := d.Set("global_filter", flattenAwsSsmPatchFilterGroup(resp.GlobalFilters)); err != nil { + return fmt.Errorf("[DEBUG] Error setting global filters error: %#v", err) + } + + if err := d.Set("approval_rule", flattenAwsSsmPatchRuleGroup(resp.ApprovalRules)); err != nil { + return fmt.Errorf("[DEBUG] Error setting approval rules error: %#v", err) + } + + return nil +} + +func resourceAwsSsmPatchBaselineDelete(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + log.Printf("[INFO] Deleting SSM Patch Baseline: %s", d.Id()) + + params := &ssm.DeletePatchBaselineInput{ + BaselineId: aws.String(d.Id()), + } + + _, err := ssmconn.DeletePatchBaseline(params) + if err != nil { + return err + } + + return nil +} + +func expandAwsSsmPatchFilterGroup(d *schema.ResourceData) *ssm.PatchFilterGroup { + var filters []*ssm.PatchFilter + + filterConfig := d.Get("global_filter").([]interface{}) + + for _, fConfig := range filterConfig { + config := fConfig.(map[string]interface{}) + + filter := &ssm.PatchFilter{ + Key: aws.String(config["key"].(string)), + Values: expandStringList(config["values"].([]interface{})), + } + + filters = append(filters, filter) + } + + return &ssm.PatchFilterGroup{ + PatchFilters: filters, + } +} + +func flattenAwsSsmPatchFilterGroup(group *ssm.PatchFilterGroup) []map[string]interface{} { + if len(group.PatchFilters) == 0 { + return nil + } + + result := make([]map[string]interface{}, 0, len(group.PatchFilters)) + + for _, filter := range group.PatchFilters { + f := make(map[string]interface{}) + f["key"] = *filter.Key + 
f["values"] = flattenStringList(filter.Values) + + result = append(result, f) + } + + return result +} + +func expandAwsSsmPatchRuleGroup(d *schema.ResourceData) *ssm.PatchRuleGroup { + var rules []*ssm.PatchRule + + ruleConfig := d.Get("approval_rule").([]interface{}) + + for _, rConfig := range ruleConfig { + rCfg := rConfig.(map[string]interface{}) + + var filters []*ssm.PatchFilter + filterConfig := rCfg["patch_filter"].([]interface{}) + + for _, fConfig := range filterConfig { + fCfg := fConfig.(map[string]interface{}) + + filter := &ssm.PatchFilter{ + Key: aws.String(fCfg["key"].(string)), + Values: expandStringList(fCfg["values"].([]interface{})), + } + + filters = append(filters, filter) + } + + filterGroup := &ssm.PatchFilterGroup{ + PatchFilters: filters, + } + + rule := &ssm.PatchRule{ + ApproveAfterDays: aws.Int64(int64(rCfg["approve_after_days"].(int))), + PatchFilterGroup: filterGroup, + } + + rules = append(rules, rule) + } + + return &ssm.PatchRuleGroup{ + PatchRules: rules, + } +} + +func flattenAwsSsmPatchRuleGroup(group *ssm.PatchRuleGroup) []map[string]interface{} { + if len(group.PatchRules) == 0 { + return nil + } + + result := make([]map[string]interface{}, 0, len(group.PatchRules)) + + for _, rule := range group.PatchRules { + r := make(map[string]interface{}) + r["approve_after_days"] = *rule.ApproveAfterDays + r["patch_filter"] = flattenAwsSsmPatchFilterGroup(rule.PatchFilterGroup) + result = append(result, r) + } + + return result +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_patch_group.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_patch_group.go new file mode 100644 index 000000000..20327b248 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_ssm_patch_group.go @@ -0,0 +1,95 @@ +package aws + +import ( + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssm" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSsmPatchGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSsmPatchGroupCreate, + Read: resourceAwsSsmPatchGroupRead, + Delete: resourceAwsSsmPatchGroupDelete, + + Schema: map[string]*schema.Schema{ + "baseline_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "patch_group": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsSsmPatchGroupCreate(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + params := &ssm.RegisterPatchBaselineForPatchGroupInput{ + BaselineId: aws.String(d.Get("baseline_id").(string)), + PatchGroup: aws.String(d.Get("patch_group").(string)), + } + + resp, err := ssmconn.RegisterPatchBaselineForPatchGroup(params) + if err != nil { + return err + } + + d.SetId(*resp.PatchGroup) + return resourceAwsSsmPatchGroupRead(d, meta) +} + +func resourceAwsSsmPatchGroupRead(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + params := &ssm.DescribePatchGroupsInput{} + + resp, err := ssmconn.DescribePatchGroups(params) + if err != nil { + return err + } + + found := false + for _, t := range resp.Mappings { + if *t.PatchGroup == d.Id() { + found = true + + d.Set("patch_group", t.PatchGroup) + d.Set("baseline_id", t.BaselineIdentity.BaselineId) + } + } + + if !found { + log.Printf("[INFO] Patch Group not found. 
Removing from state") + d.SetId("") + return nil + } + + return nil + +} + +func resourceAwsSsmPatchGroupDelete(d *schema.ResourceData, meta interface{}) error { + ssmconn := meta.(*AWSClient).ssmconn + + log.Printf("[INFO] Deleting SSM Patch Group: %s", d.Id()) + + params := &ssm.DeregisterPatchBaselineForPatchGroupInput{ + BaselineId: aws.String(d.Get("baseline_id").(string)), + PatchGroup: aws.String(d.Get("patch_group").(string)), + } + + _, err := ssmconn.DeregisterPatchBaselineForPatchGroup(params) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_subnet.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_subnet.go new file mode 100644 index 000000000..88d23e829 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_subnet.go @@ -0,0 +1,387 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSubnet() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSubnetCreate, + Read: resourceAwsSubnetRead, + Update: resourceAwsSubnetUpdate, + Delete: resourceAwsSubnetDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + SchemaVersion: 1, + MigrateState: resourceAwsSubnetMigrateState, + + Schema: map[string]*schema.Schema{ + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "cidr_block": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "ipv6_cidr_block": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "availability_zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "map_public_ip_on_launch": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "assign_ipv6_address_on_creation": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "ipv6_cidr_block_association_id": { + Type: schema.TypeString, + Computed: true, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsSubnetCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + createOpts := &ec2.CreateSubnetInput{ + AvailabilityZone: aws.String(d.Get("availability_zone").(string)), + CidrBlock: aws.String(d.Get("cidr_block").(string)), + VpcId: aws.String(d.Get("vpc_id").(string)), + } + + if v, ok := d.GetOk("ipv6_cidr_block"); ok { + createOpts.Ipv6CidrBlock = aws.String(v.(string)) + } + + var err error + resp, err := conn.CreateSubnet(createOpts) + + if err != nil { + return fmt.Errorf("Error creating subnet: %s", err) + } + + // Get the ID and store it + subnet := resp.Subnet + d.SetId(*subnet.SubnetId) + log.Printf("[INFO] Subnet ID: %s", *subnet.SubnetId) + + // Wait for the Subnet to become available + log.Printf("[DEBUG] Waiting for subnet (%s) to become available", *subnet.SubnetId) + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"available"}, + Refresh: SubnetStateRefreshFunc(conn, *subnet.SubnetId), + Timeout: 10 * time.Minute, + } + + _, err = stateConf.WaitForState() + + if err != nil { + return fmt.Errorf( + "Error waiting for subnet (%s) to become ready: %s", + d.Id(), err) + } + + return resourceAwsSubnetUpdate(d, 
meta) +} + +func resourceAwsSubnetRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + resp, err := conn.DescribeSubnets(&ec2.DescribeSubnetsInput{ + SubnetIds: []*string{aws.String(d.Id())}, + }) + + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidSubnetID.NotFound" { + // Update state to indicate the subnet no longer exists. + d.SetId("") + return nil + } + return err + } + if resp == nil { + return nil + } + + subnet := resp.Subnets[0] + + d.Set("vpc_id", subnet.VpcId) + d.Set("availability_zone", subnet.AvailabilityZone) + d.Set("cidr_block", subnet.CidrBlock) + d.Set("map_public_ip_on_launch", subnet.MapPublicIpOnLaunch) + d.Set("assign_ipv6_address_on_creation", subnet.AssignIpv6AddressOnCreation) + for _, a := range subnet.Ipv6CidrBlockAssociationSet { + if *a.Ipv6CidrBlockState.State == "associated" { //we can only ever have 1 IPv6 block associated at once + d.Set("ipv6_cidr_block_association_id", a.AssociationId) + d.Set("ipv6_cidr_block", a.Ipv6CidrBlock) + break + } else { + d.Set("ipv6_cidr_block_association_id", "") // we blank these out to remove old entries + d.Set("ipv6_cidr_block", "") + } + } + d.Set("tags", tagsToMap(subnet.Tags)) + + return nil +} + +func resourceAwsSubnetUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + d.Partial(true) + + if err := setTags(conn, d); err != nil { + return err + } else { + d.SetPartial("tags") + } + + if d.HasChange("map_public_ip_on_launch") { + modifyOpts := &ec2.ModifySubnetAttributeInput{ + SubnetId: aws.String(d.Id()), + MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{ + Value: aws.Bool(d.Get("map_public_ip_on_launch").(bool)), + }, + } + + log.Printf("[DEBUG] Subnet modify attributes: %#v", modifyOpts) + + _, err := conn.ModifySubnetAttribute(modifyOpts) + + if err != nil { + return err + } else { + d.SetPartial("map_public_ip_on_launch") + } + } + + // We have to be careful here to not go through a change of association if this is a new resource + // A New resource here would denote that the Update func is called by the Create func + if d.HasChange("ipv6_cidr_block") && !d.IsNewResource() { + // We need to handle that we disassociate the IPv6 CIDR block before we try and associate the new one + // This could be an issue as, we could error out when we try and add the new one + // We may need to roll back the state and reattach the old one if this is the case + + _, new := d.GetChange("ipv6_cidr_block") + + if v, ok := d.GetOk("ipv6_cidr_block_association_id"); ok { + + //Firstly we have to disassociate the old IPv6 CIDR Block + disassociateOps := &ec2.DisassociateSubnetCidrBlockInput{ + AssociationId: aws.String(v.(string)), + } + + _, err := conn.DisassociateSubnetCidrBlock(disassociateOps) + if err != nil { + return err + } + + // Wait for the CIDR to become disassociated + log.Printf( + "[DEBUG] Waiting for IPv6 CIDR (%s) to become disassociated", + d.Id()) + stateConf := &resource.StateChangeConf{ + Pending: []string{"disassociating", "associated"}, + Target: []string{"disassociated"}, + Refresh: SubnetIpv6CidrStateRefreshFunc(conn, d.Id(), d.Get("ipv6_cidr_block_association_id").(string)), + Timeout: 3 * time.Minute, + } + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf( + "Error waiting for IPv6 CIDR (%s) to become disassociated: %s", + d.Id(), err) + } + } + + //Now we need to try and associate the new CIDR block + associatesOpts := &ec2.AssociateSubnetCidrBlockInput{ + 
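// Association of the new block is asynchronous; the StateChangeConf below polls until it reports "associated". +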
SubnetId: aws.String(d.Id()), + Ipv6CidrBlock: aws.String(new.(string)), + } + + resp, err := conn.AssociateSubnetCidrBlock(associatesOpts) + if err != nil { + //The big question here is, do we want to try and reassociate the old one?? + //If we have a failure here, then we may be in a situation that we have nothing associated + return err + } + + // Wait for the CIDR to become associated + log.Printf( + "[DEBUG] Waiting for IPv6 CIDR (%s) to become associated", + d.Id()) + stateConf := &resource.StateChangeConf{ + Pending: []string{"associating", "disassociated"}, + Target: []string{"associated"}, + Refresh: SubnetIpv6CidrStateRefreshFunc(conn, d.Id(), *resp.Ipv6CidrBlockAssociation.AssociationId), + Timeout: 3 * time.Minute, + } + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf( + "Error waiting for IPv6 CIDR (%s) to become associated: %s", + d.Id(), err) + } + + d.SetPartial("ipv6_cidr_block") + } + + if d.HasChange("assign_ipv6_address_on_creation") { + modifyOpts := &ec2.ModifySubnetAttributeInput{ + SubnetId: aws.String(d.Id()), + AssignIpv6AddressOnCreation: &ec2.AttributeBooleanValue{ + Value: aws.Bool(d.Get("assign_ipv6_address_on_creation").(bool)), + }, + } + + log.Printf("[DEBUG] Subnet modify attributes: %#v", modifyOpts) + + _, err := conn.ModifySubnetAttribute(modifyOpts) + + if err != nil { + return err + } else { + d.SetPartial("assign_ipv6_address_on_creation") + } + } + + d.Partial(false) + + return resourceAwsSubnetRead(d, meta) +} + +func resourceAwsSubnetDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + log.Printf("[INFO] Deleting subnet: %s", d.Id()) + req := &ec2.DeleteSubnetInput{ + SubnetId: aws.String(d.Id()), + } + + wait := resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"destroyed"}, + Timeout: 10 * time.Minute, + MinTimeout: 1 * time.Second, + Refresh: func() (interface{}, string, error) { + _, err := conn.DeleteSubnet(req) + if err != nil { + if apiErr, ok := err.(awserr.Error); ok { + if apiErr.Code() == "DependencyViolation" { + // There is some pending operation, so just retry + // in a bit. + return 42, "pending", nil + } + + if apiErr.Code() == "InvalidSubnetID.NotFound" { + return 42, "destroyed", nil + } + } + + return 42, "failure", err + } + + return 42, "destroyed", nil + }, + } + + if _, err := wait.WaitForState(); err != nil { + return fmt.Errorf("Error deleting subnet: %s", err) + } + + return nil +} + +// SubnetStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch a Subnet. +func SubnetStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := conn.DescribeSubnets(&ec2.DescribeSubnetsInput{ + SubnetIds: []*string{aws.String(id)}, + }) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidSubnetID.NotFound" { + resp = nil + } else { + log.Printf("Error on SubnetStateRefresh: %s", err) + return nil, "", err + } + } + + if resp == nil { + // Sometimes AWS just has consistency issues and doesn't see + // our instance yet. Return an empty state. 
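+ // A nil result is treated as "not yet found", so the surrounding StateChangeConf keeps polling until it times out.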
+ return nil, "", nil + } + + subnet := resp.Subnets[0] + return subnet, *subnet.State, nil + } +} + +func SubnetIpv6CidrStateRefreshFunc(conn *ec2.EC2, id string, associationId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + opts := &ec2.DescribeSubnetsInput{ + SubnetIds: []*string{aws.String(id)}, + } + resp, err := conn.DescribeSubnets(opts) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidSubnetID.NotFound" { + resp = nil + } else { + log.Printf("Error on SubnetIpv6CidrStateRefreshFunc: %s", err) + return nil, "", err + } + } + + if resp == nil { + // Sometimes AWS just has consistency issues and doesn't see + // our instance yet. Return an empty state. + return nil, "", nil + } + + if resp.Subnets[0].Ipv6CidrBlockAssociationSet == nil { + return nil, "", nil + } + + for _, association := range resp.Subnets[0].Ipv6CidrBlockAssociationSet { + if *association.AssociationId == associationId { + return association, *association.Ipv6CidrBlockState.State, nil + } + } + + return nil, "", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_subnet_migrate.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_subnet_migrate.go new file mode 100644 index 000000000..0e0f19cf6 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_subnet_migrate.go @@ -0,0 +1,33 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/terraform" +) + +func resourceAwsSubnetMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + switch v { + case 0: + log.Println("[INFO] Found AWS Subnet State v0; migrating to v1") + return migrateSubnetStateV0toV1(is) + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateSubnetStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + if is.Empty() || is.Attributes == nil { + log.Println("[DEBUG] Empty Subnet State; nothing to migrate.") + return is, nil + } + + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + is.Attributes["assign_ipv6_address_on_creation"] = "false" + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_volume_attachment.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_volume_attachment.go new file mode 100644 index 000000000..2afcd6c67 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_volume_attachment.go @@ -0,0 +1,251 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsVolumeAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsVolumeAttachmentCreate, + Read: resourceAwsVolumeAttachmentRead, + Delete: resourceAwsVolumeAttachmentDelete, + + Schema: map[string]*schema.Schema{ + "device_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "instance_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "volume_id": { + Type: 
schema.TypeString, + Required: true, + ForceNew: true, + }, + + "force_detach": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "skip_destroy": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + }, + } +} + +func resourceAwsVolumeAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + name := d.Get("device_name").(string) + iID := d.Get("instance_id").(string) + vID := d.Get("volume_id").(string) + + // Find out if the volume is already attached to the instance, in which case + // we have nothing to do + request := &ec2.DescribeVolumesInput{ + VolumeIds: []*string{aws.String(vID)}, + Filters: []*ec2.Filter{ + &ec2.Filter{ + Name: aws.String("attachment.instance-id"), + Values: []*string{aws.String(iID)}, + }, + &ec2.Filter{ + Name: aws.String("attachment.device"), + Values: []*string{aws.String(name)}, + }, + }, + } + + vols, err := conn.DescribeVolumes(request) + if (err != nil) || (len(vols.Volumes) == 0) { + // This handles the situation where the instance is created by + // a spot request and whilst the request has been fulfilled the + // instance is not running yet + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"running"}, + Refresh: InstanceStateRefreshFunc(conn, iID, "terminated"), + Timeout: 10 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for instance (%s) to become ready: %s", + iID, err) + } + + // not attached + opts := &ec2.AttachVolumeInput{ + Device: aws.String(name), + InstanceId: aws.String(iID), + VolumeId: aws.String(vID), + } + + log.Printf("[DEBUG] Attaching Volume (%s) to Instance (%s)", vID, iID) + _, err := conn.AttachVolume(opts) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + return fmt.Errorf("[WARN] Error attaching volume (%s) to instance (%s), message: \"%s\", code: \"%s\"", + vID, iID, awsErr.Message(), awsErr.Code()) + } + return err + } + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"attaching"}, + Target: []string{"attached"}, + Refresh: volumeAttachmentStateRefreshFunc(conn, vID, iID), + Timeout: 5 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for Volume (%s) to attach to Instance: %s, error: %s", + vID, iID, err) + } + + d.SetId(volumeAttachmentID(name, vID, iID)) + return resourceAwsVolumeAttachmentRead(d, meta) +} + +func volumeAttachmentStateRefreshFunc(conn *ec2.EC2, volumeID, instanceID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + + request := &ec2.DescribeVolumesInput{ + VolumeIds: []*string{aws.String(volumeID)}, + Filters: []*ec2.Filter{ + &ec2.Filter{ + Name: aws.String("attachment.instance-id"), + Values: []*string{aws.String(instanceID)}, + }, + }, + } + + resp, err := conn.DescribeVolumes(request) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + return nil, "failed", fmt.Errorf("code: %s, message: %s", awsErr.Code(), awsErr.Message()) + } + return nil, "failed", err + } + + if len(resp.Volumes) > 0 { + v := resp.Volumes[0] + for _, a := range v.Attachments { + if a.InstanceId != nil && *a.InstanceId == instanceID { + return a, *a.State, nil + } + } + } + // assume detached if volume count is 0 + return 42, "detached", nil + } +} +func resourceAwsVolumeAttachmentRead(d 
*schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + request := &ec2.DescribeVolumesInput{ + VolumeIds: []*string{aws.String(d.Get("volume_id").(string))}, + Filters: []*ec2.Filter{ + &ec2.Filter{ + Name: aws.String("attachment.instance-id"), + Values: []*string{aws.String(d.Get("instance_id").(string))}, + }, + }, + } + + vols, err := conn.DescribeVolumes(request) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVolume.NotFound" { + d.SetId("") + return nil + } + return fmt.Errorf("Error reading EC2 volume %s for instance %s: %#v", d.Get("volume_id").(string), d.Get("instance_id").(string), err) + } + + if len(vols.Volumes) == 0 || *vols.Volumes[0].State == "available" { + log.Printf("[DEBUG] Volume Attachment (%s) not found, removing from state", d.Id()) + d.SetId("") + } + + return nil +} + +func resourceAwsVolumeAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + if _, ok := d.GetOk("skip_destroy"); ok { + log.Printf("[INFO] Found skip_destroy to be true, removing attachment %q from state", d.Id()) + d.SetId("") + return nil + } + + vID := d.Get("volume_id").(string) + iID := d.Get("instance_id").(string) + + opts := &ec2.DetachVolumeInput{ + Device: aws.String(d.Get("device_name").(string)), + InstanceId: aws.String(iID), + VolumeId: aws.String(vID), + Force: aws.Bool(d.Get("force_detach").(bool)), + } + + _, err := conn.DetachVolume(opts) + if err != nil { + return fmt.Errorf("Failed to detach Volume (%s) from Instance (%s): %s", + vID, iID, err) + } + stateConf := &resource.StateChangeConf{ + Pending: []string{"detaching"}, + Target: []string{"detached"}, + Refresh: volumeAttachmentStateRefreshFunc(conn, vID, iID), + Timeout: 5 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + log.Printf("[DEBUG] Detaching Volume (%s) from Instance (%s)", vID, iID) + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for Volume (%s) to detach from Instance (%s): %s", + vID, iID, err) + } + d.SetId("") + return nil +} + +func volumeAttachmentID(name, volumeID, instanceID string) string { + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("%s-", name)) + buf.WriteString(fmt.Sprintf("%s-", instanceID)) + buf.WriteString(fmt.Sprintf("%s-", volumeID)) + + return fmt.Sprintf("vai-%d", hashcode.String(buf.String())) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc.go new file mode 100644 index 000000000..6a8edca4b --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc.go @@ -0,0 +1,592 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsVpc() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsVpcCreate, + Read: resourceAwsVpcRead, + Update: resourceAwsVpcUpdate, + Delete: resourceAwsVpcDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsVpcInstanceImport, + }, + + SchemaVersion: 1, + MigrateState: resourceAwsVpcMigrateState, + + Schema: map[string]*schema.Schema{ + "cidr_block": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc:
validateCIDRNetworkAddress, + }, + + "instance_tenancy": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "enable_dns_hostnames": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "enable_dns_support": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + + "enable_classiclink": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "assign_generated_ipv6_cidr_block": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "main_route_table_id": { + Type: schema.TypeString, + Computed: true, + }, + + "default_network_acl_id": { + Type: schema.TypeString, + Computed: true, + }, + + "dhcp_options_id": { + Type: schema.TypeString, + Computed: true, + }, + + "default_security_group_id": { + Type: schema.TypeString, + Computed: true, + }, + + "default_route_table_id": { + Type: schema.TypeString, + Computed: true, + }, + + "ipv6_association_id": { + Type: schema.TypeString, + Computed: true, + }, + + "ipv6_cidr_block": { + Type: schema.TypeString, + Computed: true, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsVpcCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + instance_tenancy := "default" + if v, ok := d.GetOk("instance_tenancy"); ok { + instance_tenancy = v.(string) + } + + // Create the VPC + createOpts := &ec2.CreateVpcInput{ + CidrBlock: aws.String(d.Get("cidr_block").(string)), + InstanceTenancy: aws.String(instance_tenancy), + AmazonProvidedIpv6CidrBlock: aws.Bool(d.Get("assign_generated_ipv6_cidr_block").(bool)), + } + + log.Printf("[DEBUG] VPC create config: %#v", *createOpts) + vpcResp, err := conn.CreateVpc(createOpts) + if err != nil { + return fmt.Errorf("Error creating VPC: %s", err) + } + + // Get the ID and store it + vpc := vpcResp.Vpc + d.SetId(*vpc.VpcId) + log.Printf("[INFO] VPC ID: %s", d.Id()) + + // Set partial mode and say that we setup the cidr block + d.Partial(true) + d.SetPartial("cidr_block") + + // Wait for the VPC to become available + log.Printf( + "[DEBUG] Waiting for VPC (%s) to become available", + d.Id()) + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"available"}, + Refresh: VPCStateRefreshFunc(conn, d.Id()), + Timeout: 10 * time.Minute, + } + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf( + "Error waiting for VPC (%s) to become available: %s", + d.Id(), err) + } + + // Update our attributes and return + return resourceAwsVpcUpdate(d, meta) +} + +func resourceAwsVpcRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + // Refresh the VPC state + vpcRaw, _, err := VPCStateRefreshFunc(conn, d.Id())() + if err != nil { + return err + } + if vpcRaw == nil { + d.SetId("") + return nil + } + + // VPC stuff + vpc := vpcRaw.(*ec2.Vpc) + vpcid := d.Id() + d.Set("cidr_block", vpc.CidrBlock) + d.Set("dhcp_options_id", vpc.DhcpOptionsId) + d.Set("instance_tenancy", vpc.InstanceTenancy) + + // Tags + d.Set("tags", tagsToMap(vpc.Tags)) + + for _, a := range vpc.Ipv6CidrBlockAssociationSet { + if *a.Ipv6CidrBlockState.State == "associated" { //we can only ever have 1 IPv6 block associated at once + d.Set("assign_generated_ipv6_cidr_block", true) + d.Set("ipv6_association_id", a.AssociationId) + d.Set("ipv6_cidr_block", a.Ipv6CidrBlock) + } else { + d.Set("assign_generated_ipv6_cidr_block", false) + d.Set("ipv6_association_id", "") // we blank these out to remove old entries + 
d.Set("ipv6_cidr_block", "") + } + } + + // Attributes + attribute := "enableDnsSupport" + DescribeAttrOpts := &ec2.DescribeVpcAttributeInput{ + Attribute: aws.String(attribute), + VpcId: aws.String(vpcid), + } + resp, err := conn.DescribeVpcAttribute(DescribeAttrOpts) + if err != nil { + return err + } + d.Set("enable_dns_support", *resp.EnableDnsSupport.Value) + attribute = "enableDnsHostnames" + DescribeAttrOpts = &ec2.DescribeVpcAttributeInput{ + Attribute: &attribute, + VpcId: &vpcid, + } + resp, err = conn.DescribeVpcAttribute(DescribeAttrOpts) + if err != nil { + return err + } + d.Set("enable_dns_hostnames", *resp.EnableDnsHostnames.Value) + + DescribeClassiclinkOpts := &ec2.DescribeVpcClassicLinkInput{ + VpcIds: []*string{&vpcid}, + } + + // Classic Link is only available in regions that support EC2 Classic + respClassiclink, err := conn.DescribeVpcClassicLink(DescribeClassiclinkOpts) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "UnsupportedOperation" { + log.Printf("[WARN] VPC Classic Link is not supported in this region") + } else { + return err + } + } else { + classiclink_enabled := false + for _, v := range respClassiclink.Vpcs { + if *v.VpcId == vpcid { + if v.ClassicLinkEnabled != nil { + classiclink_enabled = *v.ClassicLinkEnabled + } + break + } + } + d.Set("enable_classiclink", classiclink_enabled) + } + + // Get the main routing table for this VPC + // Really Ugly need to make this better - rmenn + filter1 := &ec2.Filter{ + Name: aws.String("association.main"), + Values: []*string{aws.String("true")}, + } + filter2 := &ec2.Filter{ + Name: aws.String("vpc-id"), + Values: []*string{aws.String(d.Id())}, + } + DescribeRouteOpts := &ec2.DescribeRouteTablesInput{ + Filters: []*ec2.Filter{filter1, filter2}, + } + routeResp, err := conn.DescribeRouteTables(DescribeRouteOpts) + if err != nil { + return err + } + if v := routeResp.RouteTables; len(v) > 0 { + d.Set("main_route_table_id", *v[0].RouteTableId) + } + + if err := resourceAwsVpcSetDefaultNetworkAcl(conn, d); err != nil { + log.Printf("[WARN] Unable to set Default Network ACL: %s", err) + } + if err := resourceAwsVpcSetDefaultSecurityGroup(conn, d); err != nil { + log.Printf("[WARN] Unable to set Default Security Group: %s", err) + } + if err := resourceAwsVpcSetDefaultRouteTable(conn, d); err != nil { + log.Printf("[WARN] Unable to set Default Route Table: %s", err) + } + + return nil +} + +func resourceAwsVpcUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + // Turn on partial mode + d.Partial(true) + vpcid := d.Id() + if d.HasChange("enable_dns_hostnames") { + val := d.Get("enable_dns_hostnames").(bool) + modifyOpts := &ec2.ModifyVpcAttributeInput{ + VpcId: &vpcid, + EnableDnsHostnames: &ec2.AttributeBooleanValue{ + Value: &val, + }, + } + + log.Printf( + "[INFO] Modifying enable_dns_hostnames vpc attribute for %s: %s", + d.Id(), modifyOpts) + if _, err := conn.ModifyVpcAttribute(modifyOpts); err != nil { + return err + } + + d.SetPartial("enable_dns_hostnames") + } + + _, hasEnableDnsSupportOption := d.GetOk("enable_dns_support") + + if !hasEnableDnsSupportOption || d.HasChange("enable_dns_support") { + val := d.Get("enable_dns_support").(bool) + modifyOpts := &ec2.ModifyVpcAttributeInput{ + VpcId: &vpcid, + EnableDnsSupport: &ec2.AttributeBooleanValue{ + Value: &val, + }, + } + + log.Printf( + "[INFO] Modifying enable_dns_support vpc attribute for %s: %s", + d.Id(), modifyOpts) + if _, err := conn.ModifyVpcAttribute(modifyOpts); err != 
nil { + return err + } + + d.SetPartial("enable_dns_support") + } + + if d.HasChange("enable_classiclink") { + val := d.Get("enable_classiclink").(bool) + + if val { + modifyOpts := &ec2.EnableVpcClassicLinkInput{ + VpcId: &vpcid, + } + log.Printf( + "[INFO] Modifying enable_classiclink vpc attribute for %s: %#v", + d.Id(), modifyOpts) + if _, err := conn.EnableVpcClassicLink(modifyOpts); err != nil { + return err + } + } else { + modifyOpts := &ec2.DisableVpcClassicLinkInput{ + VpcId: &vpcid, + } + log.Printf( + "[INFO] Modifying enable_classiclink vpc attribute for %s: %#v", + d.Id(), modifyOpts) + if _, err := conn.DisableVpcClassicLink(modifyOpts); err != nil { + return err + } + } + + d.SetPartial("enable_classiclink") + } + + if d.HasChange("assign_generated_ipv6_cidr_block") && !d.IsNewResource() { + toAssign := d.Get("assign_generated_ipv6_cidr_block").(bool) + + log.Printf("[INFO] Modifying assign_generated_ipv6_cidr_block to %#v", toAssign) + + if toAssign { + modifyOpts := &ec2.AssociateVpcCidrBlockInput{ + VpcId: &vpcid, + AmazonProvidedIpv6CidrBlock: aws.Bool(toAssign), + } + log.Printf("[INFO] Enabling assign_generated_ipv6_cidr_block vpc attribute for %s: %#v", + d.Id(), modifyOpts) + resp, err := conn.AssociateVpcCidrBlock(modifyOpts) + if err != nil { + return err + } + + // Wait for the CIDR to become available + log.Printf( + "[DEBUG] Waiting for IPv6 CIDR (%s) to become associated", + d.Id()) + stateConf := &resource.StateChangeConf{ + Pending: []string{"associating", "disassociated"}, + Target: []string{"associated"}, + Refresh: Ipv6CidrStateRefreshFunc(conn, d.Id(), *resp.Ipv6CidrBlockAssociation.AssociationId), + Timeout: 1 * time.Minute, + } + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf( + "Error waiting for IPv6 CIDR (%s) to become associated: %s", + d.Id(), err) + } + } else { + modifyOpts := &ec2.DisassociateVpcCidrBlockInput{ + AssociationId: aws.String(d.Get("ipv6_association_id").(string)), + } + log.Printf("[INFO] Disabling assign_generated_ipv6_cidr_block vpc attribute for %s: %#v", + d.Id(), modifyOpts) + if _, err := conn.DisassociateVpcCidrBlock(modifyOpts); err != nil { + return err + } + + // Wait for the CIDR to become available + log.Printf( + "[DEBUG] Waiting for IPv6 CIDR (%s) to become disassociated", + d.Id()) + stateConf := &resource.StateChangeConf{ + Pending: []string{"disassociating", "associated"}, + Target: []string{"disassociated"}, + Refresh: Ipv6CidrStateRefreshFunc(conn, d.Id(), d.Get("ipv6_association_id").(string)), + Timeout: 1 * time.Minute, + } + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf( + "Error waiting for IPv6 CIDR (%s) to become disassociated: %s", + d.Id(), err) + } + } + + d.SetPartial("assign_generated_ipv6_cidr_block") + } + + if err := setTags(conn, d); err != nil { + return err + } else { + d.SetPartial("tags") + } + + d.Partial(false) + return resourceAwsVpcRead(d, meta) +} + +func resourceAwsVpcDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + vpcID := d.Id() + DeleteVpcOpts := &ec2.DeleteVpcInput{ + VpcId: &vpcID, + } + log.Printf("[INFO] Deleting VPC: %s", d.Id()) + + return resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteVpc(DeleteVpcOpts) + if err == nil { + return nil + } + + ec2err, ok := err.(awserr.Error) + if !ok { + return resource.NonRetryableError(err) + } + + switch ec2err.Code() { + case "InvalidVpcID.NotFound": + return nil + case "DependencyViolation": + return 
resource.RetryableError(err) + } + + return resource.NonRetryableError(fmt.Errorf("Error deleting VPC: %s", err)) + }) +} + +// VPCStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch +// a VPC. +func VPCStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + DescribeVpcOpts := &ec2.DescribeVpcsInput{ + VpcIds: []*string{aws.String(id)}, + } + resp, err := conn.DescribeVpcs(DescribeVpcOpts) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpcID.NotFound" { + resp = nil + } else { + log.Printf("Error on VPCStateRefresh: %s", err) + return nil, "", err + } + } + + if resp == nil { + // Sometimes AWS just has consistency issues and doesn't see + // our instance yet. Return an empty state. + return nil, "", nil + } + + vpc := resp.Vpcs[0] + return vpc, *vpc.State, nil + } +} + +func Ipv6CidrStateRefreshFunc(conn *ec2.EC2, id string, associationId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + describeVpcOpts := &ec2.DescribeVpcsInput{ + VpcIds: []*string{aws.String(id)}, + } + resp, err := conn.DescribeVpcs(describeVpcOpts) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpcID.NotFound" { + resp = nil + } else { + log.Printf("Error on VPCStateRefresh: %s", err) + return nil, "", err + } + } + + if resp == nil { + // Sometimes AWS just has consistency issues and doesn't see + // our instance yet. Return an empty state. + return nil, "", nil + } + + if resp.Vpcs[0].Ipv6CidrBlockAssociationSet == nil { + return nil, "", nil + } + + for _, association := range resp.Vpcs[0].Ipv6CidrBlockAssociationSet { + if *association.AssociationId == associationId { + return association, *association.Ipv6CidrBlockState.State, nil + } + } + + return nil, "", nil + } +} + +func resourceAwsVpcSetDefaultNetworkAcl(conn *ec2.EC2, d *schema.ResourceData) error { + filter1 := &ec2.Filter{ + Name: aws.String("default"), + Values: []*string{aws.String("true")}, + } + filter2 := &ec2.Filter{ + Name: aws.String("vpc-id"), + Values: []*string{aws.String(d.Id())}, + } + DescribeNetworkACLOpts := &ec2.DescribeNetworkAclsInput{ + Filters: []*ec2.Filter{filter1, filter2}, + } + networkAclResp, err := conn.DescribeNetworkAcls(DescribeNetworkACLOpts) + + if err != nil { + return err + } + if v := networkAclResp.NetworkAcls; len(v) > 0 { + d.Set("default_network_acl_id", v[0].NetworkAclId) + } + + return nil +} + +func resourceAwsVpcSetDefaultSecurityGroup(conn *ec2.EC2, d *schema.ResourceData) error { + filter1 := &ec2.Filter{ + Name: aws.String("group-name"), + Values: []*string{aws.String("default")}, + } + filter2 := &ec2.Filter{ + Name: aws.String("vpc-id"), + Values: []*string{aws.String(d.Id())}, + } + DescribeSgOpts := &ec2.DescribeSecurityGroupsInput{ + Filters: []*ec2.Filter{filter1, filter2}, + } + securityGroupResp, err := conn.DescribeSecurityGroups(DescribeSgOpts) + + if err != nil { + return err + } + if v := securityGroupResp.SecurityGroups; len(v) > 0 { + d.Set("default_security_group_id", v[0].GroupId) + } + + return nil +} + +func resourceAwsVpcSetDefaultRouteTable(conn *ec2.EC2, d *schema.ResourceData) error { + filter1 := &ec2.Filter{ + Name: aws.String("association.main"), + Values: []*string{aws.String("true")}, + } + filter2 := &ec2.Filter{ + Name: aws.String("vpc-id"), + Values: []*string{aws.String(d.Id())}, + } + + findOpts := &ec2.DescribeRouteTablesInput{ + Filters: []*ec2.Filter{filter1, filter2}, + } 
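+ // A VPC always has exactly one main route table; the association.main filter narrows the query to it.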
+ + resp, err := conn.DescribeRouteTables(findOpts) + if err != nil { + return err + } + + if len(resp.RouteTables) < 1 || resp.RouteTables[0] == nil { + return fmt.Errorf("Default Route table not found") + } + + // There Can Be Only 1 ... Default Route Table + d.Set("default_route_table_id", resp.RouteTables[0].RouteTableId) + + return nil +} + +func resourceAwsVpcInstanceImport( + d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("assign_generated_ipv6_cidr_block", false) + return []*schema.ResourceData{d}, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_dhcp_options.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_dhcp_options.go new file mode 100644 index 000000000..ec2844cc7 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_dhcp_options.go @@ -0,0 +1,292 @@ +package aws + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsVpcDhcpOptions() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsVpcDhcpOptionsCreate, + Read: resourceAwsVpcDhcpOptionsRead, + Update: resourceAwsVpcDhcpOptionsUpdate, + Delete: resourceAwsVpcDhcpOptionsDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "domain_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "domain_name_servers": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "ntp_servers": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "netbios_node_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "netbios_name_servers": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "tags": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + }, + }, + } +} + +func resourceAwsVpcDhcpOptionsCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + setDHCPOption := func(key string) *ec2.NewDhcpConfiguration { + log.Printf("[DEBUG] Setting DHCP option %s...", key) + tfKey := strings.Replace(key, "-", "_", -1) + + value, ok := d.GetOk(tfKey) + if !ok { + return nil + } + + if v, ok := value.(string); ok { + return &ec2.NewDhcpConfiguration{ + Key: aws.String(key), + Values: []*string{ + aws.String(v), + }, + } + } + + if v, ok := value.([]interface{}); ok { + var s []*string + for _, attr := range v { + s = append(s, aws.String(attr.(string))) + } + + return &ec2.NewDhcpConfiguration{ + Key: aws.String(key), + Values: s, + } + } + + return nil + } + + createOpts := &ec2.CreateDhcpOptionsInput{ + DhcpConfigurations: []*ec2.NewDhcpConfiguration{ + setDHCPOption("domain-name"), + setDHCPOption("domain-name-servers"), + setDHCPOption("ntp-servers"), + setDHCPOption("netbios-node-type"), + setDHCPOption("netbios-name-servers"), + }, + } + + resp, err := conn.CreateDhcpOptions(createOpts) + if err != nil { + return fmt.Errorf("Error creating DHCP Options Set: %s", err) + } + + dos := 
resp.DhcpOptions + d.SetId(*dos.DhcpOptionsId) + log.Printf("[INFO] DHCP Options Set ID: %s", d.Id()) + + // Wait for the DHCP Options to become available + log.Printf("[DEBUG] Waiting for DHCP Options (%s) to become available", d.Id()) + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"created"}, + Refresh: resourceDHCPOptionsStateRefreshFunc(conn, d.Id()), + Timeout: 5 * time.Minute, + } + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf( + "Error waiting for DHCP Options (%s) to become available: %s", + d.Id(), err) + } + + return resourceAwsVpcDhcpOptionsUpdate(d, meta) +} + +func resourceAwsVpcDhcpOptionsRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + req := &ec2.DescribeDhcpOptionsInput{ + DhcpOptionsIds: []*string{ + aws.String(d.Id()), + }, + } + + resp, err := conn.DescribeDhcpOptions(req) + if err != nil { + ec2err, ok := err.(awserr.Error) + if !ok { + return fmt.Errorf("Error retrieving DHCP Options: %s", err.Error()) + } + + if ec2err.Code() == "InvalidDhcpOptionID.NotFound" { + log.Printf("[WARN] DHCP Options (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + return fmt.Errorf("Error retrieving DHCP Options: %s", err.Error()) + } + + if len(resp.DhcpOptions) == 0 { + return nil + } + + opts := resp.DhcpOptions[0] + d.Set("tags", tagsToMap(opts.Tags)) + + for _, cfg := range opts.DhcpConfigurations { + tfKey := strings.Replace(*cfg.Key, "-", "_", -1) + + if _, ok := d.Get(tfKey).(string); ok { + d.Set(tfKey, cfg.Values[0].Value) + } else { + values := make([]string, 0, len(cfg.Values)) + for _, v := range cfg.Values { + values = append(values, *v.Value) + } + + d.Set(tfKey, values) + } + } + + return nil +} + +func resourceAwsVpcDhcpOptionsUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + return setTags(conn, d) +} + +func resourceAwsVpcDhcpOptionsDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + return resource.Retry(3*time.Minute, func() *resource.RetryError { + log.Printf("[INFO] Deleting DHCP Options ID %s...", d.Id()) + _, err := conn.DeleteDhcpOptions(&ec2.DeleteDhcpOptionsInput{ + DhcpOptionsId: aws.String(d.Id()), + }) + + if err == nil { + return nil + } + + log.Printf("[WARN] %s", err) + + ec2err, ok := err.(awserr.Error) + if !ok { + return resource.RetryableError(err) + } + + switch ec2err.Code() { + case "InvalidDhcpOptionsID.NotFound": + return nil + case "DependencyViolation": + // If it is a dependency violation, we want to disassociate + // all VPCs using the given DHCP Options ID, and retry deleting. 
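+ // Re-pointing each dependent VPC at the "default" options set releases the dependency so the retried delete can succeed.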
+ vpcs, err2 := findVPCsByDHCPOptionsID(conn, d.Id()) + if err2 != nil { + log.Printf("[ERROR] %s", err2) + return resource.RetryableError(err2) + } + + for _, vpc := range vpcs { + log.Printf("[INFO] Disassociating DHCP Options Set %s from VPC %s...", d.Id(), *vpc.VpcId) + if _, err := conn.AssociateDhcpOptions(&ec2.AssociateDhcpOptionsInput{ + DhcpOptionsId: aws.String("default"), + VpcId: vpc.VpcId, + }); err != nil { + return resource.RetryableError(err) + } + } + return resource.RetryableError(err) + default: + return resource.NonRetryableError(err) + } + }) +} + +func findVPCsByDHCPOptionsID(conn *ec2.EC2, id string) ([]*ec2.Vpc, error) { + req := &ec2.DescribeVpcsInput{ + Filters: []*ec2.Filter{ + &ec2.Filter{ + Name: aws.String("dhcp-options-id"), + Values: []*string{ + aws.String(id), + }, + }, + }, + } + + resp, err := conn.DescribeVpcs(req) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpcID.NotFound" { + return nil, nil + } + return nil, err + } + + return resp.Vpcs, nil +} + +func resourceDHCPOptionsStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + DescribeDhcpOpts := &ec2.DescribeDhcpOptionsInput{ + DhcpOptionsIds: []*string{ + aws.String(id), + }, + } + + resp, err := conn.DescribeDhcpOptions(DescribeDhcpOpts) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidDhcpOptionsID.NotFound" { + resp = nil + } else { + log.Printf("Error on DHCPOptionsStateRefresh: %s", err) + return nil, "", err + } + } + + if resp == nil { + // Sometimes AWS just has consistency issues and doesn't see + // our instance yet. Return an empty state. + return nil, "", nil + } + + dos := resp.DhcpOptions[0] + return dos, "created", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_dhcp_options_association.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_dhcp_options_association.go new file mode 100644 index 000000000..7bdcb7a68 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_dhcp_options_association.go @@ -0,0 +1,99 @@ +package aws + +import ( + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsVpcDhcpOptionsAssociation() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsVpcDhcpOptionsAssociationCreate, + Read: resourceAwsVpcDhcpOptionsAssociationRead, + Update: resourceAwsVpcDhcpOptionsAssociationUpdate, + Delete: resourceAwsVpcDhcpOptionsAssociationDelete, + + Schema: map[string]*schema.Schema{ + "vpc_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "dhcp_options_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceAwsVpcDhcpOptionsAssociationCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + log.Printf( + "[INFO] Creating DHCP Options association: %s => %s", + d.Get("vpc_id").(string), + d.Get("dhcp_options_id").(string)) + + optsID := aws.String(d.Get("dhcp_options_id").(string)) + vpcID := aws.String(d.Get("vpc_id").(string)) + + if _, err := conn.AssociateDhcpOptions(&ec2.AssociateDhcpOptionsInput{ + DhcpOptionsId: optsID, + VpcId: vpcID, + }); err != nil { + return err + } + + // Set the ID and return + d.SetId(*optsID + "-" + *vpcID) + log.Printf("[INFO] Association 
ID: %s", d.Id())
+
+	return nil
+}
+
+func resourceAwsVpcDhcpOptionsAssociationRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+	// Get the VPC that this association belongs to
+	vpcRaw, _, err := VPCStateRefreshFunc(conn, d.Get("vpc_id").(string))()
+
+	if err != nil {
+		return err
+	}
+
+	if vpcRaw == nil {
+		return nil
+	}
+
+	vpc := vpcRaw.(*ec2.Vpc)
+	if *vpc.VpcId != d.Get("vpc_id") || *vpc.DhcpOptionsId != d.Get("dhcp_options_id") {
+		log.Printf("[INFO] The DHCP Options association is gone; removing it from state...")
+		d.SetId("")
+	}
+
+	return nil
+}
+
+// DHCP Options Associations cannot be updated.
+func resourceAwsVpcDhcpOptionsAssociationUpdate(d *schema.ResourceData, meta interface{}) error {
+	return resourceAwsVpcDhcpOptionsAssociationCreate(d, meta)
+}
+
+// AWS does not provide an API to disassociate a DHCP Options set from a VPC.
+// So, we do this by setting the VPC back to the default DHCP Options Set.
+func resourceAwsVpcDhcpOptionsAssociationDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	log.Printf("[INFO] Disassociating DHCP Options Set %s from VPC %s...", d.Get("dhcp_options_id"), d.Get("vpc_id"))
+	if _, err := conn.AssociateDhcpOptions(&ec2.AssociateDhcpOptionsInput{
+		DhcpOptionsId: aws.String("default"),
+		VpcId:         aws.String(d.Get("vpc_id").(string)),
+	}); err != nil {
+		return err
+	}
+
+	d.SetId("")
+	return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint.go
new file mode 100644
index 000000000..b07940326
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint.go
@@ -0,0 +1,237 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsVpcEndpoint() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsVPCEndpointCreate,
+		Read:   resourceAwsVPCEndpointRead,
+		Update: resourceAwsVPCEndpointUpdate,
+		Delete: resourceAwsVPCEndpointDelete,
+		Importer: &schema.ResourceImporter{
+			State: schema.ImportStatePassthrough,
+		},
+
+		Schema: map[string]*schema.Schema{
+			"policy": &schema.Schema{
+				Type:         schema.TypeString,
+				Optional:     true,
+				Computed:     true,
+				ValidateFunc: validateJsonString,
+				StateFunc: func(v interface{}) string {
+					json, _ := normalizeJsonString(v)
+					return json
+				},
+			},
+			"vpc_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"service_name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"route_table_ids": &schema.Schema{
+				Type:     schema.TypeSet,
+				Optional: true,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+				Set:      schema.HashString,
+			},
+			"prefix_list_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"cidr_blocks": &schema.Schema{
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+		},
+	}
+}
+
+func resourceAwsVPCEndpointCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+	input := &ec2.CreateVpcEndpointInput{
+		VpcId:       aws.String(d.Get("vpc_id").(string)),
+		ServiceName: 
aws.String(d.Get("service_name").(string)), + } + + if v, ok := d.GetOk("route_table_ids"); ok { + list := v.(*schema.Set).List() + if len(list) > 0 { + input.RouteTableIds = expandStringList(list) + } + } + + if v, ok := d.GetOk("policy"); ok { + policy, err := normalizeJsonString(v) + if err != nil { + return errwrap.Wrapf("policy contains an invalid JSON: {{err}}", err) + } + input.PolicyDocument = aws.String(policy) + } + + log.Printf("[DEBUG] Creating VPC Endpoint: %#v", input) + output, err := conn.CreateVpcEndpoint(input) + if err != nil { + return fmt.Errorf("Error creating VPC Endpoint: %s", err) + } + log.Printf("[DEBUG] VPC Endpoint %q created.", *output.VpcEndpoint.VpcEndpointId) + + d.SetId(*output.VpcEndpoint.VpcEndpointId) + + return resourceAwsVPCEndpointRead(d, meta) +} + +func resourceAwsVPCEndpointRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + input := &ec2.DescribeVpcEndpointsInput{ + VpcEndpointIds: []*string{aws.String(d.Id())}, + } + + log.Printf("[DEBUG] Reading VPC Endpoint: %q", d.Id()) + output, err := conn.DescribeVpcEndpoints(input) + + if err != nil { + ec2err, ok := err.(awserr.Error) + if !ok { + return fmt.Errorf("Error reading VPC Endpoint: %s", err.Error()) + } + + if ec2err.Code() == "InvalidVpcEndpointId.NotFound" { + log.Printf("[WARN] VPC Endpoint (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + return fmt.Errorf("Error reading VPC Endpoint: %s", err.Error()) + } + + if len(output.VpcEndpoints) != 1 { + return fmt.Errorf("There's no unique VPC Endpoint, but %d endpoints: %#v", + len(output.VpcEndpoints), output.VpcEndpoints) + } + + vpce := output.VpcEndpoints[0] + + // A VPC Endpoint is associated with exactly one prefix list name (also called Service Name). + // The prefix list ID can be used in security groups, so retrieve it to support that capability. + prefixListServiceName := *vpce.ServiceName + prefixListInput := &ec2.DescribePrefixListsInput{ + Filters: []*ec2.Filter{ + {Name: aws.String("prefix-list-name"), Values: []*string{aws.String(prefixListServiceName)}}, + }, + } + + log.Printf("[DEBUG] Reading VPC Endpoint prefix list: %s", prefixListServiceName) + prefixListsOutput, err := conn.DescribePrefixLists(prefixListInput) + + if err != nil { + _, ok := err.(awserr.Error) + if !ok { + return fmt.Errorf("Error reading VPC Endpoint prefix list: %s", err.Error()) + } + } + + if len(prefixListsOutput.PrefixLists) != 1 { + return fmt.Errorf("There are multiple prefix lists associated with the service name '%s'. 
Unexpected", prefixListServiceName)
+	}
+
+	policy, err := normalizeJsonString(*vpce.PolicyDocument)
+	if err != nil {
+		return errwrap.Wrapf("policy contains an invalid JSON: {{err}}", err)
+	}
+
+	d.Set("vpc_id", vpce.VpcId)
+	d.Set("policy", policy)
+	d.Set("service_name", vpce.ServiceName)
+	if err := d.Set("route_table_ids", aws.StringValueSlice(vpce.RouteTableIds)); err != nil {
+		return err
+	}
+	pl := prefixListsOutput.PrefixLists[0]
+	d.Set("prefix_list_id", pl.PrefixListId)
+	d.Set("cidr_blocks", aws.StringValueSlice(pl.Cidrs))
+
+	return nil
+}
+
+func resourceAwsVPCEndpointUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+	input := &ec2.ModifyVpcEndpointInput{
+		VpcEndpointId: aws.String(d.Id()),
+	}
+
+	if d.HasChange("route_table_ids") {
+		o, n := d.GetChange("route_table_ids")
+		os := o.(*schema.Set)
+		ns := n.(*schema.Set)
+
+		add := expandStringList(ns.Difference(os).List())
+		if len(add) > 0 {
+			input.AddRouteTableIds = add
+		}
+
+		remove := expandStringList(os.Difference(ns).List())
+		if len(remove) > 0 {
+			input.RemoveRouteTableIds = remove
+		}
+	}
+
+	if d.HasChange("policy") {
+		policy, err := normalizeJsonString(d.Get("policy"))
+		if err != nil {
+			return errwrap.Wrapf("policy contains an invalid JSON: {{err}}", err)
+		}
+		input.PolicyDocument = aws.String(policy)
+	}
+
+	log.Printf("[DEBUG] Updating VPC Endpoint: %#v", input)
+	_, err := conn.ModifyVpcEndpoint(input)
+	if err != nil {
+		return fmt.Errorf("Error updating VPC Endpoint: %s", err)
+	}
+	// Log the plain ID; input.VpcEndpointId is a *string and %q would
+	// print the pointer, not the value.
+	log.Printf("[DEBUG] VPC Endpoint %q updated", d.Id())
+
+	return resourceAwsVPCEndpointRead(d, meta)
+}
+
+func resourceAwsVPCEndpointDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+	input := &ec2.DeleteVpcEndpointsInput{
+		VpcEndpointIds: []*string{aws.String(d.Id())},
+	}
+
+	log.Printf("[DEBUG] Deleting VPC Endpoint: %#v", input)
+	_, err := conn.DeleteVpcEndpoints(input)
+
+	if err != nil {
+		ec2err, ok := err.(awserr.Error)
+		if !ok {
+			return fmt.Errorf("Error deleting VPC Endpoint: %s", err.Error())
+		}
+
+		if ec2err.Code() == "InvalidVpcEndpointId.NotFound" {
+			log.Printf("[DEBUG] VPC Endpoint %q is already gone", d.Id())
+		} else {
+			return fmt.Errorf("Error deleting VPC Endpoint: %s", err.Error())
+		}
+	}
+
+	log.Printf("[DEBUG] VPC Endpoint %q deleted", d.Id())
+	d.SetId("")
+
+	return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint_route_table_association.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint_route_table_association.go
new file mode 100644
index 000000000..655638aa4
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_endpoint_route_table_association.go
@@ -0,0 +1,159 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/hashicorp/terraform/helper/hashcode"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsVpcEndpointRouteTableAssociation() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsVPCEndpointRouteTableAssociationCreate,
+		Read:   resourceAwsVPCEndpointRouteTableAssociationRead,
+		Delete: resourceAwsVPCEndpointRouteTableAssociationDelete,
+		Importer: &schema.ResourceImporter{
+			State: schema.ImportStatePassthrough,
+		},
+
+		Schema: map[string]*schema.Schema{
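+			// Both fields below are ForceNew: the association has no
+			// mutable attributes, which is why this resource defines no
+			// Update function.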
"vpc_endpoint_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "route_table_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsVPCEndpointRouteTableAssociationCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + endpointId := d.Get("vpc_endpoint_id").(string) + rtId := d.Get("route_table_id").(string) + + _, err := findResourceVPCEndpoint(conn, endpointId) + if err != nil { + return err + } + + log.Printf( + "[INFO] Creating VPC Endpoint/Route Table association: %s => %s", + endpointId, rtId) + + input := &ec2.ModifyVpcEndpointInput{ + VpcEndpointId: aws.String(endpointId), + AddRouteTableIds: aws.StringSlice([]string{rtId}), + } + + _, err = conn.ModifyVpcEndpoint(input) + if err != nil { + return fmt.Errorf("Error creating VPC Endpoint/Route Table association: %s", err.Error()) + } + id := vpcEndpointIdRouteTableIdHash(endpointId, rtId) + log.Printf("[DEBUG] VPC Endpoint/Route Table association %q created.", id) + + d.SetId(id) + + return resourceAwsVPCEndpointRouteTableAssociationRead(d, meta) +} + +func resourceAwsVPCEndpointRouteTableAssociationRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + endpointId := d.Get("vpc_endpoint_id").(string) + rtId := d.Get("route_table_id").(string) + + vpce, err := findResourceVPCEndpoint(conn, endpointId) + if err != nil { + if err, ok := err.(awserr.Error); ok && err.Code() == "InvalidVpcEndpointId.NotFound" { + d.SetId("") + return nil + } + + return err + } + + found := false + for _, id := range vpce.RouteTableIds { + if id != nil && *id == rtId { + found = true + break + } + } + if !found { + // The association no longer exists. + d.SetId("") + return nil + } + + id := vpcEndpointIdRouteTableIdHash(endpointId, rtId) + log.Printf("[DEBUG] Computed VPC Endpoint/Route Table ID %s", id) + d.SetId(id) + + return nil +} + +func resourceAwsVPCEndpointRouteTableAssociationDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + endpointId := d.Get("vpc_endpoint_id").(string) + rtId := d.Get("route_table_id").(string) + + input := &ec2.ModifyVpcEndpointInput{ + VpcEndpointId: aws.String(endpointId), + RemoveRouteTableIds: aws.StringSlice([]string{rtId}), + } + + _, err := conn.ModifyVpcEndpoint(input) + if err != nil { + ec2err, ok := err.(awserr.Error) + if !ok { + return fmt.Errorf("Error deleting VPC Endpoint/Route Table association: %s", err.Error()) + } + + switch ec2err.Code() { + case "InvalidVpcEndpointId.NotFound": + fallthrough + case "InvalidRouteTableId.NotFound": + fallthrough + case "InvalidParameter": + log.Printf("[DEBUG] VPC Endpoint/Route Table association is already gone") + default: + return fmt.Errorf("Error deleting VPC Endpoint/Route Table association: %s", err.Error()) + } + } + + log.Printf("[DEBUG] VPC Endpoint/Route Table association %q deleted", d.Id()) + d.SetId("") + + return nil +} + +func findResourceVPCEndpoint(conn *ec2.EC2, id string) (*ec2.VpcEndpoint, error) { + input := &ec2.DescribeVpcEndpointsInput{ + VpcEndpointIds: aws.StringSlice([]string{id}), + } + + log.Printf("[DEBUG] Reading VPC Endpoint: %q", id) + output, err := conn.DescribeVpcEndpoints(input) + if err != nil { + return nil, err + } + + if output.VpcEndpoints == nil { + return nil, fmt.Errorf("No VPC Endpoints were found for %q", id) + } + + return output.VpcEndpoints[0], nil +} + +func vpcEndpointIdRouteTableIdHash(endpointId, rtId string) string { + 
return fmt.Sprintf("a-%s%d", endpointId, hashcode.String(rtId)) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_migrate.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_migrate.go new file mode 100644 index 000000000..90738d1f2 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_migrate.go @@ -0,0 +1,33 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/terraform" +) + +func resourceAwsVpcMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + switch v { + case 0: + log.Println("[INFO] Found AWS VPC State v0; migrating to v1") + return migrateVpcStateV0toV1(is) + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateVpcStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + if is.Empty() || is.Attributes == nil { + log.Println("[DEBUG] Empty VPC State; nothing to migrate.") + return is, nil + } + + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + is.Attributes["assign_generated_ipv6_cidr_block"] = "false" + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_peering_connection.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_peering_connection.go new file mode 100644 index 000000000..24a1912e4 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_peering_connection.go @@ -0,0 +1,381 @@ +package aws + +import ( + "errors" + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsVpcPeeringConnection() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsVPCPeeringCreate, + Read: resourceAwsVPCPeeringRead, + Update: resourceAwsVPCPeeringUpdate, + Delete: resourceAwsVPCPeeringDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "peer_owner_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "peer_vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "auto_accept": { + Type: schema.TypeBool, + Optional: true, + }, + "accept_status": { + Type: schema.TypeString, + Computed: true, + }, + "accepter": vpcPeeringConnectionOptionsSchema(), + "requester": vpcPeeringConnectionOptionsSchema(), + "tags": tagsSchema(), + }, + } +} + +func resourceAwsVPCPeeringCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + // Create the vpc peering connection + createOpts := &ec2.CreateVpcPeeringConnectionInput{ + PeerVpcId: aws.String(d.Get("peer_vpc_id").(string)), + VpcId: aws.String(d.Get("vpc_id").(string)), + } + + if v, ok := d.GetOk("peer_owner_id"); ok { + createOpts.PeerOwnerId = aws.String(v.(string)) + } + + log.Printf("[DEBUG] VPC Peering Create options: %#v", createOpts) + + resp, err := conn.CreateVpcPeeringConnection(createOpts) + if err != nil { + return 
errwrap.Wrapf("Error creating VPC Peering Connection: {{err}}", err) + } + + // Get the ID and store it + rt := resp.VpcPeeringConnection + d.SetId(*rt.VpcPeeringConnectionId) + log.Printf("[INFO] VPC Peering Connection ID: %s", d.Id()) + + // Wait for the vpc peering connection to become available + log.Printf("[DEBUG] Waiting for VPC Peering Connection (%s) to become available.", d.Id()) + stateConf := &resource.StateChangeConf{ + Pending: []string{"initiating-request", "provisioning", "pending"}, + Target: []string{"pending-acceptance", "active"}, + Refresh: resourceAwsVPCPeeringConnectionStateRefreshFunc(conn, d.Id()), + Timeout: 1 * time.Minute, + } + if _, err := stateConf.WaitForState(); err != nil { + return errwrap.Wrapf(fmt.Sprintf( + "Error waiting for VPC Peering Connection (%s) to become available: {{err}}", + d.Id()), err) + } + + return resourceAwsVPCPeeringUpdate(d, meta) +} + +func resourceAwsVPCPeeringRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient) + conn := client.ec2conn + + pcRaw, status, err := resourceAwsVPCPeeringConnectionStateRefreshFunc(conn, d.Id())() + // Allow a failed VPC Peering Connection to fallthrough, + // to allow rest of the logic below to do its work. + if err != nil && status != "failed" { + return err + } + + if pcRaw == nil { + d.SetId("") + return nil + } + + pc := pcRaw.(*ec2.VpcPeeringConnection) + + // The failed status is a status that we can assume just means the + // connection is gone. Destruction isn't allowed, and it eventually + // just "falls off" the console. See GH-2322 + if pc.Status != nil { + status := map[string]bool{ + "deleted": true, + "deleting": true, + "expired": true, + "failed": true, + "rejected": true, + } + if _, ok := status[*pc.Status.Code]; ok { + log.Printf("[DEBUG] VPC Peering Connection (%s) in state (%s), removing.", + d.Id(), *pc.Status.Code) + d.SetId("") + return nil + } + } + log.Printf("[DEBUG] VPC Peering Connection response: %#v", pc) + + log.Printf("[DEBUG] Account ID %s, VPC PeerConn Requester %s, Accepter %s", + client.accountid, *pc.RequesterVpcInfo.OwnerId, *pc.AccepterVpcInfo.OwnerId) + + if (client.accountid == *pc.AccepterVpcInfo.OwnerId) && (client.accountid != *pc.RequesterVpcInfo.OwnerId) { + // We're the accepter + d.Set("peer_owner_id", pc.RequesterVpcInfo.OwnerId) + d.Set("peer_vpc_id", pc.RequesterVpcInfo.VpcId) + d.Set("vpc_id", pc.AccepterVpcInfo.VpcId) + } else { + // We're the requester + d.Set("peer_owner_id", pc.AccepterVpcInfo.OwnerId) + d.Set("peer_vpc_id", pc.AccepterVpcInfo.VpcId) + d.Set("vpc_id", pc.RequesterVpcInfo.VpcId) + } + + d.Set("accept_status", pc.Status.Code) + + // When the VPC Peering Connection is pending acceptance, + // the details about accepter and/or requester peering + // options would not be included in the response. 
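+	// Hence the nil guards below: dereferencing PeeringOptions
+	// unconditionally would panic while the connection is still
+	// pending-acceptance.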
+ if pc.AccepterVpcInfo.PeeringOptions != nil { + err := d.Set("accepter", flattenPeeringOptions(pc.AccepterVpcInfo.PeeringOptions)) + if err != nil { + return errwrap.Wrapf("Error setting VPC Peering Connection accepter information: {{err}}", err) + } + } + + if pc.RequesterVpcInfo.PeeringOptions != nil { + err := d.Set("requester", flattenPeeringOptions(pc.RequesterVpcInfo.PeeringOptions)) + if err != nil { + return errwrap.Wrapf("Error setting VPC Peering Connection requester information: {{err}}", err) + } + } + + err = d.Set("tags", tagsToMap(pc.Tags)) + if err != nil { + return errwrap.Wrapf("Error setting VPC Peering Connection tags: {{err}}", err) + } + + return nil +} + +func resourceVPCPeeringConnectionAccept(conn *ec2.EC2, id string) (string, error) { + log.Printf("[INFO] Accept VPC Peering Connection with ID: %s", id) + + req := &ec2.AcceptVpcPeeringConnectionInput{ + VpcPeeringConnectionId: aws.String(id), + } + + resp, err := conn.AcceptVpcPeeringConnection(req) + if err != nil { + return "", err + } + pc := resp.VpcPeeringConnection + + return *pc.Status.Code, nil +} + +func resourceVPCPeeringConnectionOptionsModify(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + modifyOpts := &ec2.ModifyVpcPeeringConnectionOptionsInput{ + VpcPeeringConnectionId: aws.String(d.Id()), + } + + if v, ok := d.GetOk("accepter"); ok { + if s := v.(*schema.Set); len(s.List()) > 0 { + co := s.List()[0].(map[string]interface{}) + modifyOpts.AccepterPeeringConnectionOptions = expandPeeringOptions(co) + } + } + + if v, ok := d.GetOk("requester"); ok { + if s := v.(*schema.Set); len(s.List()) > 0 { + co := s.List()[0].(map[string]interface{}) + modifyOpts.RequesterPeeringConnectionOptions = expandPeeringOptions(co) + } + } + + log.Printf("[DEBUG] VPC Peering Connection modify options: %#v", modifyOpts) + if _, err := conn.ModifyVpcPeeringConnectionOptions(modifyOpts); err != nil { + return err + } + + return nil +} + +func resourceAwsVPCPeeringUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + if err := setTags(conn, d); err != nil { + return err + } else { + d.SetPartial("tags") + } + + pcRaw, _, err := resourceAwsVPCPeeringConnectionStateRefreshFunc(conn, d.Id())() + if err != nil { + return err + } + + if pcRaw == nil { + d.SetId("") + return nil + } + pc := pcRaw.(*ec2.VpcPeeringConnection) + + if _, ok := d.GetOk("auto_accept"); ok { + if pc.Status != nil && *pc.Status.Code == "pending-acceptance" { + status, err := resourceVPCPeeringConnectionAccept(conn, d.Id()) + if err != nil { + return errwrap.Wrapf("Unable to accept VPC Peering Connection: {{err}}", err) + } + log.Printf("[DEBUG] VPC Peering Connection accept status: %s", status) + } + } + + if d.HasChange("accepter") || d.HasChange("requester") { + _, ok := d.GetOk("auto_accept") + if !ok && pc.Status != nil && *pc.Status.Code != "active" { + return fmt.Errorf("Unable to modify peering options. The VPC Peering Connection "+ + "%q is not active. 
Please set `auto_accept` attribute to `true`, "+ + "or activate VPC Peering Connection manually.", d.Id()) + } + + if err := resourceVPCPeeringConnectionOptionsModify(d, meta); err != nil { + return errwrap.Wrapf("Error modifying VPC Peering Connection options: {{err}}", err) + } + } + + return resourceAwsVPCPeeringRead(d, meta) +} + +func resourceAwsVPCPeeringDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + _, err := conn.DeleteVpcPeeringConnection( + &ec2.DeleteVpcPeeringConnectionInput{ + VpcPeeringConnectionId: aws.String(d.Id()), + }) + + return err +} + +// resourceAwsVPCPeeringConnectionStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch +// a VPCPeeringConnection. +func resourceAwsVPCPeeringConnectionStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := conn.DescribeVpcPeeringConnections(&ec2.DescribeVpcPeeringConnectionsInput{ + VpcPeeringConnectionIds: []*string{aws.String(id)}, + }) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpcPeeringConnectionID.NotFound" { + resp = nil + } else { + log.Printf("Error reading VPC Peering Connection details: %s", err) + return nil, "error", err + } + } + + if resp == nil { + // Sometimes AWS just has consistency issues and doesn't see + // our instance yet. Return an empty state. + return nil, "", nil + } + + pc := resp.VpcPeeringConnections[0] + + // A VPC Peering Connection can exist in a failed state due to + // incorrect VPC ID, account ID, or overlapping IP address range, + // thus we short circuit before the time out would occur. + if pc != nil && *pc.Status.Code == "failed" { + return nil, "failed", errors.New(*pc.Status.Message) + } + + return pc, *pc.Status.Code, nil + } +} + +func vpcPeeringConnectionOptionsSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_remote_vpc_dns_resolution": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "allow_classic_link_to_remote_vpc": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "allow_vpc_to_remote_classic_link": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + } +} + +func flattenPeeringOptions(options *ec2.VpcPeeringConnectionOptionsDescription) (results []map[string]interface{}) { + m := make(map[string]interface{}) + + if options.AllowDnsResolutionFromRemoteVpc != nil { + m["allow_remote_vpc_dns_resolution"] = *options.AllowDnsResolutionFromRemoteVpc + } + + if options.AllowEgressFromLocalClassicLinkToRemoteVpc != nil { + m["allow_classic_link_to_remote_vpc"] = *options.AllowEgressFromLocalClassicLinkToRemoteVpc + } + + if options.AllowEgressFromLocalVpcToRemoteClassicLink != nil { + m["allow_vpc_to_remote_classic_link"] = *options.AllowEgressFromLocalVpcToRemoteClassicLink + } + + results = append(results, m) + return +} + +func expandPeeringOptions(m map[string]interface{}) *ec2.PeeringConnectionOptionsRequest { + r := &ec2.PeeringConnectionOptionsRequest{} + + if v, ok := m["allow_remote_vpc_dns_resolution"]; ok { + r.AllowDnsResolutionFromRemoteVpc = aws.Bool(v.(bool)) + } + + if v, ok := m["allow_classic_link_to_remote_vpc"]; ok { + r.AllowEgressFromLocalClassicLinkToRemoteVpc = aws.Bool(v.(bool)) + } + + if v, ok := m["allow_vpc_to_remote_classic_link"]; ok { + 
r.AllowEgressFromLocalVpcToRemoteClassicLink = aws.Bool(v.(bool)) + } + + return r +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_peering_connection_accepter.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_peering_connection_accepter.go new file mode 100644 index 000000000..8b1efff50 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpc_peering_connection_accepter.go @@ -0,0 +1,76 @@ +package aws + +import ( + "errors" + "log" + + "fmt" + + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsVpcPeeringConnectionAccepter() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsVPCPeeringAccepterCreate, + Read: resourceAwsVPCPeeringRead, + Update: resourceAwsVPCPeeringUpdate, + Delete: resourceAwsVPCPeeringAccepterDelete, + + Schema: map[string]*schema.Schema{ + "vpc_peering_connection_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + Computed: false, + }, + "auto_accept": { + Type: schema.TypeBool, + Optional: true, + }, + "accept_status": { + Type: schema.TypeString, + Computed: true, + }, + "vpc_id": { + Type: schema.TypeString, + Computed: true, + }, + "peer_vpc_id": { + Type: schema.TypeString, + Computed: true, + }, + "peer_owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "accepter": vpcPeeringConnectionOptionsSchema(), + "requester": vpcPeeringConnectionOptionsSchema(), + "tags": tagsSchema(), + }, + } +} + +func resourceAwsVPCPeeringAccepterCreate(d *schema.ResourceData, meta interface{}) error { + id := d.Get("vpc_peering_connection_id").(string) + d.SetId(id) + + if err := resourceAwsVPCPeeringRead(d, meta); err != nil { + return err + } + if d.Id() == "" { + return fmt.Errorf("VPC Peering Connection %q not found", id) + } + + // Ensure that this IS as cross-account VPC peering connection. + if d.Get("peer_owner_id").(string) == meta.(*AWSClient).accountid { + return errors.New("aws_vpc_peering_connection_accepter can only adopt into management cross-account VPC peering connections") + } + + return resourceAwsVPCPeeringUpdate(d, meta) +} + +func resourceAwsVPCPeeringAccepterDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[WARN] Will not delete VPC peering connection. 
Terraform will remove this resource from the state file, however resources may remain.") + d.SetId("") + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_connection.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_connection.go new file mode 100644 index 000000000..1bef00d3b --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_connection.go @@ -0,0 +1,484 @@ +package aws + +import ( + "bytes" + "encoding/xml" + "fmt" + "log" + "sort" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +type XmlVpnConnectionConfig struct { + Tunnels []XmlIpsecTunnel `xml:"ipsec_tunnel"` +} + +type XmlIpsecTunnel struct { + OutsideAddress string `xml:"vpn_gateway>tunnel_outside_address>ip_address"` + PreSharedKey string `xml:"ike>pre_shared_key"` + CgwInsideAddress string `xml:"customer_gateway>tunnel_inside_address>ip_address"` + VgwInsideAddress string `xml:"vpn_gateway>tunnel_inside_address>ip_address"` +} + +type TunnelInfo struct { + Tunnel1Address string + Tunnel1CgwInsideAddress string + Tunnel1VgwInsideAddress string + Tunnel1PreSharedKey string + Tunnel2Address string + Tunnel2CgwInsideAddress string + Tunnel2VgwInsideAddress string + Tunnel2PreSharedKey string +} + +func (slice XmlVpnConnectionConfig) Len() int { + return len(slice.Tunnels) +} + +func (slice XmlVpnConnectionConfig) Less(i, j int) bool { + return slice.Tunnels[i].OutsideAddress < slice.Tunnels[j].OutsideAddress +} + +func (slice XmlVpnConnectionConfig) Swap(i, j int) { + slice.Tunnels[i], slice.Tunnels[j] = slice.Tunnels[j], slice.Tunnels[i] +} + +func resourceAwsVpnConnection() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsVpnConnectionCreate, + Read: resourceAwsVpnConnectionRead, + Update: resourceAwsVpnConnectionUpdate, + Delete: resourceAwsVpnConnectionDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "vpn_gateway_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "customer_gateway_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "static_routes_only": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "tags": tagsSchema(), + + // Begin read only attributes + "customer_gateway_configuration": { + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + + "tunnel1_address": { + Type: schema.TypeString, + Computed: true, + }, + + "tunnel1_cgw_inside_address": { + Type: schema.TypeString, + Computed: true, + }, + + "tunnel1_vgw_inside_address": { + Type: schema.TypeString, + Computed: true, + }, + + "tunnel1_preshared_key": { + Type: schema.TypeString, + Computed: true, + }, + + "tunnel2_address": { + Type: schema.TypeString, + Computed: true, + }, + + "tunnel2_cgw_inside_address": { + Type: schema.TypeString, + Computed: true, + }, + + "tunnel2_vgw_inside_address": { + Type: schema.TypeString, + Computed: true, + }, + + "tunnel2_preshared_key": { + Type: schema.TypeString, + Computed: true, + }, + + "routes": { + Type: schema.TypeSet, + 
Computed: true,
+			Optional: true,
+			Elem: &schema.Resource{
+				Schema: map[string]*schema.Schema{
+					"destination_cidr_block": {
+						Type:     schema.TypeString,
+						Computed: true,
+						Optional: true,
+					},
+
+					"source": {
+						Type:     schema.TypeString,
+						Computed: true,
+						Optional: true,
+					},
+
+					"state": {
+						Type:     schema.TypeString,
+						Computed: true,
+						Optional: true,
+					},
+				},
+			},
+			Set: func(v interface{}) int {
+				var buf bytes.Buffer
+				m := v.(map[string]interface{})
+				buf.WriteString(fmt.Sprintf("%s-", m["destination_cidr_block"].(string)))
+				buf.WriteString(fmt.Sprintf("%s-", m["source"].(string)))
+				buf.WriteString(fmt.Sprintf("%s-", m["state"].(string)))
+				return hashcode.String(buf.String())
+			},
+		},
+
+		"vgw_telemetry": {
+			Type:     schema.TypeSet,
+			Computed: true,
+			Optional: true,
+			Elem: &schema.Resource{
+				Schema: map[string]*schema.Schema{
+					"accepted_route_count": {
+						Type:     schema.TypeInt,
+						Computed: true,
+						Optional: true,
+					},
+
+					"last_status_change": {
+						Type:     schema.TypeString,
+						Computed: true,
+						Optional: true,
+					},
+
+					"outside_ip_address": {
+						Type:     schema.TypeString,
+						Computed: true,
+						Optional: true,
+					},
+
+					"status": {
+						Type:     schema.TypeString,
+						Computed: true,
+						Optional: true,
+					},
+
+					"status_message": {
+						Type:     schema.TypeString,
+						Computed: true,
+						Optional: true,
+					},
+				},
+			},
+			Set: func(v interface{}) int {
+				var buf bytes.Buffer
+				m := v.(map[string]interface{})
+				buf.WriteString(fmt.Sprintf("%s-", m["outside_ip_address"].(string)))
+				return hashcode.String(buf.String())
+			},
+		},
+	},
+	}
+}
+
+func resourceAwsVpnConnectionCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	connectOpts := &ec2.VpnConnectionOptionsSpecification{
+		StaticRoutesOnly: aws.Bool(d.Get("static_routes_only").(bool)),
+	}
+
+	createOpts := &ec2.CreateVpnConnectionInput{
+		CustomerGatewayId: aws.String(d.Get("customer_gateway_id").(string)),
+		Options:           connectOpts,
+		Type:              aws.String(d.Get("type").(string)),
+		VpnGatewayId:      aws.String(d.Get("vpn_gateway_id").(string)),
+	}
+
+	// Create the VPN Connection
+	log.Printf("[DEBUG] Creating VPN connection")
+	resp, err := conn.CreateVpnConnection(createOpts)
+	if err != nil {
+		return fmt.Errorf("Error creating VPN connection: %s", err)
+	}
+
+	// Store the ID
+	vpnConnection := resp.VpnConnection
+	d.SetId(*vpnConnection.VpnConnectionId)
+	log.Printf("[INFO] VPN connection ID: %s", *vpnConnection.VpnConnectionId)
+
+	// Wait for the connection to become available. This has an obscenely
+	// high default timeout because AWS VPN connections are notoriously
+	// slow at coming up or going down. There's also no point in checking
+	// more frequently than every ten seconds.
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"pending"},
+		Target:     []string{"available"},
+		Refresh:    vpnConnectionRefreshFunc(conn, *vpnConnection.VpnConnectionId),
+		Timeout:    30 * time.Minute,
+		Delay:      10 * time.Second,
+		MinTimeout: 10 * time.Second,
+	}
+
+	_, stateErr := stateConf.WaitForState()
+	if stateErr != nil {
+		// Report stateErr here; err from the create call above is nil at
+		// this point and would format as "%!s(<nil>)".
+		return fmt.Errorf(
+			"Error waiting for VPN connection (%s) to become ready: %s",
+			*vpnConnection.VpnConnectionId, stateErr)
+	}
+
+	// Create tags.
+	if err := setTags(conn, d); err != nil {
+		return err
+	}
+
+	// Read off the API to populate our RO fields.
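+	// (That read parses the tunnel outside/inside addresses and pre-shared
+	// keys out of the CustomerGatewayConfiguration XML, and fills in the
+	// routes and vgw_telemetry sets.)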
+	return resourceAwsVpnConnectionRead(d, meta)
+}
+
+func vpnConnectionRefreshFunc(conn *ec2.EC2, connectionId string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		resp, err := conn.DescribeVpnConnections(&ec2.DescribeVpnConnectionsInput{
+			VpnConnectionIds: []*string{aws.String(connectionId)},
+		})
+
+		if err != nil {
+			if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpnConnectionID.NotFound" {
+				resp = nil
+			} else {
+				log.Printf("Error on VPNConnectionRefresh: %s", err)
+				return nil, "", err
+			}
+		}
+
+		if resp == nil || len(resp.VpnConnections) == 0 {
+			return nil, "", nil
+		}
+
+		connection := resp.VpnConnections[0]
+		return connection, *connection.State, nil
+	}
+}
+
+func resourceAwsVpnConnectionRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	resp, err := conn.DescribeVpnConnections(&ec2.DescribeVpnConnectionsInput{
+		VpnConnectionIds: []*string{aws.String(d.Id())},
+	})
+	if err != nil {
+		if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpnConnectionID.NotFound" {
+			d.SetId("")
+			return nil
+		} else {
+			log.Printf("[ERROR] Error finding VPN connection: %s", err)
+			return err
+		}
+	}
+
+	if len(resp.VpnConnections) != 1 {
+		return fmt.Errorf("Error finding VPN connection: %s", d.Id())
+	}
+
+	vpnConnection := resp.VpnConnections[0]
+	if vpnConnection == nil || *vpnConnection.State == "deleted" {
+		// Seems we have lost our VPN Connection
+		d.SetId("")
+		return nil
+	}
+
+	// Set attributes under the user's control.
+	d.Set("vpn_gateway_id", vpnConnection.VpnGatewayId)
+	d.Set("customer_gateway_id", vpnConnection.CustomerGatewayId)
+	d.Set("type", vpnConnection.Type)
+	d.Set("tags", tagsToMap(vpnConnection.Tags))
+
+	if vpnConnection.Options != nil {
+		if err := d.Set("static_routes_only", vpnConnection.Options.StaticRoutesOnly); err != nil {
+			return err
+		}
+	} else {
+		// If there are no Options on the connection then static routes are
+		// not supported; default static_routes_only to false.
+		d.Set("static_routes_only", false)
+	}
+
+	// Set read only attributes.
+	d.Set("customer_gateway_configuration", vpnConnection.CustomerGatewayConfiguration)
+
+	if vpnConnection.CustomerGatewayConfiguration != nil {
+		if tunnelInfo, err := xmlConfigToTunnelInfo(*vpnConnection.CustomerGatewayConfiguration); err != nil {
+			log.Printf("[ERROR] Error unmarshaling XML configuration for (%s): %s", d.Id(), err)
+		} else {
+			d.Set("tunnel1_address", tunnelInfo.Tunnel1Address)
+			d.Set("tunnel1_cgw_inside_address", tunnelInfo.Tunnel1CgwInsideAddress)
+			d.Set("tunnel1_vgw_inside_address", tunnelInfo.Tunnel1VgwInsideAddress)
+			d.Set("tunnel1_preshared_key", tunnelInfo.Tunnel1PreSharedKey)
+			d.Set("tunnel2_address", tunnelInfo.Tunnel2Address)
+			d.Set("tunnel2_preshared_key", tunnelInfo.Tunnel2PreSharedKey)
+			d.Set("tunnel2_cgw_inside_address", tunnelInfo.Tunnel2CgwInsideAddress)
+			d.Set("tunnel2_vgw_inside_address", tunnelInfo.Tunnel2VgwInsideAddress)
+		}
+	}
+
+	if err := d.Set("vgw_telemetry", telemetryToMapList(vpnConnection.VgwTelemetry)); err != nil {
+		return err
+	}
+	if err := d.Set("routes", routesToMapList(vpnConnection.Routes)); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func resourceAwsVpnConnectionUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	// Update tags if required.
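+	// Tags are the only argument that can change in place here: the
+	// gateway IDs, type and static_routes_only are all ForceNew, so any
+	// other change re-creates the connection.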
+	if err := setTags(conn, d); err != nil {
+		return err
+	}
+
+	d.SetPartial("tags")
+
+	return resourceAwsVpnConnectionRead(d, meta)
+}
+
+func resourceAwsVpnConnectionDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	_, err := conn.DeleteVpnConnection(&ec2.DeleteVpnConnectionInput{
+		VpnConnectionId: aws.String(d.Id()),
+	})
+	if err != nil {
+		if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpnConnectionID.NotFound" {
+			d.SetId("")
+			return nil
+		} else {
+			log.Printf("[ERROR] Error deleting VPN connection: %s", err)
+			return err
+		}
+	}
+
+	// These things can take quite a while to tear themselves down and any
+	// attempt to modify resources they reference (e.g. CustomerGateways or
+	// VPN Gateways) before deletion will result in an error. Furthermore,
+	// they don't just disappear. They go into a "deleted" state. We need to
+	// wait to ensure any other modifications the user might make to their
+	// VPC stack can safely run.
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"deleting"},
+		Target:     []string{"deleted"},
+		Refresh:    vpnConnectionRefreshFunc(conn, d.Id()),
+		Timeout:    30 * time.Minute,
+		Delay:      10 * time.Second,
+		MinTimeout: 10 * time.Second,
+	}
+
+	_, stateErr := stateConf.WaitForState()
+	if stateErr != nil {
+		// Report stateErr; err is nil once the delete call has succeeded.
+		return fmt.Errorf(
+			"Error waiting for VPN connection (%s) to delete: %s", d.Id(), stateErr)
+	}
+
+	return nil
+}
+
+// routesToMapList turns the list of routes into a list of maps.
+func routesToMapList(routes []*ec2.VpnStaticRoute) []map[string]interface{} {
+	result := make([]map[string]interface{}, 0, len(routes))
+	for _, r := range routes {
+		staticRoute := make(map[string]interface{})
+		staticRoute["destination_cidr_block"] = *r.DestinationCidrBlock
+		staticRoute["state"] = *r.State
+
+		if r.Source != nil {
+			staticRoute["source"] = *r.Source
+		}
+
+		result = append(result, staticRoute)
+	}
+
+	return result
+}
+
+// telemetryToMapList turns the VGW telemetry into a list of maps.
+func telemetryToMapList(telemetry []*ec2.VgwTelemetry) []map[string]interface{} {
+	result := make([]map[string]interface{}, 0, len(telemetry))
+	for _, t := range telemetry {
+		vgw := make(map[string]interface{})
+		vgw["accepted_route_count"] = *t.AcceptedRouteCount
+		vgw["outside_ip_address"] = *t.OutsideIpAddress
+		vgw["status"] = *t.Status
+		vgw["status_message"] = *t.StatusMessage
+
+		// LastStatusChange is a time.Time(). Convert it into a string
+		// so it can be handled by schema's type system.
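+		// time.Time's String() form, e.g. "2017-05-03 17:24:51 +0000 UTC".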
+ vgw["last_status_change"] = t.LastStatusChange.String() + result = append(result, vgw) + } + + return result +} + +func xmlConfigToTunnelInfo(xmlConfig string) (*TunnelInfo, error) { + var vpnConfig XmlVpnConnectionConfig + if err := xml.Unmarshal([]byte(xmlConfig), &vpnConfig); err != nil { + return nil, errwrap.Wrapf("Error Unmarshalling XML: {{err}}", err) + } + + // don't expect consistent ordering from the XML + sort.Sort(vpnConfig) + + tunnelInfo := TunnelInfo{ + Tunnel1Address: vpnConfig.Tunnels[0].OutsideAddress, + Tunnel1PreSharedKey: vpnConfig.Tunnels[0].PreSharedKey, + Tunnel1CgwInsideAddress: vpnConfig.Tunnels[0].CgwInsideAddress, + Tunnel1VgwInsideAddress: vpnConfig.Tunnels[0].VgwInsideAddress, + + Tunnel2Address: vpnConfig.Tunnels[1].OutsideAddress, + Tunnel2PreSharedKey: vpnConfig.Tunnels[1].PreSharedKey, + Tunnel2CgwInsideAddress: vpnConfig.Tunnels[1].CgwInsideAddress, + Tunnel2VgwInsideAddress: vpnConfig.Tunnels[1].VgwInsideAddress, + } + + return &tunnelInfo, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_gateway.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_gateway.go new file mode 100644 index 000000000..0c40d8c8c --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_gateway.go @@ -0,0 +1,326 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsVpnGateway() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsVpnGatewayCreate, + Read: resourceAwsVpnGatewayRead, + Update: resourceAwsVpnGatewayUpdate, + Delete: resourceAwsVpnGatewayDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "availability_zone": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "vpc_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsVpnGatewayCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + createOpts := &ec2.CreateVpnGatewayInput{ + AvailabilityZone: aws.String(d.Get("availability_zone").(string)), + Type: aws.String("ipsec.1"), + } + + // Create the VPN gateway + log.Printf("[DEBUG] Creating VPN gateway") + resp, err := conn.CreateVpnGateway(createOpts) + if err != nil { + return fmt.Errorf("Error creating VPN gateway: %s", err) + } + + // Get the ID and store it + vpnGateway := resp.VpnGateway + d.SetId(*vpnGateway.VpnGatewayId) + log.Printf("[INFO] VPN Gateway ID: %s", *vpnGateway.VpnGatewayId) + + // Attach the VPN gateway to the correct VPC + return resourceAwsVpnGatewayUpdate(d, meta) +} + +func resourceAwsVpnGatewayRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + resp, err := conn.DescribeVpnGateways(&ec2.DescribeVpnGatewaysInput{ + VpnGatewayIds: []*string{aws.String(d.Id())}, + }) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpnGatewayID.NotFound" { + d.SetId("") + return nil + } else { + log.Printf("[ERROR] Error finding VpnGateway: %s", err) + return err + } + } + + vpnGateway := resp.VpnGateways[0] + if vpnGateway == nil || *vpnGateway.State == 
"deleted" { + // Seems we have lost our VPN gateway + d.SetId("") + return nil + } + + vpnAttachment := vpnGatewayGetAttachment(vpnGateway) + if len(vpnGateway.VpcAttachments) == 0 || *vpnAttachment.State == "detached" { + // Gateway exists but not attached to the VPC + d.Set("vpc_id", "") + } else { + d.Set("vpc_id", *vpnAttachment.VpcId) + } + + if vpnGateway.AvailabilityZone != nil && *vpnGateway.AvailabilityZone != "" { + d.Set("availability_zone", vpnGateway.AvailabilityZone) + } + d.Set("tags", tagsToMap(vpnGateway.Tags)) + + return nil +} + +func resourceAwsVpnGatewayUpdate(d *schema.ResourceData, meta interface{}) error { + if d.HasChange("vpc_id") { + // If we're already attached, detach it first + if err := resourceAwsVpnGatewayDetach(d, meta); err != nil { + return err + } + + // Attach the VPN gateway to the new vpc + if err := resourceAwsVpnGatewayAttach(d, meta); err != nil { + return err + } + } + + conn := meta.(*AWSClient).ec2conn + + if err := setTags(conn, d); err != nil { + return err + } + + d.SetPartial("tags") + + return resourceAwsVpnGatewayRead(d, meta) +} + +func resourceAwsVpnGatewayDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + // Detach if it is attached + if err := resourceAwsVpnGatewayDetach(d, meta); err != nil { + return err + } + + log.Printf("[INFO] Deleting VPN gateway: %s", d.Id()) + + return resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteVpnGateway(&ec2.DeleteVpnGatewayInput{ + VpnGatewayId: aws.String(d.Id()), + }) + if err == nil { + return nil + } + + ec2err, ok := err.(awserr.Error) + if !ok { + return resource.RetryableError(err) + } + + switch ec2err.Code() { + case "InvalidVpnGatewayID.NotFound": + return nil + case "IncorrectState": + return resource.RetryableError(err) + } + + return resource.NonRetryableError(err) + }) +} + +func resourceAwsVpnGatewayAttach(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + if d.Get("vpc_id").(string) == "" { + log.Printf( + "[DEBUG] Not attaching VPN Gateway '%s' as no VPC ID is set", + d.Id()) + return nil + } + + log.Printf( + "[INFO] Attaching VPN Gateway '%s' to VPC '%s'", + d.Id(), + d.Get("vpc_id").(string)) + + req := &ec2.AttachVpnGatewayInput{ + VpnGatewayId: aws.String(d.Id()), + VpcId: aws.String(d.Get("vpc_id").(string)), + } + + err := resource.Retry(30*time.Second, func() *resource.RetryError { + _, err := conn.AttachVpnGateway(req) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok { + if "InvalidVpnGatewayID.NotFound" == ec2err.Code() { + return resource.RetryableError( + fmt.Errorf("Gateway not found, retry for eventual consistancy")) + } + } + return resource.NonRetryableError(err) + } + return nil + }) + + if err != nil { + return err + } + + // Wait for it to be fully attached before continuing + log.Printf("[DEBUG] Waiting for VPN gateway (%s) to attach", d.Id()) + stateConf := &resource.StateChangeConf{ + Pending: []string{"detached", "attaching"}, + Target: []string{"attached"}, + Refresh: vpnGatewayAttachStateRefreshFunc(conn, d.Id(), "available"), + Timeout: 10 * time.Minute, + } + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf( + "Error waiting for VPN gateway (%s) to attach: %s", + d.Id(), err) + } + + return nil +} + +func resourceAwsVpnGatewayDetach(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + // Get the old VPC ID to detach from + vpcID, _ := d.GetChange("vpc_id") + + if 
vpcID.(string) == "" { + log.Printf( + "[DEBUG] Not detaching VPN Gateway '%s' as no VPC ID is set", + d.Id()) + return nil + } + + log.Printf( + "[INFO] Detaching VPN Gateway '%s' from VPC '%s'", + d.Id(), + vpcID.(string)) + + wait := true + _, err := conn.DetachVpnGateway(&ec2.DetachVpnGatewayInput{ + VpnGatewayId: aws.String(d.Id()), + VpcId: aws.String(vpcID.(string)), + }) + if err != nil { + ec2err, ok := err.(awserr.Error) + if ok { + if ec2err.Code() == "InvalidVpnGatewayID.NotFound" { + err = nil + wait = false + } else if ec2err.Code() == "InvalidVpnGatewayAttachment.NotFound" { + err = nil + wait = false + } + } + + if err != nil { + return err + } + } + + if !wait { + return nil + } + + // Wait for it to be fully detached before continuing + log.Printf("[DEBUG] Waiting for VPN gateway (%s) to detach", d.Id()) + stateConf := &resource.StateChangeConf{ + Pending: []string{"attached", "detaching", "available"}, + Target: []string{"detached"}, + Refresh: vpnGatewayAttachStateRefreshFunc(conn, d.Id(), "detached"), + Timeout: 10 * time.Minute, + } + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf( + "Error waiting for vpn gateway (%s) to detach: %s", + d.Id(), err) + } + + return nil +} + +// vpnGatewayAttachStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch +// the state of a VPN gateway's attachment +func vpnGatewayAttachStateRefreshFunc(conn *ec2.EC2, id string, expected string) resource.StateRefreshFunc { + var start time.Time + return func() (interface{}, string, error) { + if start.IsZero() { + start = time.Now() + } + + resp, err := conn.DescribeVpnGateways(&ec2.DescribeVpnGatewaysInput{ + VpnGatewayIds: []*string{aws.String(id)}, + }) + + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpnGatewayID.NotFound" { + resp = nil + } else { + log.Printf("[ERROR] Error on VpnGatewayStateRefresh: %s", err) + return nil, "", err + } + } + + if resp == nil { + // Sometimes AWS just has consistency issues and doesn't see + // our instance yet. Return an empty state. 
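+		// (The surrounding StateChangeConf treats a nil result as
+		// "not found yet" and keeps polling instead of failing the wait.)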
+ return nil, "", nil + } + + vpnGateway := resp.VpnGateways[0] + if len(vpnGateway.VpcAttachments) == 0 { + // No attachments, we're detached + return vpnGateway, "detached", nil + } + + vpnAttachment := vpnGatewayGetAttachment(vpnGateway) + return vpnGateway, *vpnAttachment.State, nil + } +} + +func vpnGatewayGetAttachment(vgw *ec2.VpnGateway) *ec2.VpcAttachment { + for _, v := range vgw.VpcAttachments { + if *v.State == "attached" { + return v + } + } + return &ec2.VpcAttachment{State: aws.String("detached")} +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_gateway_attachment.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_gateway_attachment.go new file mode 100644 index 000000000..db0110000 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_gateway_attachment.go @@ -0,0 +1,210 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsVpnGatewayAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsVpnGatewayAttachmentCreate, + Read: resourceAwsVpnGatewayAttachmentRead, + Delete: resourceAwsVpnGatewayAttachmentDelete, + + Schema: map[string]*schema.Schema{ + "vpc_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "vpn_gateway_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsVpnGatewayAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + vpcId := d.Get("vpc_id").(string) + vgwId := d.Get("vpn_gateway_id").(string) + + createOpts := &ec2.AttachVpnGatewayInput{ + VpcId: aws.String(vpcId), + VpnGatewayId: aws.String(vgwId), + } + log.Printf("[DEBUG] VPN Gateway attachment options: %#v", *createOpts) + + _, err := conn.AttachVpnGateway(createOpts) + if err != nil { + return fmt.Errorf("Error attaching VPN Gateway %q to VPC %q: %s", + vgwId, vpcId, err) + } + + d.SetId(vpnGatewayAttachmentId(vpcId, vgwId)) + log.Printf("[INFO] VPN Gateway %q attachment ID: %s", vgwId, d.Id()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"detached", "attaching"}, + Target: []string{"attached"}, + Refresh: vpnGatewayAttachmentStateRefresh(conn, vpcId, vgwId), + Timeout: 15 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 5 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for VPN Gateway %q to attach to VPC %q: %s", + vgwId, vpcId, err) + } + log.Printf("[DEBUG] VPN Gateway %q attached to VPC %q.", vgwId, vpcId) + + return resourceAwsVpnGatewayAttachmentRead(d, meta) +} + +func resourceAwsVpnGatewayAttachmentRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + vgwId := d.Get("vpn_gateway_id").(string) + + resp, err := conn.DescribeVpnGateways(&ec2.DescribeVpnGatewaysInput{ + VpnGatewayIds: []*string{aws.String(vgwId)}, + }) + + if err != nil { + awsErr, ok := err.(awserr.Error) + if ok && awsErr.Code() == "InvalidVPNGatewayID.NotFound" { + log.Printf("[WARN] VPN Gateway %q not found.", vgwId) + d.SetId("") + return nil + } + return err + } + + vgw := resp.VpnGateways[0] + if 
*vgw.State == "deleted" { + log.Printf("[INFO] VPN Gateway %q appears to have been deleted.", vgwId) + d.SetId("") + return nil + } + + vga := vpnGatewayGetAttachment(vgw) + if len(vgw.VpcAttachments) == 0 || *vga.State == "detached" { + d.Set("vpc_id", "") + return nil + } + + d.Set("vpc_id", *vga.VpcId) + return nil +} + +func resourceAwsVpnGatewayAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + vpcId := d.Get("vpc_id").(string) + vgwId := d.Get("vpn_gateway_id").(string) + + if vpcId == "" { + log.Printf("[DEBUG] Not detaching VPN Gateway %q as no VPC ID is set.", vgwId) + return nil + } + + _, err := conn.DetachVpnGateway(&ec2.DetachVpnGatewayInput{ + VpcId: aws.String(vpcId), + VpnGatewayId: aws.String(vgwId), + }) + + if err != nil { + awsErr, ok := err.(awserr.Error) + if ok { + switch awsErr.Code() { + case "InvalidVPNGatewayID.NotFound": + log.Printf("[WARN] VPN Gateway %q not found.", vgwId) + d.SetId("") + return nil + case "InvalidVpnGatewayAttachment.NotFound": + log.Printf( + "[WARN] VPN Gateway %q attachment to VPC %q not found.", + vgwId, vpcId) + d.SetId("") + return nil + } + } + + return fmt.Errorf("Error detaching VPN Gateway %q from VPC %q: %s", + vgwId, vpcId, err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"attached", "detaching"}, + Target: []string{"detached"}, + Refresh: vpnGatewayAttachmentStateRefresh(conn, vpcId, vgwId), + Timeout: 15 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 5 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for VPN Gateway %q to detach from VPC %q: %s", + vgwId, vpcId, err) + } + log.Printf("[DEBUG] VPN Gateway %q detached from VPC %q.", vgwId, vpcId) + + d.SetId("") + return nil +} + +func vpnGatewayAttachmentStateRefresh(conn *ec2.EC2, vpcId, vgwId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := conn.DescribeVpnGateways(&ec2.DescribeVpnGatewaysInput{ + Filters: []*ec2.Filter{ + &ec2.Filter{ + Name: aws.String("attachment.vpc-id"), + Values: []*string{aws.String(vpcId)}, + }, + }, + VpnGatewayIds: []*string{aws.String(vgwId)}, + }) + + if err != nil { + awsErr, ok := err.(awserr.Error) + if ok { + switch awsErr.Code() { + case "InvalidVPNGatewayID.NotFound": + fallthrough + case "InvalidVpnGatewayAttachment.NotFound": + return nil, "", nil + } + } + + return nil, "", err + } + + vgw := resp.VpnGateways[0] + if len(vgw.VpcAttachments) == 0 { + return vgw, "detached", nil + } + + vga := vpnGatewayGetAttachment(vgw) + + log.Printf("[DEBUG] VPN Gateway %q attachment status: %s", vgwId, *vga.State) + return vgw, *vga.State, nil + } +} + +func vpnGatewayAttachmentId(vpcId, vgwId string) string { + return fmt.Sprintf("vpn-attachment-%x", hashcode.String(fmt.Sprintf("%s-%s", vpcId, vgwId))) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_gateway_route_propagation.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_gateway_route_propagation.go new file mode 100644 index 000000000..46e4b2208 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_vpn_gateway_route_propagation.go @@ -0,0 +1,102 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsVpnGatewayRoutePropagation() 
*schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsVpnGatewayRoutePropagationEnable,
+		Read:   resourceAwsVpnGatewayRoutePropagationRead,
+		Delete: resourceAwsVpnGatewayRoutePropagationDisable,
+
+		Schema: map[string]*schema.Schema{
+			"vpn_gateway_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"route_table_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+		},
+	}
+}
+
+func resourceAwsVpnGatewayRoutePropagationEnable(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	gwID := d.Get("vpn_gateway_id").(string)
+	rtID := d.Get("route_table_id").(string)
+
+	log.Printf("[INFO] Enabling VGW propagation from %s to %s", gwID, rtID)
+	_, err := conn.EnableVgwRoutePropagation(&ec2.EnableVgwRoutePropagationInput{
+		GatewayId:    aws.String(gwID),
+		RouteTableId: aws.String(rtID),
+	})
+	if err != nil {
+		return fmt.Errorf("error enabling VGW propagation: %s", err)
+	}
+
+	d.SetId(fmt.Sprintf("%s_%s", gwID, rtID))
+	return nil
+}
+
+func resourceAwsVpnGatewayRoutePropagationDisable(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	gwID := d.Get("vpn_gateway_id").(string)
+	rtID := d.Get("route_table_id").(string)
+
+	log.Printf("[INFO] Disabling VGW propagation from %s to %s", gwID, rtID)
+	_, err := conn.DisableVgwRoutePropagation(&ec2.DisableVgwRoutePropagationInput{
+		GatewayId:    aws.String(gwID),
+		RouteTableId: aws.String(rtID),
+	})
+	if err != nil {
+		return fmt.Errorf("error disabling VGW propagation: %s", err)
+	}
+
+	d.SetId("")
+	return nil
+}
+
+func resourceAwsVpnGatewayRoutePropagationRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	gwID := d.Get("vpn_gateway_id").(string)
+	rtID := d.Get("route_table_id").(string)
+
+	log.Printf("[INFO] Reading route table %s to check for VPN gateway %s", rtID, gwID)
+	rtRaw, _, err := resourceAwsRouteTableStateRefreshFunc(conn, rtID)()
+	if err != nil {
+		return err
+	}
+	if rtRaw == nil {
+		log.Printf("[INFO] Route table %q doesn't exist, so dropping %q route propagation from state", rtID, gwID)
+		d.SetId("")
+		return nil
+	}
+
+	rt := rtRaw.(*ec2.RouteTable)
+	exists := false
+	for _, vgw := range rt.PropagatingVgws {
+		if *vgw.GatewayId == gwID {
+			exists = true
+			break
+		}
+	}
+	if !exists {
+		log.Printf("[INFO] VPN gateway %s is no longer propagating to route table %s, so dropping route propagation from state", gwID, rtID)
+		d.SetId("")
+		return nil
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_byte_match_set.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_byte_match_set.go
new file mode 100644
index 000000000..53f3e93b8
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_byte_match_set.go
@@ -0,0 +1,249 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/waf"
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsWafByteMatchSet() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsWafByteMatchSetCreate,
+		Read:   resourceAwsWafByteMatchSetRead,
+		Update: resourceAwsWafByteMatchSetUpdate,
+		Delete: resourceAwsWafByteMatchSetDelete,
+
+		Schema: map[string]*schema.Schema{
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"byte_match_tuples": &schema.Schema{
+				Type:     schema.TypeSet,
+				Optional: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"field_to_match": {
+							Type:     schema.TypeSet,
+							Required: true,
+							MaxItems: 1,
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"data": {
+										Type:     schema.TypeString,
+										Optional: true,
+									},
+									"type": {
+										Type:     schema.TypeString,
+										Required: true,
+									},
+								},
+							},
+						},
+						"positional_constraint": &schema.Schema{
+							Type:     schema.TypeString,
+							Required: true,
+						},
+						"target_string": &schema.Schema{
+							Type:     schema.TypeString,
+							Optional: true,
+						},
+						"text_transformation": &schema.Schema{
+							Type:     schema.TypeString,
+							Required: true,
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func resourceAwsWafByteMatchSetCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).wafconn
+
+	log.Printf("[INFO] Creating ByteMatchSet: %s", d.Get("name").(string))
+
+	wr := newWafRetryer(conn, "global")
+	out, err := wr.RetryWithToken(func(token *string) (interface{}, error) {
+		params := &waf.CreateByteMatchSetInput{
+			ChangeToken: token,
+			Name:        aws.String(d.Get("name").(string)),
+		}
+		return conn.CreateByteMatchSet(params)
+	})
+	if err != nil {
+		return errwrap.Wrapf("[ERROR] Error creating ByteMatchSet: {{err}}", err)
+	}
+	resp := out.(*waf.CreateByteMatchSetOutput)
+
+	d.SetId(*resp.ByteMatchSet.ByteMatchSetId)
+
+	return resourceAwsWafByteMatchSetUpdate(d, meta)
+}
+
+func resourceAwsWafByteMatchSetRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).wafconn
+	log.Printf("[INFO] Reading ByteMatchSet: %s", d.Get("name").(string))
+	params := &waf.GetByteMatchSetInput{
+		ByteMatchSetId: aws.String(d.Id()),
+	}
+
+	resp, err := conn.GetByteMatchSet(params)
+	if err != nil {
+		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "WAFNonexistentItemException" {
+			log.Printf("[WARN] WAF ByteMatchSet (%s) not found, error code (404)", d.Id())
+			d.SetId("")
+			return nil
+		}
+
+		return err
+	}
+
+	d.Set("name", resp.ByteMatchSet.Name)
+	d.Set("byte_match_tuples", flattenWafByteMatchTuples(resp.ByteMatchSet.ByteMatchTuples))
+
+	return nil
+}
+
+func resourceAwsWafByteMatchSetUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).wafconn
+
+	log.Printf("[INFO] Updating ByteMatchSet: %s", d.Get("name").(string))
+
+	if d.HasChange("byte_match_tuples") {
+		o, n := d.GetChange("byte_match_tuples")
+		oldT, newT := o.(*schema.Set).List(), n.(*schema.Set).List()
+		err := updateByteMatchSetResource(d.Id(), oldT, newT, conn)
+		if err != nil {
+			return errwrap.Wrapf("[ERROR] Error updating ByteMatchSet: {{err}}", err)
+		}
+	}
+
+	return resourceAwsWafByteMatchSetRead(d, meta)
+}
+
+func resourceAwsWafByteMatchSetDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).wafconn
+
+	oldTuples := d.Get("byte_match_tuples").(*schema.Set).List()
+	if len(oldTuples) > 0 {
+		noTuples := []interface{}{}
+		err := updateByteMatchSetResource(d.Id(), oldTuples, noTuples, conn)
+		if err != nil {
+			return fmt.Errorf("Error updating ByteMatchSet: %s", err)
+		}
+	}
+
+	wr := newWafRetryer(conn, "global")
+	_, err := wr.RetryWithToken(func(token *string) (interface{}, error) {
+		req := &waf.DeleteByteMatchSetInput{
+			ChangeToken:    token,
+			ByteMatchSetId: aws.String(d.Id()),
+		}
+		log.Printf("[INFO] Deleting WAF ByteMatchSet: %s", req)
+		return conn.DeleteByteMatchSet(req)
+	})
+	if err != nil {
+		return errwrap.Wrapf("[ERROR] Error deleting ByteMatchSet: {{err}}",
err) + } + + return nil +} + +func updateByteMatchSetResource(id string, oldT, newT []interface{}, conn *waf.WAF) error { + wr := newWafRetryer(conn, "global") + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.UpdateByteMatchSetInput{ + ChangeToken: token, + ByteMatchSetId: aws.String(id), + Updates: diffWafByteMatchSetTuples(oldT, newT), + } + + return conn.UpdateByteMatchSet(req) + }) + if err != nil { + return errwrap.Wrapf("[ERROR] Error updating ByteMatchSet: {{err}}", err) + } + + return nil +} + +func flattenWafByteMatchTuples(bmt []*waf.ByteMatchTuple) []interface{} { + out := make([]interface{}, len(bmt), len(bmt)) + for i, t := range bmt { + m := make(map[string]interface{}) + + if t.FieldToMatch != nil { + m["field_to_match"] = flattenFieldToMatch(t.FieldToMatch) + } + m["positional_constraint"] = *t.PositionalConstraint + m["target_string"] = string(t.TargetString) + m["text_transformation"] = *t.TextTransformation + + out[i] = m + } + return out +} + +func expandFieldToMatch(d map[string]interface{}) *waf.FieldToMatch { + return &waf.FieldToMatch{ + Type: aws.String(d["type"].(string)), + Data: aws.String(d["data"].(string)), + } +} + +func flattenFieldToMatch(fm *waf.FieldToMatch) []interface{} { + m := make(map[string]interface{}) + if fm.Data != nil { + m["data"] = *fm.Data + } + if fm.Type != nil { + m["type"] = *fm.Type + } + return []interface{}{m} +} + +func diffWafByteMatchSetTuples(oldT, newT []interface{}) []*waf.ByteMatchSetUpdate { + updates := make([]*waf.ByteMatchSetUpdate, 0) + + for _, ot := range oldT { + tuple := ot.(map[string]interface{}) + + if idx, contains := sliceContainsMap(newT, tuple); contains { + newT = append(newT[:idx], newT[idx+1:]...) + continue + } + + updates = append(updates, &waf.ByteMatchSetUpdate{ + Action: aws.String(waf.ChangeActionDelete), + ByteMatchTuple: &waf.ByteMatchTuple{ + FieldToMatch: expandFieldToMatch(tuple["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), + PositionalConstraint: aws.String(tuple["positional_constraint"].(string)), + TargetString: []byte(tuple["target_string"].(string)), + TextTransformation: aws.String(tuple["text_transformation"].(string)), + }, + }) + } + + for _, nt := range newT { + tuple := nt.(map[string]interface{}) + + updates = append(updates, &waf.ByteMatchSetUpdate{ + Action: aws.String(waf.ChangeActionInsert), + ByteMatchTuple: &waf.ByteMatchTuple{ + FieldToMatch: expandFieldToMatch(tuple["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), + PositionalConstraint: aws.String(tuple["positional_constraint"].(string)), + TargetString: []byte(tuple["target_string"].(string)), + TextTransformation: aws.String(tuple["text_transformation"].(string)), + }, + }) + } + return updates +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_ipset.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_ipset.go new file mode 100644 index 000000000..40ef54ff3 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_ipset.go @@ -0,0 +1,195 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/waf" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsWafIPSet() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsWafIPSetCreate, + Read: resourceAwsWafIPSetRead, + Update: 
resourceAwsWafIPSetUpdate, + Delete: resourceAwsWafIPSetDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "ip_set_descriptors": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + } +} + +func resourceAwsWafIPSetCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafconn + + wr := newWafRetryer(conn, "global") + out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + params := &waf.CreateIPSetInput{ + ChangeToken: token, + Name: aws.String(d.Get("name").(string)), + } + return conn.CreateIPSet(params) + }) + if err != nil { + return err + } + resp := out.(*waf.CreateIPSetOutput) + d.SetId(*resp.IPSet.IPSetId) + return resourceAwsWafIPSetUpdate(d, meta) +} + +func resourceAwsWafIPSetRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafconn + + params := &waf.GetIPSetInput{ + IPSetId: aws.String(d.Id()), + } + + resp, err := conn.GetIPSet(params) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "WAFNonexistentItemException" { + log.Printf("[WARN] WAF IPSet (%s) not found, error code (404)", d.Id()) + d.SetId("") + return nil + } + + return err + } + + var descriptors []map[string]interface{} + + for _, descriptor := range resp.IPSet.IPSetDescriptors { + d := map[string]interface{}{ + "type": *descriptor.Type, + "value": *descriptor.Value, + } + descriptors = append(descriptors, d) + } + + d.Set("ip_set_descriptors", descriptors) + + d.Set("name", resp.IPSet.Name) + + return nil +} + +func resourceAwsWafIPSetUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafconn + + if d.HasChange("ip_set_descriptors") { + o, n := d.GetChange("ip_set_descriptors") + oldD, newD := o.(*schema.Set).List(), n.(*schema.Set).List() + + err := updateWafIpSetDescriptors(d.Id(), oldD, newD, conn) + if err != nil { + return fmt.Errorf("Error Updating WAF IPSet: %s", err) + } + } + + return resourceAwsWafIPSetRead(d, meta) +} + +func resourceAwsWafIPSetDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafconn + + oldDescriptors := d.Get("ip_set_descriptors").(*schema.Set).List() + + if len(oldDescriptors) > 0 { + noDescriptors := []interface{}{} + err := updateWafIpSetDescriptors(d.Id(), oldDescriptors, noDescriptors, conn) + if err != nil { + return fmt.Errorf("Error updating IPSetDescriptors: %s", err) + } + } + + wr := newWafRetryer(conn, "global") + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.DeleteIPSetInput{ + ChangeToken: token, + IPSetId: aws.String(d.Id()), + } + log.Printf("[INFO] Deleting WAF IPSet") + return conn.DeleteIPSet(req) + }) + if err != nil { + return fmt.Errorf("Error Deleting WAF IPSet: %s", err) + } + + return nil +} + +func updateWafIpSetDescriptors(id string, oldD, newD []interface{}, conn *waf.WAF) error { + wr := newWafRetryer(conn, "global") + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.UpdateIPSetInput{ + ChangeToken: token, + IPSetId: aws.String(id), + Updates: diffWafIpSetDescriptors(oldD, newD), + } + log.Printf("[INFO] Updating IPSet descriptors: %s", req) + return conn.UpdateIPSet(req) + 
}) + if err != nil { + return fmt.Errorf("Error Updating WAF IPSet: %s", err) + } + + return nil +} + +func diffWafIpSetDescriptors(oldD, newD []interface{}) []*waf.IPSetUpdate { + updates := make([]*waf.IPSetUpdate, 0) + + for _, od := range oldD { + descriptor := od.(map[string]interface{}) + + if idx, contains := sliceContainsMap(newD, descriptor); contains { + newD = append(newD[:idx], newD[idx+1:]...) + continue + } + + updates = append(updates, &waf.IPSetUpdate{ + Action: aws.String(waf.ChangeActionDelete), + IPSetDescriptor: &waf.IPSetDescriptor{ + Type: aws.String(descriptor["type"].(string)), + Value: aws.String(descriptor["value"].(string)), + }, + }) + } + + for _, nd := range newD { + descriptor := nd.(map[string]interface{}) + + updates = append(updates, &waf.IPSetUpdate{ + Action: aws.String(waf.ChangeActionInsert), + IPSetDescriptor: &waf.IPSetDescriptor{ + Type: aws.String(descriptor["type"].(string)), + Value: aws.String(descriptor["value"].(string)), + }, + }) + } + return updates +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_rule.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_rule.go new file mode 100644 index 000000000..e7d44d7be --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_rule.go @@ -0,0 +1,225 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/waf" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsWafRule() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsWafRuleCreate, + Read: resourceAwsWafRuleRead, + Update: resourceAwsWafRuleUpdate, + Delete: resourceAwsWafRuleDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "metric_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateWafMetricName, + }, + "predicates": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "negated": &schema.Schema{ + Type: schema.TypeBool, + Required: true, + }, + "data_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 128 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 128 characters", k)) + } + return + }, + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "IPMatch" && value != "ByteMatch" && value != "SqlInjectionMatch" && value != "SizeConstraint" && value != "XssMatch" { + errors = append(errors, fmt.Errorf( + "%q must be one of IPMatch | ByteMatch | SqlInjectionMatch | SizeConstraint | XssMatch", k)) + } + return + }, + }, + }, + }, + }, + }, + } +} + +func resourceAwsWafRuleCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafconn + + wr := newWafRetryer(conn, "global") + out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + params := &waf.CreateRuleInput{ + ChangeToken: token, + MetricName: aws.String(d.Get("metric_name").(string)), + Name: aws.String(d.Get("name").(string)), + } + + return conn.CreateRule(params) + }) + if err != nil { + 
return err + } + resp := out.(*waf.CreateRuleOutput) + d.SetId(*resp.Rule.RuleId) + return resourceAwsWafRuleUpdate(d, meta) +} + +func resourceAwsWafRuleRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafconn + + params := &waf.GetRuleInput{ + RuleId: aws.String(d.Id()), + } + + resp, err := conn.GetRule(params) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "WAFNonexistentItemException" { + log.Printf("[WARN] WAF Rule (%s) not found, error code (404)", d.Id()) + d.SetId("") + return nil + } + + return err + } + + var predicates []map[string]interface{} + + for _, predicateSet := range resp.Rule.Predicates { + predicate := map[string]interface{}{ + "negated": *predicateSet.Negated, + "type": *predicateSet.Type, + "data_id": *predicateSet.DataId, + } + predicates = append(predicates, predicate) + } + + d.Set("predicates", predicates) + d.Set("name", resp.Rule.Name) + d.Set("metric_name", resp.Rule.MetricName) + + return nil +} + +func resourceAwsWafRuleUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafconn + + if d.HasChange("predicates") { + o, n := d.GetChange("predicates") + oldP, newP := o.(*schema.Set).List(), n.(*schema.Set).List() + + err := updateWafRuleResource(d.Id(), oldP, newP, conn) + if err != nil { + return fmt.Errorf("Error Updating WAF Rule: %s", err) + } + } + + return resourceAwsWafRuleRead(d, meta) +} + +func resourceAwsWafRuleDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafconn + + oldPredicates := d.Get("predicates").(*schema.Set).List() + if len(oldPredicates) > 0 { + noPredicates := []interface{}{} + err := updateWafRuleResource(d.Id(), oldPredicates, noPredicates, conn) + if err != nil { + return fmt.Errorf("Error updating WAF Rule Predicates: %s", err) + } + } + + wr := newWafRetryer(conn, "global") + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.DeleteRuleInput{ + ChangeToken: token, + RuleId: aws.String(d.Id()), + } + log.Printf("[INFO] Deleting WAF Rule") + return conn.DeleteRule(req) + }) + if err != nil { + return fmt.Errorf("Error deleting WAF Rule: %s", err) + } + + return nil +} + +func updateWafRuleResource(id string, oldP, newP []interface{}, conn *waf.WAF) error { + wr := newWafRetryer(conn, "global") + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.UpdateRuleInput{ + ChangeToken: token, + RuleId: aws.String(id), + Updates: diffWafRulePredicates(oldP, newP), + } + + return conn.UpdateRule(req) + }) + if err != nil { + return fmt.Errorf("Error Updating WAF Rule: %s", err) + } + + return nil +} + +func diffWafRulePredicates(oldP, newP []interface{}) []*waf.RuleUpdate { + updates := make([]*waf.RuleUpdate, 0) + + for _, op := range oldP { + predicate := op.(map[string]interface{}) + + if idx, contains := sliceContainsMap(newP, predicate); contains { + newP = append(newP[:idx], newP[idx+1:]...) 
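+			// The predicate is present in both the old and new sets, so drop it
+			// from newP: it needs neither a DELETE nor an INSERT action below.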
+ continue + } + + updates = append(updates, &waf.RuleUpdate{ + Action: aws.String(waf.ChangeActionDelete), + Predicate: &waf.Predicate{ + Negated: aws.Bool(predicate["negated"].(bool)), + Type: aws.String(predicate["type"].(string)), + DataId: aws.String(predicate["data_id"].(string)), + }, + }) + } + + for _, np := range newP { + predicate := np.(map[string]interface{}) + + updates = append(updates, &waf.RuleUpdate{ + Action: aws.String(waf.ChangeActionInsert), + Predicate: &waf.Predicate{ + Negated: aws.Bool(predicate["negated"].(bool)), + Type: aws.String(predicate["type"].(string)), + DataId: aws.String(predicate["data_id"].(string)), + }, + }) + } + return updates +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_size_constraint_set.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_size_constraint_set.go new file mode 100644 index 000000000..5e9f46dd4 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_size_constraint_set.go @@ -0,0 +1,229 @@ +package aws + +import ( + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/waf" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsWafSizeConstraintSet() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsWafSizeConstraintSetCreate, + Read: resourceAwsWafSizeConstraintSetRead, + Update: resourceAwsWafSizeConstraintSetUpdate, + Delete: resourceAwsWafSizeConstraintSetDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "size_constraints": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field_to_match": { + Type: schema.TypeSet, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "data": { + Type: schema.TypeString, + Optional: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "comparison_operator": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "size": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + "text_transformation": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + } +} + +func resourceAwsWafSizeConstraintSetCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafconn + + log.Printf("[INFO] Creating SizeConstraintSet: %s", d.Get("name").(string)) + + wr := newWafRetryer(conn, "global") + out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + params := &waf.CreateSizeConstraintSetInput{ + ChangeToken: token, + Name: aws.String(d.Get("name").(string)), + } + + return conn.CreateSizeConstraintSet(params) + }) + if err != nil { + return errwrap.Wrapf("[ERROR] Error creating SizeConstraintSet: {{err}}", err) + } + resp := out.(*waf.CreateSizeConstraintSetOutput) + + d.SetId(*resp.SizeConstraintSet.SizeConstraintSetId) + + return resourceAwsWafSizeConstraintSetUpdate(d, meta) +} + +func resourceAwsWafSizeConstraintSetRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafconn + log.Printf("[INFO] Reading SizeConstraintSet: %s", d.Get("name").(string)) + params := &waf.GetSizeConstraintSetInput{ + SizeConstraintSetId: aws.String(d.Id()), + } + + resp, err := 
conn.GetSizeConstraintSet(params)
+	if err != nil {
+		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "WAFNonexistentItemException" {
+			log.Printf("[WARN] WAF SizeConstraintSet (%s) not found, error code (404)", d.Id())
+			d.SetId("")
+			return nil
+		}
+
+		return err
+	}
+
+	d.Set("name", resp.SizeConstraintSet.Name)
+	d.Set("size_constraints", flattenWafSizeConstraints(resp.SizeConstraintSet.SizeConstraints))
+
+	return nil
+}
+
+func resourceAwsWafSizeConstraintSetUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).wafconn
+
+	if d.HasChange("size_constraints") {
+		o, n := d.GetChange("size_constraints")
+		oldS, newS := o.(*schema.Set).List(), n.(*schema.Set).List()
+
+		err := updateSizeConstraintSetResource(d.Id(), oldS, newS, conn)
+		if err != nil {
+			return errwrap.Wrapf("[ERROR] Error updating SizeConstraintSet: {{err}}", err)
+		}
+	}
+
+	return resourceAwsWafSizeConstraintSetRead(d, meta)
+}
+
+func resourceAwsWafSizeConstraintSetDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).wafconn
+
+	oldConstraints := d.Get("size_constraints").(*schema.Set).List()
+
+	if len(oldConstraints) > 0 {
+		noConstraints := []interface{}{}
+		err := updateSizeConstraintSetResource(d.Id(), oldConstraints, noConstraints, conn)
+		if err != nil {
+			return errwrap.Wrapf("[ERROR] Error deleting SizeConstraintSet: {{err}}", err)
+		}
+	}
+
+	wr := newWafRetryer(conn, "global")
+	_, err := wr.RetryWithToken(func(token *string) (interface{}, error) {
+		req := &waf.DeleteSizeConstraintSetInput{
+			ChangeToken:         token,
+			SizeConstraintSetId: aws.String(d.Id()),
+		}
+		return conn.DeleteSizeConstraintSet(req)
+	})
+	if err != nil {
+		return errwrap.Wrapf("[ERROR] Error deleting SizeConstraintSet: {{err}}", err)
+	}
+
+	return nil
+}
+
+func updateSizeConstraintSetResource(id string, oldS, newS []interface{}, conn *waf.WAF) error {
+	wr := newWafRetryer(conn, "global")
+	_, err := wr.RetryWithToken(func(token *string) (interface{}, error) {
+		req := &waf.UpdateSizeConstraintSetInput{
+			ChangeToken:         token,
+			SizeConstraintSetId: aws.String(id),
+			Updates:             diffWafSizeConstraints(oldS, newS),
+		}
+
+		log.Printf("[INFO] Updating WAF Size Constraint constraints: %s", req)
+		return conn.UpdateSizeConstraintSet(req)
+	})
+	if err != nil {
+		return errwrap.Wrapf("[ERROR] Error updating SizeConstraintSet: {{err}}", err)
+	}
+
+	return nil
+}
+
+func flattenWafSizeConstraints(sc []*waf.SizeConstraint) []interface{} {
+	out := make([]interface{}, len(sc), len(sc))
+	for i, c := range sc {
+		m := make(map[string]interface{})
+		m["comparison_operator"] = *c.ComparisonOperator
+		if c.FieldToMatch != nil {
+			m["field_to_match"] = flattenFieldToMatch(c.FieldToMatch)
+		}
+		m["size"] = *c.Size
+		m["text_transformation"] = *c.TextTransformation
+		out[i] = m
+	}
+	return out
+}
+
+func diffWafSizeConstraints(oldS, newS []interface{}) []*waf.SizeConstraintSetUpdate {
+	updates := make([]*waf.SizeConstraintSetUpdate, 0)
+
+	for _, os := range oldS {
+		constraint := os.(map[string]interface{})
+
+		if idx, contains := sliceContainsMap(newS, constraint); contains {
+			newS = append(newS[:idx], newS[idx+1:]...)
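+			// Constraint unchanged between old and new; skip it so the request
+			// only carries real deletions and insertions.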
+ continue + } + + updates = append(updates, &waf.SizeConstraintSetUpdate{ + Action: aws.String(waf.ChangeActionDelete), + SizeConstraint: &waf.SizeConstraint{ + FieldToMatch: expandFieldToMatch(constraint["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), + ComparisonOperator: aws.String(constraint["comparison_operator"].(string)), + Size: aws.Int64(int64(constraint["size"].(int))), + TextTransformation: aws.String(constraint["text_transformation"].(string)), + }, + }) + } + + for _, ns := range newS { + constraint := ns.(map[string]interface{}) + + updates = append(updates, &waf.SizeConstraintSetUpdate{ + Action: aws.String(waf.ChangeActionInsert), + SizeConstraint: &waf.SizeConstraint{ + FieldToMatch: expandFieldToMatch(constraint["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), + ComparisonOperator: aws.String(constraint["comparison_operator"].(string)), + Size: aws.Int64(int64(constraint["size"].(int))), + TextTransformation: aws.String(constraint["text_transformation"].(string)), + }, + }) + } + return updates +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_sql_injection_match_set.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_sql_injection_match_set.go new file mode 100644 index 000000000..808373c4a --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_sql_injection_match_set.go @@ -0,0 +1,214 @@ +package aws + +import ( + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/waf" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsWafSqlInjectionMatchSet() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsWafSqlInjectionMatchSetCreate, + Read: resourceAwsWafSqlInjectionMatchSetRead, + Update: resourceAwsWafSqlInjectionMatchSetUpdate, + Delete: resourceAwsWafSqlInjectionMatchSetDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "sql_injection_match_tuples": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field_to_match": { + Type: schema.TypeSet, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "data": { + Type: schema.TypeString, + Optional: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "text_transformation": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + } +} + +func resourceAwsWafSqlInjectionMatchSetCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafconn + + log.Printf("[INFO] Creating SqlInjectionMatchSet: %s", d.Get("name").(string)) + + wr := newWafRetryer(conn, "global") + out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + params := &waf.CreateSqlInjectionMatchSetInput{ + ChangeToken: token, + Name: aws.String(d.Get("name").(string)), + } + + return conn.CreateSqlInjectionMatchSet(params) + }) + if err != nil { + return errwrap.Wrapf("[ERROR] Error creating SqlInjectionMatchSet: {{err}}", err) + } + resp := out.(*waf.CreateSqlInjectionMatchSetOutput) + d.SetId(*resp.SqlInjectionMatchSet.SqlInjectionMatchSetId) + + return resourceAwsWafSqlInjectionMatchSetUpdate(d, meta) +} + +func 
resourceAwsWafSqlInjectionMatchSetRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).wafconn
+	log.Printf("[INFO] Reading SqlInjectionMatchSet: %s", d.Get("name").(string))
+	params := &waf.GetSqlInjectionMatchSetInput{
+		SqlInjectionMatchSetId: aws.String(d.Id()),
+	}
+
+	resp, err := conn.GetSqlInjectionMatchSet(params)
+	if err != nil {
+		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "WAFNonexistentItemException" {
+			log.Printf("[WARN] WAF SqlInjectionMatchSet (%s) not found, error code (404)", d.Id())
+			d.SetId("")
+			return nil
+		}
+
+		return err
+	}
+
+	d.Set("name", resp.SqlInjectionMatchSet.Name)
+	d.Set("sql_injection_match_tuples", flattenWafSqlInjectionMatchTuples(resp.SqlInjectionMatchSet.SqlInjectionMatchTuples))
+
+	return nil
+}
+
+func resourceAwsWafSqlInjectionMatchSetUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).wafconn
+
+	if d.HasChange("sql_injection_match_tuples") {
+		o, n := d.GetChange("sql_injection_match_tuples")
+		oldT, newT := o.(*schema.Set).List(), n.(*schema.Set).List()
+
+		err := updateSqlInjectionMatchSetResource(d.Id(), oldT, newT, conn)
+		if err != nil {
+			return errwrap.Wrapf("[ERROR] Error updating SqlInjectionMatchSet: {{err}}", err)
+		}
+	}
+
+	return resourceAwsWafSqlInjectionMatchSetRead(d, meta)
+}
+
+func resourceAwsWafSqlInjectionMatchSetDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).wafconn
+
+	oldTuples := d.Get("sql_injection_match_tuples").(*schema.Set).List()
+
+	if len(oldTuples) > 0 {
+		noTuples := []interface{}{}
+		err := updateSqlInjectionMatchSetResource(d.Id(), oldTuples, noTuples, conn)
+		if err != nil {
+			return errwrap.Wrapf("[ERROR] Error deleting SqlInjectionMatchSet: {{err}}", err)
+		}
+	}
+
+	wr := newWafRetryer(conn, "global")
+	_, err := wr.RetryWithToken(func(token *string) (interface{}, error) {
+		req := &waf.DeleteSqlInjectionMatchSetInput{
+			ChangeToken:            token,
+			SqlInjectionMatchSetId: aws.String(d.Id()),
+		}
+
+		return conn.DeleteSqlInjectionMatchSet(req)
+	})
+	if err != nil {
+		return errwrap.Wrapf("[ERROR] Error deleting SqlInjectionMatchSet: {{err}}", err)
+	}
+
+	return nil
+}
+
+func updateSqlInjectionMatchSetResource(id string, oldT, newT []interface{}, conn *waf.WAF) error {
+	wr := newWafRetryer(conn, "global")
+	_, err := wr.RetryWithToken(func(token *string) (interface{}, error) {
+		req := &waf.UpdateSqlInjectionMatchSetInput{
+			ChangeToken:            token,
+			SqlInjectionMatchSetId: aws.String(id),
+			Updates:                diffWafSqlInjectionMatchTuples(oldT, newT),
+		}
+
+		log.Printf("[INFO] Updating SqlInjectionMatchSet: %s", req)
+		return conn.UpdateSqlInjectionMatchSet(req)
+	})
+	if err != nil {
+		return errwrap.Wrapf("[ERROR] Error updating SqlInjectionMatchSet: {{err}}", err)
+	}
+
+	return nil
+}
+
+func flattenWafSqlInjectionMatchTuples(ts []*waf.SqlInjectionMatchTuple) []interface{} {
+	out := make([]interface{}, len(ts), len(ts))
+	for i, t := range ts {
+		m := make(map[string]interface{})
+		m["text_transformation"] = *t.TextTransformation
+		m["field_to_match"] = flattenFieldToMatch(t.FieldToMatch)
+		out[i] = m
+	}
+
+	return out
+}
+
+func diffWafSqlInjectionMatchTuples(oldT, newT []interface{}) []*waf.SqlInjectionMatchSetUpdate {
+	updates := make([]*waf.SqlInjectionMatchSetUpdate, 0)
+
+	for _, od := range oldT {
+		tuple := od.(map[string]interface{})
+
+		if idx, contains := sliceContainsMap(newT, tuple); contains {
+			newT = append(newT[:idx], newT[idx+1:]...)
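+			// Tuple appears in both the old and new lists; no update is needed for it.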
+ continue + } + + updates = append(updates, &waf.SqlInjectionMatchSetUpdate{ + Action: aws.String(waf.ChangeActionDelete), + SqlInjectionMatchTuple: &waf.SqlInjectionMatchTuple{ + FieldToMatch: expandFieldToMatch(tuple["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), + TextTransformation: aws.String(tuple["text_transformation"].(string)), + }, + }) + } + + for _, nd := range newT { + tuple := nd.(map[string]interface{}) + + updates = append(updates, &waf.SqlInjectionMatchSetUpdate{ + Action: aws.String(waf.ChangeActionInsert), + SqlInjectionMatchTuple: &waf.SqlInjectionMatchTuple{ + FieldToMatch: expandFieldToMatch(tuple["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), + TextTransformation: aws.String(tuple["text_transformation"].(string)), + }, + }) + } + return updates +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_web_acl.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_web_acl.go new file mode 100644 index 000000000..7e3ac7237 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_web_acl.go @@ -0,0 +1,228 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/waf" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsWafWebAcl() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsWafWebAclCreate, + Read: resourceAwsWafWebAclRead, + Update: resourceAwsWafWebAclUpdate, + Delete: resourceAwsWafWebAclDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "default_action": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "metric_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateWafMetricName, + }, + "rules": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "action": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "priority": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + "rule_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + } +} + +func resourceAwsWafWebAclCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafconn + + wr := newWafRetryer(conn, "global") + out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + params := &waf.CreateWebACLInput{ + ChangeToken: token, + DefaultAction: expandDefaultAction(d), + MetricName: aws.String(d.Get("metric_name").(string)), + Name: aws.String(d.Get("name").(string)), + } + + return conn.CreateWebACL(params) + }) + if err != nil { + return err + } + resp := out.(*waf.CreateWebACLOutput) + d.SetId(*resp.WebACL.WebACLId) + return resourceAwsWafWebAclUpdate(d, meta) +} + +func resourceAwsWafWebAclRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafconn + params := &waf.GetWebACLInput{ + WebACLId: aws.String(d.Id()), + } + + resp, 
err := conn.GetWebACL(params) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "WAFNonexistentItemException" { + log.Printf("[WARN] WAF ACL (%s) not found, error code (404)", d.Id()) + d.SetId("") + return nil + } + + return err + } + + defaultAction := flattenDefaultAction(resp.WebACL.DefaultAction) + if defaultAction != nil { + if err := d.Set("default_action", defaultAction); err != nil { + return fmt.Errorf("error setting default_action: %s", err) + } + } + d.Set("name", resp.WebACL.Name) + d.Set("metric_name", resp.WebACL.MetricName) + + return nil +} + +func resourceAwsWafWebAclUpdate(d *schema.ResourceData, meta interface{}) error { + err := updateWebAclResource(d, meta, waf.ChangeActionInsert) + if err != nil { + return fmt.Errorf("Error Updating WAF ACL: %s", err) + } + return resourceAwsWafWebAclRead(d, meta) +} + +func resourceAwsWafWebAclDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafconn + err := updateWebAclResource(d, meta, waf.ChangeActionDelete) + if err != nil { + return fmt.Errorf("Error Removing WAF ACL Rules: %s", err) + } + + wr := newWafRetryer(conn, "global") + _, err = wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.DeleteWebACLInput{ + ChangeToken: token, + WebACLId: aws.String(d.Id()), + } + + log.Printf("[INFO] Deleting WAF ACL") + return conn.DeleteWebACL(req) + }) + if err != nil { + return fmt.Errorf("Error Deleting WAF ACL: %s", err) + } + return nil +} + +func updateWebAclResource(d *schema.ResourceData, meta interface{}, ChangeAction string) error { + conn := meta.(*AWSClient).wafconn + + wr := newWafRetryer(conn, "global") + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.UpdateWebACLInput{ + ChangeToken: token, + WebACLId: aws.String(d.Id()), + } + + if d.HasChange("default_action") { + req.DefaultAction = expandDefaultAction(d) + } + + rules := d.Get("rules").(*schema.Set) + for _, rule := range rules.List() { + aclRule := rule.(map[string]interface{}) + action := aclRule["action"].(*schema.Set).List()[0].(map[string]interface{}) + aclRuleUpdate := &waf.WebACLUpdate{ + Action: aws.String(ChangeAction), + ActivatedRule: &waf.ActivatedRule{ + Priority: aws.Int64(int64(aclRule["priority"].(int))), + RuleId: aws.String(aclRule["rule_id"].(string)), + Action: &waf.WafAction{Type: aws.String(action["type"].(string))}, + }, + } + req.Updates = append(req.Updates, aclRuleUpdate) + } + return conn.UpdateWebACL(req) + }) + if err != nil { + return fmt.Errorf("Error Updating WAF ACL: %s", err) + } + return nil +} + +func expandDefaultAction(d *schema.ResourceData) *waf.WafAction { + set, ok := d.GetOk("default_action") + if !ok { + return nil + } + + s := set.(*schema.Set).List() + if s == nil || len(s) == 0 { + return nil + } + + if s[0] == nil { + log.Printf("[ERR] First element of Default Action is set to nil") + return nil + } + + dA := s[0].(map[string]interface{}) + + return &waf.WafAction{ + Type: aws.String(dA["type"].(string)), + } +} + +func flattenDefaultAction(n *waf.WafAction) []map[string]interface{} { + if n == nil { + return nil + } + + m := setMap(make(map[string]interface{})) + + m.SetString("type", n.Type) + return m.MapList() +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_xss_match_set.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_xss_match_set.go new file mode 100644 index 000000000..c6ea0d630 --- /dev/null +++ 
b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_waf_xss_match_set.go
@@ -0,0 +1,214 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/waf"
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsWafXssMatchSet() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsWafXssMatchSetCreate,
+		Read:   resourceAwsWafXssMatchSetRead,
+		Update: resourceAwsWafXssMatchSetUpdate,
+		Delete: resourceAwsWafXssMatchSetDelete,
+
+		Schema: map[string]*schema.Schema{
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"xss_match_tuples": &schema.Schema{
+				Type:     schema.TypeSet,
+				Optional: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"field_to_match": {
+							Type:     schema.TypeSet,
+							Required: true,
+							MaxItems: 1,
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"data": {
+										Type:     schema.TypeString,
+										Optional: true,
+									},
+									"type": {
+										Type:     schema.TypeString,
+										Required: true,
+									},
+								},
+							},
+						},
+						"text_transformation": &schema.Schema{
+							Type:     schema.TypeString,
+							Required: true,
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func resourceAwsWafXssMatchSetCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).wafconn
+
+	log.Printf("[INFO] Creating XssMatchSet: %s", d.Get("name").(string))
+
+	wr := newWafRetryer(conn, "global")
+	out, err := wr.RetryWithToken(func(token *string) (interface{}, error) {
+		params := &waf.CreateXssMatchSetInput{
+			ChangeToken: token,
+			Name:        aws.String(d.Get("name").(string)),
+		}
+
+		return conn.CreateXssMatchSet(params)
+	})
+	if err != nil {
+		return errwrap.Wrapf("[ERROR] Error creating XssMatchSet: {{err}}", err)
+	}
+	resp := out.(*waf.CreateXssMatchSetOutput)
+
+	d.SetId(*resp.XssMatchSet.XssMatchSetId)
+
+	return resourceAwsWafXssMatchSetUpdate(d, meta)
+}
+
+func resourceAwsWafXssMatchSetRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).wafconn
+	log.Printf("[INFO] Reading XssMatchSet: %s", d.Get("name").(string))
+	params := &waf.GetXssMatchSetInput{
+		XssMatchSetId: aws.String(d.Id()),
+	}
+
+	resp, err := conn.GetXssMatchSet(params)
+	if err != nil {
+		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "WAFNonexistentItemException" {
+			log.Printf("[WARN] WAF XssMatchSet (%s) not found, error code (404)", d.Id())
+			d.SetId("")
+			return nil
+		}
+
+		return err
+	}
+
+	d.Set("name", resp.XssMatchSet.Name)
+	d.Set("xss_match_tuples", flattenWafXssMatchTuples(resp.XssMatchSet.XssMatchTuples))
+
+	return nil
+}
+
+func resourceAwsWafXssMatchSetUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).wafconn
+
+	if d.HasChange("xss_match_tuples") {
+		o, n := d.GetChange("xss_match_tuples")
+		oldT, newT := o.(*schema.Set).List(), n.(*schema.Set).List()
+
+		err := updateXssMatchSetResource(d.Id(), oldT, newT, conn)
+		if err != nil {
+			return errwrap.Wrapf("[ERROR] Error updating XssMatchSet: {{err}}", err)
+		}
+	}
+
+	return resourceAwsWafXssMatchSetRead(d, meta)
+}
+
+func resourceAwsWafXssMatchSetDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).wafconn
+
+	oldTuples := d.Get("xss_match_tuples").(*schema.Set).List()
+	if len(oldTuples) > 0 {
+		noTuples := []interface{}{}
+		err := updateXssMatchSetResource(d.Id(), oldTuples, noTuples, conn)
+		if err != nil {
+			return fmt.Errorf("Error deleting XssMatchSet tuples: %s", err)
+		}
+	}
+
+	wr := newWafRetryer(conn, "global")
+	_, err := wr.RetryWithToken(func(token *string) (interface{}, error) {
+		req := &waf.DeleteXssMatchSetInput{
+			ChangeToken:   token,
+			XssMatchSetId: aws.String(d.Id()),
+		}
+
+		return conn.DeleteXssMatchSet(req)
+	})
+	if err != nil {
+		return errwrap.Wrapf("[ERROR] Error deleting XssMatchSet: {{err}}", err)
+	}
+
+	return nil
+}
+
+func updateXssMatchSetResource(id string, oldT, newT []interface{}, conn *waf.WAF) error {
+	wr := newWafRetryer(conn, "global")
+	_, err := wr.RetryWithToken(func(token *string) (interface{}, error) {
+		req := &waf.UpdateXssMatchSetInput{
+			ChangeToken:   token,
+			XssMatchSetId: aws.String(id),
+			Updates:       diffWafXssMatchSetTuples(oldT, newT),
+		}
+
+		log.Printf("[INFO] Updating XssMatchSet tuples: %s", req)
+		return conn.UpdateXssMatchSet(req)
+	})
+	if err != nil {
+		return errwrap.Wrapf("[ERROR] Error updating XssMatchSet: {{err}}", err)
+	}
+
+	return nil
+}
+
+func flattenWafXssMatchTuples(ts []*waf.XssMatchTuple) []interface{} {
+	out := make([]interface{}, len(ts), len(ts))
+	for i, t := range ts {
+		m := make(map[string]interface{})
+		m["field_to_match"] = flattenFieldToMatch(t.FieldToMatch)
+		m["text_transformation"] = *t.TextTransformation
+		out[i] = m
+	}
+	return out
+}
+
+func diffWafXssMatchSetTuples(oldT, newT []interface{}) []*waf.XssMatchSetUpdate {
+	updates := make([]*waf.XssMatchSetUpdate, 0)
+
+	for _, od := range oldT {
+		tuple := od.(map[string]interface{})
+
+		if idx, contains := sliceContainsMap(newT, tuple); contains {
+			newT = append(newT[:idx], newT[idx+1:]...)
+			continue
+		}
+
+		updates = append(updates, &waf.XssMatchSetUpdate{
+			Action: aws.String(waf.ChangeActionDelete),
+			XssMatchTuple: &waf.XssMatchTuple{
+				FieldToMatch:       expandFieldToMatch(tuple["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})),
+				TextTransformation: aws.String(tuple["text_transformation"].(string)),
+			},
+		})
+	}
+
+	for _, nd := range newT {
+		tuple := nd.(map[string]interface{})
+
+		updates = append(updates, &waf.XssMatchSetUpdate{
+			Action: aws.String(waf.ChangeActionInsert),
+			XssMatchTuple: &waf.XssMatchTuple{
+				FieldToMatch:       expandFieldToMatch(tuple["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})),
+				TextTransformation: aws.String(tuple["text_transformation"].(string)),
+			},
+		})
+	}
+	return updates
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_byte_match_set.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_byte_match_set.go
new file mode 100644
index 000000000..d7f916ad3
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_byte_match_set.go
@@ -0,0 +1,266 @@
+package aws
+
+import (
+	"log"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/waf"
+	"github.com/aws/aws-sdk-go/service/wafregional"
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsWafRegionalByteMatchSet() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsWafRegionalByteMatchSetCreate,
+		Read:   resourceAwsWafRegionalByteMatchSetRead,
+		Update: resourceAwsWafRegionalByteMatchSetUpdate,
+		Delete: resourceAwsWafRegionalByteMatchSetDelete,
+
+		Schema: map[string]*schema.Schema{
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"byte_match_tuple": &schema.Schema{
+				Type:     schema.TypeSet,
+				Optional: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"field_to_match": {
+							Type:     schema.TypeSet,
+							Required: true,
+							MaxItems: 1,
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"data": {
+										Type:     schema.TypeString,
+										Optional: true,
+									},
+									"type": {
+										Type:     schema.TypeString,
+										Required: true,
+									},
+								},
+							},
+						},
+						"positional_constraint": &schema.Schema{
+							Type:     schema.TypeString,
+							Required: true,
+						},
+						"target_string": &schema.Schema{
+							Type:     schema.TypeString,
+							Optional: true,
+						},
+						"text_transformation": &schema.Schema{
+							Type:     schema.TypeString,
+							Required: true,
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func resourceAwsWafRegionalByteMatchSetCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).wafregionalconn
+	region := meta.(*AWSClient).region
+
+	log.Printf("[INFO] Creating ByteMatchSet: %s", d.Get("name").(string))
+
+	wr := newWafRegionalRetryer(conn, region)
+	out, err := wr.RetryWithToken(func(token *string) (interface{}, error) {
+		params := &waf.CreateByteMatchSetInput{
+			ChangeToken: token,
+			Name:        aws.String(d.Get("name").(string)),
+		}
+		return conn.CreateByteMatchSet(params)
+	})
+
+	if err != nil {
+		return errwrap.Wrapf("[ERROR] Error creating ByteMatchSet: {{err}}", err)
+	}
+	resp := out.(*waf.CreateByteMatchSetOutput)
+
+	d.SetId(*resp.ByteMatchSet.ByteMatchSetId)
+
+	return resourceAwsWafRegionalByteMatchSetUpdate(d, meta)
+}
+
+func resourceAwsWafRegionalByteMatchSetRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).wafregionalconn
+
+	log.Printf("[INFO] Reading ByteMatchSet: %s", d.Get("name").(string))
+
+	params := &waf.GetByteMatchSetInput{
+		ByteMatchSetId: aws.String(d.Id()),
+	}
+
+	resp, err := conn.GetByteMatchSet(params)
+	if err != nil {
+		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "WAFNonexistentItemException" {
+			log.Printf("[WARN] WAF Regional ByteMatchSet (%s) not found, error code (404)", d.Id())
+			d.SetId("")
+			return nil
+		}
+
+		return err
+	}
+
+	d.Set("byte_match_tuple", flattenWafByteMatchTuplesWR(resp.ByteMatchSet.ByteMatchTuples))
+	d.Set("name", resp.ByteMatchSet.Name)
+
+	return nil
+}
+
+func flattenWafByteMatchTuplesWR(in []*waf.ByteMatchTuple) []interface{} {
+	tuples := make([]interface{}, len(in), len(in))
+
+	for i, tuple := range in {
+		field_to_match := tuple.FieldToMatch
+		m := map[string]interface{}{
+			"type": *field_to_match.Type,
+		}
+
+		if field_to_match.Data == nil {
+			m["data"] = ""
+		} else {
+			m["data"] = *field_to_match.Data
+		}
+
+		var ms []map[string]interface{}
+		ms = append(ms, m)
+
+		tuple := map[string]interface{}{
+			"field_to_match":        ms,
+			"positional_constraint": *tuple.PositionalConstraint,
+			"target_string":         string(tuple.TargetString),
+			"text_transformation":   *tuple.TextTransformation,
+		}
+		tuples[i] = tuple
+	}
+
+	return tuples
+}
+
+func resourceAwsWafRegionalByteMatchSetUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).wafregionalconn
+	region := meta.(*AWSClient).region
+	log.Printf("[INFO] Updating ByteMatchSet: %s", d.Get("name").(string))
+
+	if d.HasChange("byte_match_tuple") {
+		o, n := d.GetChange("byte_match_tuple")
+		oldT, newT := o.(*schema.Set).List(), n.(*schema.Set).List()
+
+		err := updateByteMatchSetResourceWR(d, oldT, newT, conn, region)
+		if err != nil {
+			return errwrap.Wrapf("[ERROR] Error updating ByteMatchSet: {{err}}", err)
+		}
+	}
+	return resourceAwsWafRegionalByteMatchSetRead(d, meta)
+}
+
+func resourceAwsWafRegionalByteMatchSetDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafregionalconn + region := meta.(*AWSClient).region + + log.Printf("[INFO] Deleting ByteMatchSet: %s", d.Get("name").(string)) + + oldT := d.Get("byte_match_tuple").(*schema.Set).List() + + if len(oldT) > 0 { + var newT []interface{} + + err := updateByteMatchSetResourceWR(d, oldT, newT, conn, region) + if err != nil { + return errwrap.Wrapf("[ERROR] Error deleting ByteMatchSet: {{err}}", err) + } + } + + wr := newWafRegionalRetryer(conn, region) + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.DeleteByteMatchSetInput{ + ChangeToken: token, + ByteMatchSetId: aws.String(d.Id()), + } + return conn.DeleteByteMatchSet(req) + }) + if err != nil { + return errwrap.Wrapf("[ERROR] Error deleting ByteMatchSet: {{err}}", err) + } + + return nil +} + +func updateByteMatchSetResourceWR(d *schema.ResourceData, oldT, newT []interface{}, conn *wafregional.WAFRegional, region string) error { + wr := newWafRegionalRetryer(conn, region) + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.UpdateByteMatchSetInput{ + ChangeToken: token, + ByteMatchSetId: aws.String(d.Id()), + Updates: diffByteMatchSetTuple(oldT, newT), + } + + return conn.UpdateByteMatchSet(req) + }) + if err != nil { + return errwrap.Wrapf("[ERROR] Error updating ByteMatchSet: {{err}}", err) + } + + return nil +} + +func expandFieldToMatchWR(d map[string]interface{}) *waf.FieldToMatch { + return &waf.FieldToMatch{ + Type: aws.String(d["type"].(string)), + Data: aws.String(d["data"].(string)), + } +} + +func flattenFieldToMatchWR(fm *waf.FieldToMatch) map[string]interface{} { + m := make(map[string]interface{}) + m["data"] = *fm.Data + m["type"] = *fm.Type + return m +} + +func diffByteMatchSetTuple(oldT, newT []interface{}) []*waf.ByteMatchSetUpdate { + updates := make([]*waf.ByteMatchSetUpdate, 0) + + for _, ot := range oldT { + tuple := ot.(map[string]interface{}) + + if idx, contains := sliceContainsMap(newT, tuple); contains { + newT = append(newT[:idx], newT[idx+1:]...) 
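+			// Tuple unchanged between old and new; leave it out of the update batch entirely.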
+ continue + } + + updates = append(updates, &waf.ByteMatchSetUpdate{ + Action: aws.String(waf.ChangeActionDelete), + ByteMatchTuple: &waf.ByteMatchTuple{ + FieldToMatch: expandFieldToMatch(tuple["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), + PositionalConstraint: aws.String(tuple["positional_constraint"].(string)), + TargetString: []byte(tuple["target_string"].(string)), + TextTransformation: aws.String(tuple["text_transformation"].(string)), + }, + }) + } + + for _, nt := range newT { + tuple := nt.(map[string]interface{}) + + updates = append(updates, &waf.ByteMatchSetUpdate{ + Action: aws.String(waf.ChangeActionInsert), + ByteMatchTuple: &waf.ByteMatchTuple{ + FieldToMatch: expandFieldToMatch(tuple["field_to_match"].(*schema.Set).List()[0].(map[string]interface{})), + PositionalConstraint: aws.String(tuple["positional_constraint"].(string)), + TargetString: []byte(tuple["target_string"].(string)), + TextTransformation: aws.String(tuple["text_transformation"].(string)), + }, + }) + } + return updates +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_ipset.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_ipset.go new file mode 100644 index 000000000..0507621ee --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_wafregional_ipset.go @@ -0,0 +1,170 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/waf" + "github.com/aws/aws-sdk-go/service/wafregional" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsWafRegionalIPSet() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsWafRegionalIPSetCreate, + Read: resourceAwsWafRegionalIPSetRead, + Update: resourceAwsWafRegionalIPSetUpdate, + Delete: resourceAwsWafRegionalIPSetDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "ip_set_descriptor": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + } +} + +func resourceAwsWafRegionalIPSetCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafregionalconn + region := meta.(*AWSClient).region + + wr := newWafRegionalRetryer(conn, region) + out, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + params := &waf.CreateIPSetInput{ + ChangeToken: token, + Name: aws.String(d.Get("name").(string)), + } + return conn.CreateIPSet(params) + }) + if err != nil { + return err + } + resp := out.(*waf.CreateIPSetOutput) + d.SetId(*resp.IPSet.IPSetId) + return resourceAwsWafRegionalIPSetUpdate(d, meta) +} + +func resourceAwsWafRegionalIPSetRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafregionalconn + + params := &waf.GetIPSetInput{ + IPSetId: aws.String(d.Id()), + } + + resp, err := conn.GetIPSet(params) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "WAFNonexistentItemException" { + log.Printf("[WARN] WAF IPSet (%s) not found, error code (404)", d.Id()) + d.SetId("") + return nil + } + + return err + } + + d.Set("ip_set_descriptor", 
flattenWafIpSetDescriptorWR(resp.IPSet.IPSetDescriptors)) + d.Set("name", resp.IPSet.Name) + + return nil +} + +func flattenWafIpSetDescriptorWR(in []*waf.IPSetDescriptor) []interface{} { + descriptors := make([]interface{}, len(in), len(in)) + + for i, descriptor := range in { + d := map[string]interface{}{ + "type": *descriptor.Type, + "value": *descriptor.Value, + } + descriptors[i] = d + } + + return descriptors +} + +func resourceAwsWafRegionalIPSetUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafregionalconn + region := meta.(*AWSClient).region + + if d.HasChange("ip_set_descriptor") { + o, n := d.GetChange("ip_set_descriptor") + oldD, newD := o.(*schema.Set).List(), n.(*schema.Set).List() + + err := updateIPSetResourceWR(d.Id(), oldD, newD, conn, region) + if err != nil { + return fmt.Errorf("Error Updating WAF IPSet: %s", err) + } + } + return resourceAwsWafRegionalIPSetRead(d, meta) +} + +func resourceAwsWafRegionalIPSetDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).wafregionalconn + region := meta.(*AWSClient).region + + oldD := d.Get("ip_set_descriptor").(*schema.Set).List() + + if len(oldD) > 0 { + noD := []interface{}{} + err := updateIPSetResourceWR(d.Id(), oldD, noD, conn, region) + + if err != nil { + return fmt.Errorf("Error Removing IPSetDescriptors: %s", err) + } + } + + wr := newWafRegionalRetryer(conn, region) + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.DeleteIPSetInput{ + ChangeToken: token, + IPSetId: aws.String(d.Id()), + } + log.Printf("[INFO] Deleting WAF IPSet") + return conn.DeleteIPSet(req) + }) + if err != nil { + return fmt.Errorf("Error Deleting WAF IPSet: %s", err) + } + + return nil +} + +func updateIPSetResourceWR(id string, oldD, newD []interface{}, conn *wafregional.WAFRegional, region string) error { + + wr := newWafRegionalRetryer(conn, region) + _, err := wr.RetryWithToken(func(token *string) (interface{}, error) { + req := &waf.UpdateIPSetInput{ + ChangeToken: token, + IPSetId: aws.String(id), + Updates: diffWafIpSetDescriptors(oldD, newD), + } + log.Printf("[INFO] Updating IPSet descriptor: %s", req) + + return conn.UpdateIPSet(req) + }) + if err != nil { + return fmt.Errorf("Error Updating WAF IPSet: %s", err) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_vpn_connection_route.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_vpn_connection_route.go new file mode 100644 index 000000000..e6863f721 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_vpn_connection_route.go @@ -0,0 +1,140 @@ +package aws + +import ( + "fmt" + "log" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsVpnConnectionRoute() *schema.Resource { + return &schema.Resource{ + // You can't update a route. You can just delete one and make + // a new one. 
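+		// Both attributes below are ForceNew, so any change to either one
+		// recreates the route rather than updating it in place.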
+		Create: resourceAwsVpnConnectionRouteCreate,
+		Read:   resourceAwsVpnConnectionRouteRead,
+		Delete: resourceAwsVpnConnectionRouteDelete,
+
+		Schema: map[string]*schema.Schema{
+			"destination_cidr_block": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"vpn_connection_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+		},
+	}
+}
+
+func resourceAwsVpnConnectionRouteCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	createOpts := &ec2.CreateVpnConnectionRouteInput{
+		DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)),
+		VpnConnectionId:      aws.String(d.Get("vpn_connection_id").(string)),
+	}
+
+	// Create the route.
+	log.Printf("[DEBUG] Creating VPN connection route")
+	_, err := conn.CreateVpnConnectionRoute(createOpts)
+	if err != nil {
+		return fmt.Errorf("Error creating VPN connection route: %s", err)
+	}
+
+	// Build the ID from the only two pieces of data we have available.
+	d.SetId(fmt.Sprintf("%s:%s", *createOpts.DestinationCidrBlock, *createOpts.VpnConnectionId))
+
+	return resourceAwsVpnConnectionRouteRead(d, meta)
+}
+
+func resourceAwsVpnConnectionRouteRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	cidrBlock, vpnConnectionId := resourceAwsVpnConnectionRouteParseId(d.Id())
+
+	routeFilters := []*ec2.Filter{
+		&ec2.Filter{
+			Name:   aws.String("route.destination-cidr-block"),
+			Values: []*string{aws.String(cidrBlock)},
+		},
+		&ec2.Filter{
+			Name:   aws.String("vpn-connection-id"),
+			Values: []*string{aws.String(vpnConnectionId)},
+		},
+	}
+
+	// Technically, we know everything there is to know about the route
+	// from its ID, but we still want to catch cases where it changes
+	// outside of terraform and results in a stale state file. Hence,
+	// conduct a read.
+	resp, err := conn.DescribeVpnConnections(&ec2.DescribeVpnConnectionsInput{
+		Filters: routeFilters,
+	})
+	if err != nil {
+		if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpnConnectionID.NotFound" {
+			d.SetId("")
+			return nil
+		} else {
+			log.Printf("[ERROR] Error finding VPN connection route: %s", err)
+			return err
+		}
+	}
+	if resp == nil || len(resp.VpnConnections) == 0 {
+		// This is kind of a weird edge case. I'd rather return an error
+		// instead of just blindly setting the ID to ""... since I don't know
+		// what might cause this.
+		return fmt.Errorf("No VPN connections returned")
+	}
+
+	vpnConnection := resp.VpnConnections[0]
+
+	var found bool
+	for _, r := range vpnConnection.Routes {
+		if *r.DestinationCidrBlock == cidrBlock {
+			d.Set("destination_cidr_block", *r.DestinationCidrBlock)
+			d.Set("vpn_connection_id", *vpnConnection.VpnConnectionId)
+			found = true
+		}
+	}
+	if !found {
+		// Something other than terraform eliminated the route.
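+		// Clearing the ID tells Terraform the route no longer exists, so
+		// the next plan recreates it instead of failing on stale state.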
+ d.SetId("") + } + + return nil +} + +func resourceAwsVpnConnectionRouteDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + _, err := conn.DeleteVpnConnectionRoute(&ec2.DeleteVpnConnectionRouteInput{ + DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)), + VpnConnectionId: aws.String(d.Get("vpn_connection_id").(string)), + }) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpnConnectionID.NotFound" { + d.SetId("") + return nil + } else { + log.Printf("[ERROR] Error deleting VPN connection route: %s", err) + return err + } + } + + return nil +} + +func resourceAwsVpnConnectionRouteParseId(id string) (string, string) { + parts := strings.SplitN(id, ":", 2) + return parts[0], parts[1] +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/s3_tags.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/s3_tags.go new file mode 100644 index 000000000..f691cff46 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/s3_tags.go @@ -0,0 +1,133 @@ +package aws + +import ( + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/hashicorp/terraform/helper/schema" +) + +// setTags is a helper to set the tags for a resource. It expects the +// tags field to be named "tags" +func setTagsS3(conn *s3.S3, d *schema.ResourceData) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsS3(tagsFromMapS3(o), tagsFromMapS3(n)) + + // Set tags + if len(remove) > 0 { + log.Printf("[DEBUG] Removing tags: %#v", remove) + _, err := conn.DeleteBucketTagging(&s3.DeleteBucketTaggingInput{ + Bucket: aws.String(d.Get("bucket").(string)), + }) + if err != nil { + return err + } + } + if len(create) > 0 { + log.Printf("[DEBUG] Creating tags: %#v", create) + req := &s3.PutBucketTaggingInput{ + Bucket: aws.String(d.Get("bucket").(string)), + Tagging: &s3.Tagging{ + TagSet: create, + }, + } + + _, err := conn.PutBucketTagging(req) + if err != nil { + return err + } + } + } + + return nil +} + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTagsS3(oldTags, newTags []*s3.Tag) ([]*s3.Tag, []*s3.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[*t.Key] = *t.Value + } + + // Build the list of what to remove + var remove []*s3.Tag + for _, t := range oldTags { + old, ok := create[*t.Key] + if !ok || old != *t.Value { + // Delete it! + remove = append(remove, t) + } + } + + return tagsFromMapS3(create), remove +} + +// tagsFromMap returns the tags for the given map of data. +func tagsFromMapS3(m map[string]interface{}) []*s3.Tag { + result := make([]*s3.Tag, 0, len(m)) + for k, v := range m { + t := &s3.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + if !tagIgnoredS3(t) { + result = append(result, t) + } + } + + return result +} + +// tagsToMap turns the list of tags into a map. 
+func tagsToMapS3(ts []*s3.Tag) map[string]string {
+	result := make(map[string]string)
+	for _, t := range ts {
+		if !tagIgnoredS3(t) {
+			result[*t.Key] = *t.Value
+		}
+	}
+
+	return result
+}
+
+// getTagSetS3 returns the tags associated with the given S3 bucket.
+// Essentially s3.GetBucketTagging, except it returns an empty slice instead
+// of an error when there are no tags.
+func getTagSetS3(s3conn *s3.S3, bucket string) ([]*s3.Tag, error) {
+	request := &s3.GetBucketTaggingInput{
+		Bucket: aws.String(bucket),
+	}
+
+	response, err := s3conn.GetBucketTagging(request)
+	if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoSuchTagSet" {
+		// There is no tag set associated with the bucket.
+		return []*s3.Tag{}, nil
+	} else if err != nil {
+		return nil, err
+	}
+
+	return response.TagSet, nil
+}
+
+// tagIgnoredS3 compares a tag against a list of patterns and checks whether
+// the tag should be ignored.
+func tagIgnoredS3(t *s3.Tag) bool {
+	filter := []string{"^aws:"}
+	for _, v := range filter {
+		log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key)
+		if r, _ := regexp.MatchString(v, *t.Key); r {
+			log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value)
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/sort.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/sort.go
new file mode 100644
index 000000000..0d90458eb
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/sort.go
@@ -0,0 +1,53 @@
+package aws
+
+import (
+	"sort"
+	"time"
+
+	"github.com/aws/aws-sdk-go/service/ec2"
+)
+
+type imageSort []*ec2.Image
+type snapshotSort []*ec2.Snapshot
+
+func (a imageSort) Len() int {
+	return len(a)
+}
+
+func (a imageSort) Swap(i, j int) {
+	a[i], a[j] = a[j], a[i]
+}
+
+func (a imageSort) Less(i, j int) bool {
+	itime, _ := time.Parse(time.RFC3339, *a[i].CreationDate)
+	jtime, _ := time.Parse(time.RFC3339, *a[j].CreationDate)
+	return itime.Unix() < jtime.Unix()
+}
+
+// Sort images by creation date, in descending order.
+func sortImages(images []*ec2.Image) []*ec2.Image {
+	sortedImages := images
+	sort.Sort(sort.Reverse(imageSort(sortedImages)))
+	return sortedImages
+}
+
+func (a snapshotSort) Len() int {
+	return len(a)
+}
+
+func (a snapshotSort) Swap(i, j int) {
+	a[i], a[j] = a[j], a[i]
+}
+
+func (a snapshotSort) Less(i, j int) bool {
+	itime := *a[i].StartTime
+	jtime := *a[j].StartTime
+	return itime.Unix() < jtime.Unix()
+}
+
+// Sort snapshots by creation date, in descending order.
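+// The snapshot with the most recent StartTime ends up first in the
+// returned slice.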
+func sortSnapshots(snapshots []*ec2.Snapshot) []*ec2.Snapshot { + sortedSnapshots := snapshots + sort.Sort(sort.Reverse(snapshotSort(sortedSnapshots))) + return sortedSnapshots +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/structure.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/structure.go new file mode 100644 index 000000000..262dccfdc --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/structure.go @@ -0,0 +1,2127 @@ +package aws + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/apigateway" + "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go/service/cloudformation" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/aws/aws-sdk-go/service/cognitoidentity" + "github.com/aws/aws-sdk-go/service/configservice" + "github.com/aws/aws-sdk-go/service/directoryservice" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ecs" + "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go/service/elasticbeanstalk" + elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" + "github.com/aws/aws-sdk-go/service/elb" + "github.com/aws/aws-sdk-go/service/kinesis" + "github.com/aws/aws-sdk-go/service/lambda" + "github.com/aws/aws-sdk-go/service/rds" + "github.com/aws/aws-sdk-go/service/redshift" + "github.com/aws/aws-sdk-go/service/route53" + "github.com/aws/aws-sdk-go/service/ssm" + "github.com/hashicorp/terraform/helper/schema" + "gopkg.in/yaml.v2" +) + +// Takes the result of flatmap.Expand for an array of listeners and +// returns ELB API compatible objects +func expandListeners(configured []interface{}) ([]*elb.Listener, error) { + listeners := make([]*elb.Listener, 0, len(configured)) + + // Loop over our configured listeners and create + // an array of aws-sdk-go compatible objects + for _, lRaw := range configured { + data := lRaw.(map[string]interface{}) + + ip := int64(data["instance_port"].(int)) + lp := int64(data["lb_port"].(int)) + l := &elb.Listener{ + InstancePort: &ip, + InstanceProtocol: aws.String(data["instance_protocol"].(string)), + LoadBalancerPort: &lp, + Protocol: aws.String(data["lb_protocol"].(string)), + } + + if v, ok := data["ssl_certificate_id"]; ok { + l.SSLCertificateId = aws.String(v.(string)) + } + + var valid bool + if l.SSLCertificateId != nil && *l.SSLCertificateId != "" { + // validate the protocol is correct + for _, p := range []string{"https", "ssl"} { + if (strings.ToLower(*l.InstanceProtocol) == p) || (strings.ToLower(*l.Protocol) == p) { + valid = true + } + } + } else { + valid = true + } + + if valid { + listeners = append(listeners, l) + } else { + return nil, fmt.Errorf("[ERR] ELB Listener: ssl_certificate_id may be set only when protocol is 'https' or 'ssl'") + } + } + + return listeners, nil +} + +// Takes the result of flatmap. 
Expand for an array of volumes and
+// returns ECS Volume compatible objects
+func expandEcsVolumes(configured []interface{}) ([]*ecs.Volume, error) {
+	volumes := make([]*ecs.Volume, 0, len(configured))
+
+	// Loop over our configured volumes and create
+	// an array of aws-sdk-go compatible objects
+	for _, lRaw := range configured {
+		data := lRaw.(map[string]interface{})
+
+		l := &ecs.Volume{
+			Name: aws.String(data["name"].(string)),
+		}
+
+		hostPath := data["host_path"].(string)
+		if hostPath != "" {
+			l.Host = &ecs.HostVolumeProperties{
+				SourcePath: aws.String(hostPath),
+			}
+		}
+
+		volumes = append(volumes, l)
+	}
+
+	return volumes, nil
+}
+
+// Takes JSON in a string. Decodes JSON into
+// an array of ecs.ContainerDefinition compatible objects
+func expandEcsContainerDefinitions(rawDefinitions string) ([]*ecs.ContainerDefinition, error) {
+	var definitions []*ecs.ContainerDefinition
+
+	err := json.Unmarshal([]byte(rawDefinitions), &definitions)
+	if err != nil {
+		return nil, fmt.Errorf("Error decoding JSON: %s", err)
+	}
+
+	return definitions, nil
+}
+
+// Takes the result of flatmap.Expand for an array of load balancers and
+// returns ecs.LoadBalancer compatible objects
+func expandEcsLoadBalancers(configured []interface{}) []*ecs.LoadBalancer {
+	loadBalancers := make([]*ecs.LoadBalancer, 0, len(configured))
+
+	// Loop over our configured load balancers and create
+	// an array of aws-sdk-go compatible objects
+	for _, lRaw := range configured {
+		data := lRaw.(map[string]interface{})
+
+		l := &ecs.LoadBalancer{
+			ContainerName: aws.String(data["container_name"].(string)),
+			ContainerPort: aws.Int64(int64(data["container_port"].(int))),
+		}
+
+		if v, ok := data["elb_name"]; ok && v.(string) != "" {
+			l.LoadBalancerName = aws.String(v.(string))
+		}
+		if v, ok := data["target_group_arn"]; ok && v.(string) != "" {
+			l.TargetGroupArn = aws.String(v.(string))
+		}
+
+		loadBalancers = append(loadBalancers, l)
+	}
+
+	return loadBalancers
+}
+
+// Takes the result of flatmap.Expand for an array of ingress/egress security
+// group rules and returns EC2 API compatible objects. This function will error
+// if it finds invalid permissions input, namely a protocol of "-1" with either
+// to_port or from_port set to a non-zero value.
+func expandIPPerms(
+	group *ec2.SecurityGroup, configured []interface{}) ([]*ec2.IpPermission, error) {
+	vpc := group.VpcId != nil && *group.VpcId != ""
+
+	perms := make([]*ec2.IpPermission, len(configured))
+	for i, mRaw := range configured {
+		var perm ec2.IpPermission
+		m := mRaw.(map[string]interface{})
+
+		perm.FromPort = aws.Int64(int64(m["from_port"].(int)))
+		perm.ToPort = aws.Int64(int64(m["to_port"].(int)))
+		perm.IpProtocol = aws.String(m["protocol"].(string))
+
+		// When protocol is "-1", AWS won't store any ports for the
+		// rule, but also won't error if the user specifies ports other
+		// than '0'. Force the user to make a deliberate '0' port
+		// choice when specifying a "-1" protocol, and tell them about
+		// AWS's behavior in the error message.
+ if *perm.IpProtocol == "-1" && (*perm.FromPort != 0 || *perm.ToPort != 0) { + return nil, fmt.Errorf( + "from_port (%d) and to_port (%d) must both be 0 to use the 'ALL' \"-1\" protocol!", + *perm.FromPort, *perm.ToPort) + } + + var groups []string + if raw, ok := m["security_groups"]; ok { + list := raw.(*schema.Set).List() + for _, v := range list { + groups = append(groups, v.(string)) + } + } + if v, ok := m["self"]; ok && v.(bool) { + if vpc { + groups = append(groups, *group.GroupId) + } else { + groups = append(groups, *group.GroupName) + } + } + + if len(groups) > 0 { + perm.UserIdGroupPairs = make([]*ec2.UserIdGroupPair, len(groups)) + for i, name := range groups { + ownerId, id := "", name + if items := strings.Split(id, "/"); len(items) > 1 { + ownerId, id = items[0], items[1] + } + + perm.UserIdGroupPairs[i] = &ec2.UserIdGroupPair{ + GroupId: aws.String(id), + } + + if ownerId != "" { + perm.UserIdGroupPairs[i].UserId = aws.String(ownerId) + } + + if !vpc { + perm.UserIdGroupPairs[i].GroupId = nil + perm.UserIdGroupPairs[i].GroupName = aws.String(id) + } + } + } + + if raw, ok := m["cidr_blocks"]; ok { + list := raw.([]interface{}) + for _, v := range list { + perm.IpRanges = append(perm.IpRanges, &ec2.IpRange{CidrIp: aws.String(v.(string))}) + } + } + if raw, ok := m["ipv6_cidr_blocks"]; ok { + list := raw.([]interface{}) + for _, v := range list { + perm.Ipv6Ranges = append(perm.Ipv6Ranges, &ec2.Ipv6Range{CidrIpv6: aws.String(v.(string))}) + } + } + + if raw, ok := m["prefix_list_ids"]; ok { + list := raw.([]interface{}) + for _, v := range list { + perm.PrefixListIds = append(perm.PrefixListIds, &ec2.PrefixListId{PrefixListId: aws.String(v.(string))}) + } + } + + perms[i] = &perm + } + + return perms, nil +} + +// Takes the result of flatmap.Expand for an array of parameters and +// returns Parameter API compatible objects +func expandParameters(configured []interface{}) ([]*rds.Parameter, error) { + var parameters []*rds.Parameter + + // Loop over our configured parameters and create + // an array of aws-sdk-go compatible objects + for _, pRaw := range configured { + data := pRaw.(map[string]interface{}) + + if data["name"].(string) == "" { + continue + } + + p := &rds.Parameter{ + ApplyMethod: aws.String(data["apply_method"].(string)), + ParameterName: aws.String(data["name"].(string)), + ParameterValue: aws.String(data["value"].(string)), + } + + parameters = append(parameters, p) + } + + return parameters, nil +} + +func expandRedshiftParameters(configured []interface{}) ([]*redshift.Parameter, error) { + var parameters []*redshift.Parameter + + // Loop over our configured parameters and create + // an array of aws-sdk-go compatible objects + for _, pRaw := range configured { + data := pRaw.(map[string]interface{}) + + if data["name"].(string) == "" { + continue + } + + p := &redshift.Parameter{ + ParameterName: aws.String(data["name"].(string)), + ParameterValue: aws.String(data["value"].(string)), + } + + parameters = append(parameters, p) + } + + return parameters, nil +} + +func expandOptionConfiguration(configured []interface{}) ([]*rds.OptionConfiguration, error) { + var option []*rds.OptionConfiguration + + for _, pRaw := range configured { + data := pRaw.(map[string]interface{}) + + o := &rds.OptionConfiguration{ + OptionName: aws.String(data["option_name"].(string)), + } + + if raw, ok := data["port"]; ok { + port := raw.(int) + if port != 0 { + o.Port = aws.Int64(int64(port)) + } + } + + if raw, ok := data["db_security_group_memberships"]; ok { + memberships 
:= expandStringList(raw.(*schema.Set).List()) + if len(memberships) > 0 { + o.DBSecurityGroupMemberships = memberships + } + } + + if raw, ok := data["vpc_security_group_memberships"]; ok { + memberships := expandStringList(raw.(*schema.Set).List()) + if len(memberships) > 0 { + o.VpcSecurityGroupMemberships = memberships + } + } + + if raw, ok := data["option_settings"]; ok { + o.OptionSettings = expandOptionSetting(raw.(*schema.Set).List()) + } + + option = append(option, o) + } + + return option, nil +} + +func expandOptionSetting(list []interface{}) []*rds.OptionSetting { + options := make([]*rds.OptionSetting, 0, len(list)) + + for _, oRaw := range list { + data := oRaw.(map[string]interface{}) + + o := &rds.OptionSetting{ + Name: aws.String(data["name"].(string)), + Value: aws.String(data["value"].(string)), + } + + options = append(options, o) + } + + return options +} + +// Takes the result of flatmap.Expand for an array of parameters and +// returns Parameter API compatible objects +func expandElastiCacheParameters(configured []interface{}) ([]*elasticache.ParameterNameValue, error) { + parameters := make([]*elasticache.ParameterNameValue, 0, len(configured)) + + // Loop over our configured parameters and create + // an array of aws-sdk-go compatible objects + for _, pRaw := range configured { + data := pRaw.(map[string]interface{}) + + p := &elasticache.ParameterNameValue{ + ParameterName: aws.String(data["name"].(string)), + ParameterValue: aws.String(data["value"].(string)), + } + + parameters = append(parameters, p) + } + + return parameters, nil +} + +// Flattens an access log into something that flatmap.Flatten() can handle +func flattenAccessLog(l *elb.AccessLog) []map[string]interface{} { + result := make([]map[string]interface{}, 0, 1) + + if l == nil { + return nil + } + + r := make(map[string]interface{}) + if l.S3BucketName != nil { + r["bucket"] = *l.S3BucketName + } + + if l.S3BucketPrefix != nil { + r["bucket_prefix"] = *l.S3BucketPrefix + } + + if l.EmitInterval != nil { + r["interval"] = *l.EmitInterval + } + + if l.Enabled != nil { + r["enabled"] = *l.Enabled + } + + result = append(result, r) + + return result +} + +// Takes the result of flatmap.Expand for an array of step adjustments and +// returns a []*autoscaling.StepAdjustment. +func expandStepAdjustments(configured []interface{}) ([]*autoscaling.StepAdjustment, error) { + var adjustments []*autoscaling.StepAdjustment + + // Loop over our configured step adjustments and create an array + // of aws-sdk-go compatible objects. We're forced to convert strings + // to floats here because there's no way to detect whether or not + // an uninitialized, optional schema element is "0.0" deliberately. + // With strings, we can test for "", which is definitely an empty + // struct value. + for _, raw := range configured { + data := raw.(map[string]interface{}) + a := &autoscaling.StepAdjustment{ + ScalingAdjustment: aws.Int64(int64(data["scaling_adjustment"].(int))), + } + if data["metric_interval_lower_bound"] != "" { + bound := data["metric_interval_lower_bound"] + switch bound := bound.(type) { + case string: + f, err := strconv.ParseFloat(bound, 64) + if err != nil { + return nil, fmt.Errorf( + "metric_interval_lower_bound must be a float value represented as a string") + } + a.MetricIntervalLowerBound = aws.Float64(f) + default: + return nil, fmt.Errorf( + "metric_interval_lower_bound isn't a string. This is a bug. 
Please file an issue.")
+			}
+		}
+		if data["metric_interval_upper_bound"] != "" {
+			bound := data["metric_interval_upper_bound"]
+			switch bound := bound.(type) {
+			case string:
+				f, err := strconv.ParseFloat(bound, 64)
+				if err != nil {
+					return nil, fmt.Errorf(
+						"metric_interval_upper_bound must be a float value represented as a string")
+				}
+				a.MetricIntervalUpperBound = aws.Float64(f)
+			default:
+				return nil, fmt.Errorf(
+					"metric_interval_upper_bound isn't a string. This is a bug. Please file an issue.")
+			}
+		}
+		adjustments = append(adjustments, a)
+	}
+
+	return adjustments, nil
+}
+
+// Flattens a health check into something that flatmap.Flatten()
+// can handle
+func flattenHealthCheck(check *elb.HealthCheck) []map[string]interface{} {
+	result := make([]map[string]interface{}, 0, 1)
+
+	chk := make(map[string]interface{})
+	chk["unhealthy_threshold"] = *check.UnhealthyThreshold
+	chk["healthy_threshold"] = *check.HealthyThreshold
+	chk["target"] = *check.Target
+	chk["timeout"] = *check.Timeout
+	chk["interval"] = *check.Interval
+
+	result = append(result, chk)
+
+	return result
+}
+
+// Flattens an array of UserSecurityGroups into a []*ec2.GroupIdentifier
+func flattenSecurityGroups(list []*ec2.UserIdGroupPair, ownerId *string) []*ec2.GroupIdentifier {
+	result := make([]*ec2.GroupIdentifier, 0, len(list))
+	for _, g := range list {
+		var userId *string
+		if g.UserId != nil && *g.UserId != "" && (ownerId == nil || *ownerId != *g.UserId) {
+			userId = g.UserId
+		}
+		// userid nil here for same vpc groups
+
+		vpc := g.GroupName == nil || *g.GroupName == ""
+		var id *string
+		if vpc {
+			id = g.GroupId
+		} else {
+			id = g.GroupName
+		}
+
+		// id is groupid for vpcs
+		// id is groupname for non vpc (classic)
+
+		if userId != nil {
+			id = aws.String(*userId + "/" + *id)
+		}
+
+		if vpc {
+			result = append(result, &ec2.GroupIdentifier{
+				GroupId: id,
+			})
+		} else {
+			result = append(result, &ec2.GroupIdentifier{
+				GroupId:   g.GroupId,
+				GroupName: id,
+			})
+		}
+	}
+	return result
+}
+
+// Flattens an array of Instances into a []string
+func flattenInstances(list []*elb.Instance) []string {
+	result := make([]string, 0, len(list))
+	for _, i := range list {
+		result = append(result, *i.InstanceId)
+	}
+	return result
+}
+
+// Expands an array of string instance IDs into a []*elb.Instance
+func expandInstanceString(list []interface{}) []*elb.Instance {
+	result := make([]*elb.Instance, 0, len(list))
+	for _, i := range list {
+		result = append(result, &elb.Instance{InstanceId: aws.String(i.(string))})
+	}
+	return result
+}
+
+// Flattens an array of Backend Descriptions into a map of instance_port to policy names.
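+// For example, a backend on instance port 8000 with policies "p2" and "p1"
+// yields map[int64][]string{8000: {"p1", "p2"}} (names are sorted).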
+func flattenBackendPolicies(backends []*elb.BackendServerDescription) map[int64][]string {
+	policies := make(map[int64][]string)
+	for _, i := range backends {
+		for _, p := range i.PolicyNames {
+			policies[*i.InstancePort] = append(policies[*i.InstancePort], *p)
+		}
+		sort.Strings(policies[*i.InstancePort])
+	}
+	return policies
+}
+
+// Flattens an array of Listeners into a []map[string]interface{}
+func flattenListeners(list []*elb.ListenerDescription) []map[string]interface{} {
+	result := make([]map[string]interface{}, 0, len(list))
+	for _, i := range list {
+		l := map[string]interface{}{
+			"instance_port":     *i.Listener.InstancePort,
+			"instance_protocol": strings.ToLower(*i.Listener.InstanceProtocol),
+			"lb_port":           *i.Listener.LoadBalancerPort,
+			"lb_protocol":       strings.ToLower(*i.Listener.Protocol),
+		}
+		// SSLCertificateID is optional, and may be nil
+		if i.Listener.SSLCertificateId != nil {
+			l["ssl_certificate_id"] = *i.Listener.SSLCertificateId
+		}
+		result = append(result, l)
+	}
+	return result
+}
+
+// Flattens an array of Volumes into a []map[string]interface{}
+func flattenEcsVolumes(list []*ecs.Volume) []map[string]interface{} {
+	result := make([]map[string]interface{}, 0, len(list))
+	for _, volume := range list {
+		l := map[string]interface{}{
+			"name": *volume.Name,
+		}
+
+		// Host may be nil when no host volume properties were configured;
+		// guard the dereference to avoid a panic.
+		if volume.Host != nil && volume.Host.SourcePath != nil {
+			l["host_path"] = *volume.Host.SourcePath
+		}
+
+		result = append(result, l)
+	}
+	return result
+}
+
+// Flattens an array of ECS LoadBalancers into a []map[string]interface{}
+func flattenEcsLoadBalancers(list []*ecs.LoadBalancer) []map[string]interface{} {
+	result := make([]map[string]interface{}, 0, len(list))
+	for _, loadBalancer := range list {
+		l := map[string]interface{}{
+			"container_name": *loadBalancer.ContainerName,
+			"container_port": *loadBalancer.ContainerPort,
+		}
+
+		if loadBalancer.LoadBalancerName != nil {
+			l["elb_name"] = *loadBalancer.LoadBalancerName
+		}
+
+		if loadBalancer.TargetGroupArn != nil {
+			l["target_group_arn"] = *loadBalancer.TargetGroupArn
+		}
+
+		result = append(result, l)
+	}
+	return result
+}
+
+// Encodes an array of ecs.ContainerDefinitions into a JSON string
+func flattenEcsContainerDefinitions(definitions []*ecs.ContainerDefinition) (string, error) {
+	byteArray, err := json.Marshal(definitions)
+	if err != nil {
+		return "", fmt.Errorf("Error encoding to JSON: %s", err)
+	}
+
+	// bytes.Index returns -1 when no NUL byte is present (the usual case
+	// for JSON output), which would otherwise panic when slicing.
+	if n := bytes.Index(byteArray, []byte{0}); n >= 0 {
+		return string(byteArray[:n]), nil
+	}
+	return string(byteArray), nil
+}
+
+// Flattens an array of Options into a []map[string]interface{}
+func flattenOptions(list []*rds.Option) []map[string]interface{} {
+	result := make([]map[string]interface{}, 0, len(list))
+	for _, i := range list {
+		if i.OptionName != nil {
+			r := make(map[string]interface{})
+			r["option_name"] = strings.ToLower(*i.OptionName)
+			// Default empty string, guard against nil parameter values
+			r["port"] = ""
+			if i.Port != nil {
+				r["port"] = int(*i.Port)
+			}
+			if i.VpcSecurityGroupMemberships != nil {
+				vpcs := make([]string, 0, len(i.VpcSecurityGroupMemberships))
+				for _, vpc := range i.VpcSecurityGroupMemberships {
+					id := vpc.VpcSecurityGroupId
+					vpcs = append(vpcs, *id)
+				}
+
+				r["vpc_security_group_memberships"] = vpcs
+			}
+			if i.DBSecurityGroupMemberships != nil {
+				dbs := make([]string, 0, len(i.DBSecurityGroupMemberships))
+				for _, db := range i.DBSecurityGroupMemberships {
+					id := db.DBSecurityGroupName
+					dbs = append(dbs, *id)
+				}
+
+				r["db_security_group_memberships"] = dbs
+			}
+			if i.OptionSettings != nil {
+				settings := make([]map[string]interface{}, 0, len(i.OptionSettings))
+				for _, j := range i.OptionSettings {
+					setting := map[string]interface{}{
+						"name": *j.Name,
+					}
+					if j.Value != nil {
+						setting["value"] = *j.Value
+					}
+
+					settings = append(settings, setting)
+				}
+
+				r["option_settings"] = settings
+			}
+			result = append(result, r)
+		}
+	}
+	return result
+}
+
+// Flattens an array of Parameters into a []map[string]interface{}
+func flattenParameters(list []*rds.Parameter) []map[string]interface{} {
+	result := make([]map[string]interface{}, 0, len(list))
+	for _, i := range list {
+		if i.ParameterName != nil {
+			r := make(map[string]interface{})
+			r["name"] = strings.ToLower(*i.ParameterName)
+			// Default empty string, guard against nil parameter values
+			r["value"] = ""
+			if i.ParameterValue != nil {
+				r["value"] = strings.ToLower(*i.ParameterValue)
+			}
+			if i.ApplyMethod != nil {
+				r["apply_method"] = strings.ToLower(*i.ApplyMethod)
+			}
+
+			result = append(result, r)
+		}
+	}
+	return result
+}
+
+// Flattens an array of Redshift Parameters into a []map[string]interface{}
+func flattenRedshiftParameters(list []*redshift.Parameter) []map[string]interface{} {
+	result := make([]map[string]interface{}, 0, len(list))
+	for _, i := range list {
+		result = append(result, map[string]interface{}{
+			"name":  strings.ToLower(*i.ParameterName),
+			"value": strings.ToLower(*i.ParameterValue),
+		})
+	}
+	return result
+}
+
+// Flattens an array of Parameters into a []map[string]interface{}
+func flattenElastiCacheParameters(list []*elasticache.Parameter) []map[string]interface{} {
+	result := make([]map[string]interface{}, 0, len(list))
+	for _, i := range list {
+		if i.ParameterValue != nil {
+			result = append(result, map[string]interface{}{
+				"name":  strings.ToLower(*i.ParameterName),
+				"value": *i.ParameterValue,
+			})
+		}
+	}
+	return result
+}
+
+// Takes the result of flatmap.Expand for an array of strings
+// and returns a []*string
+func expandStringList(configured []interface{}) []*string {
+	vs := make([]*string, 0, len(configured))
+	for _, v := range configured {
+		if val, ok := v.(string); ok && val != "" {
+			vs = append(vs, aws.String(val))
+		}
+	}
+	return vs
+}
+
+// Takes a schema.Set of strings and returns a []*string
+func expandStringSet(configured *schema.Set) []*string {
+	return expandStringList(configured.List())
+}
+
+// Takes a list of string pointers, expands it to an array
+// of raw strings, and returns a []interface{}
+// to keep compatibility with schema.NewSet
+func flattenStringList(list []*string) []interface{} {
+	vs := make([]interface{}, 0, len(list))
+	for _, v := range list {
+		vs = append(vs, *v)
+	}
+	return vs
+}
+
+//Flattens an array of private ip addresses into a []string, where the elements returned are the IP strings e.g. "192.168.0.0"
"192.168.0.0" +func flattenNetworkInterfacesPrivateIPAddresses(dtos []*ec2.NetworkInterfacePrivateIpAddress) []string { + ips := make([]string, 0, len(dtos)) + for _, v := range dtos { + ip := *v.PrivateIpAddress + ips = append(ips, ip) + } + return ips +} + +//Flattens security group identifiers into a []string, where the elements returned are the GroupIDs +func flattenGroupIdentifiers(dtos []*ec2.GroupIdentifier) []string { + ids := make([]string, 0, len(dtos)) + for _, v := range dtos { + group_id := *v.GroupId + ids = append(ids, group_id) + } + return ids +} + +//Expands an array of IPs into a ec2 Private IP Address Spec +func expandPrivateIPAddresses(ips []interface{}) []*ec2.PrivateIpAddressSpecification { + dtos := make([]*ec2.PrivateIpAddressSpecification, 0, len(ips)) + for i, v := range ips { + new_private_ip := &ec2.PrivateIpAddressSpecification{ + PrivateIpAddress: aws.String(v.(string)), + } + + new_private_ip.Primary = aws.Bool(i == 0) + + dtos = append(dtos, new_private_ip) + } + return dtos +} + +//Flattens network interface attachment into a map[string]interface +func flattenAttachment(a *ec2.NetworkInterfaceAttachment) map[string]interface{} { + att := make(map[string]interface{}) + if a.InstanceId != nil { + att["instance"] = *a.InstanceId + } + att["device_index"] = *a.DeviceIndex + att["attachment_id"] = *a.AttachmentId + return att +} + +func flattenElastiCacheSecurityGroupNames(securityGroups []*elasticache.CacheSecurityGroupMembership) []string { + result := make([]string, 0, len(securityGroups)) + for _, sg := range securityGroups { + if sg.CacheSecurityGroupName != nil { + result = append(result, *sg.CacheSecurityGroupName) + } + } + return result +} + +func flattenElastiCacheSecurityGroupIds(securityGroups []*elasticache.SecurityGroupMembership) []string { + result := make([]string, 0, len(securityGroups)) + for _, sg := range securityGroups { + if sg.SecurityGroupId != nil { + result = append(result, *sg.SecurityGroupId) + } + } + return result +} + +// Flattens step adjustments into a list of map[string]interface. +func flattenStepAdjustments(adjustments []*autoscaling.StepAdjustment) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(adjustments)) + for _, raw := range adjustments { + a := map[string]interface{}{ + "scaling_adjustment": *raw.ScalingAdjustment, + } + if raw.MetricIntervalUpperBound != nil { + a["metric_interval_upper_bound"] = *raw.MetricIntervalUpperBound + } + if raw.MetricIntervalLowerBound != nil { + a["metric_interval_lower_bound"] = *raw.MetricIntervalLowerBound + } + result = append(result, a) + } + return result +} + +func flattenResourceRecords(recs []*route53.ResourceRecord, typeStr string) []string { + strs := make([]string, 0, len(recs)) + for _, r := range recs { + if r.Value != nil { + s := *r.Value + if typeStr == "TXT" || typeStr == "SPF" { + s = expandTxtEntry(s) + } + strs = append(strs, s) + } + } + return strs +} + +func expandResourceRecords(recs []interface{}, typeStr string) []*route53.ResourceRecord { + records := make([]*route53.ResourceRecord, 0, len(recs)) + for _, r := range recs { + s := r.(string) + if typeStr == "TXT" || typeStr == "SPF" { + s = flattenTxtEntry(s) + } + records = append(records, &route53.ResourceRecord{Value: aws.String(s)}) + } + return records +} + +// How 'flattenTxtEntry' and 'expandTxtEntry' work. +// +// In the Route 53, TXT entries are written using quoted strings, one per line. 
+// Example:
+//     "x=foo"
+//     "bar=12"
+//
+// In Terraform, there are two differences:
+// - We use a list of strings instead of separating strings with newlines.
+// - Within each string, we don't include the surrounding quotes.
+// Example:
+//     records = ["x=foo", "bar=12"] # Instead of ["\"x=foo\", \"bar=12\""]
+//
+// When we pull from Route 53, `expandTxtEntry` removes the surrounding quotes;
+// when we push to Route 53, `flattenTxtEntry` adds them back.
+//
+// One complication is that a single TXT entry can have multiple quoted strings.
+// For example, here are two TXT entries, one with two quoted strings and the
+// other with three.
+//     "x=" "foo"
+//     "ba" "r" "=12"
+//
+// DNS clients are expected to merge the quoted strings before interpreting the
+// value. Since `expandTxtEntry` only removes the quotes at the ends, we can still
+// (hackily) represent the above configuration in Terraform:
+//     records = ["x=\" \"foo", "ba\" \"r\" \"=12"]
+//
+// The primary reason to use multiple strings for an entry is that DNS (and Route
+// 53) doesn't allow a quoted string to be more than 255 characters long. If you
+// want a longer TXT entry, you must use multiple quoted strings.
+//
+// It would be nice if Terraform automatically split strings longer than 255
+// characters. For example, imagine "xxx..xxx" has 256 "x" characters.
+//     records = ["xxx..xxx"]
+// When pushing to Route 53, this could be converted to:
+//     "xxx..xx" "x"
+//
+// This could also work when the user is already using multiple quoted strings:
+//     records = ["xxx.xxx\" \"yyy..yyy"]
+// When pushing to Route 53, this could be converted to:
+//     "xxx..xx" "xyyy...y" "yy"
+//
+// If you want to add this feature, make sure to follow all the quoting rules in
+// . If you make a mistake, people
+// might end up relying on that mistake so fixing it would be a breaking change.
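+//
+// As a concrete round trip with the two helpers below:
+//     flattenTxtEntry("x=foo") returns the quoted form `"x=foo"`, and
+//     expandTxtEntry(`"x=foo"`) strips the outer quotes back to `x=foo`.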
+
+func flattenTxtEntry(s string) string {
+	return fmt.Sprintf(`"%s"`, s)
+}
+
+func expandTxtEntry(s string) string {
+	last := len(s) - 1
+	// Only strip the outer quotes when the string is long enough to have
+	// a distinct opening and closing quote; this also keeps an empty
+	// string from being indexed out of range.
+	if last > 0 && s[0] == '"' && s[last] == '"' {
+		s = s[1:last]
+	}
+	return s
+}
+
+func expandESClusterConfig(m map[string]interface{}) *elasticsearch.ElasticsearchClusterConfig {
+	config := elasticsearch.ElasticsearchClusterConfig{}
+
+	if v, ok := m["dedicated_master_enabled"]; ok {
+		isEnabled := v.(bool)
+		config.DedicatedMasterEnabled = aws.Bool(isEnabled)
+
+		if isEnabled {
+			if v, ok := m["dedicated_master_count"]; ok && v.(int) > 0 {
+				config.DedicatedMasterCount = aws.Int64(int64(v.(int)))
+			}
+			if v, ok := m["dedicated_master_type"]; ok && v.(string) != "" {
+				config.DedicatedMasterType = aws.String(v.(string))
+			}
+		}
+	}
+
+	if v, ok := m["instance_count"]; ok {
+		config.InstanceCount = aws.Int64(int64(v.(int)))
+	}
+	if v, ok := m["instance_type"]; ok {
+		config.InstanceType = aws.String(v.(string))
+	}
+
+	if v, ok := m["zone_awareness_enabled"]; ok {
+		config.ZoneAwarenessEnabled = aws.Bool(v.(bool))
+	}
+
+	return &config
+}
+
+func flattenESClusterConfig(c *elasticsearch.ElasticsearchClusterConfig) []map[string]interface{} {
+	m := map[string]interface{}{}
+
+	if c.DedicatedMasterCount != nil {
+		m["dedicated_master_count"] = *c.DedicatedMasterCount
+	}
+	if c.DedicatedMasterEnabled != nil {
+		m["dedicated_master_enabled"] = *c.DedicatedMasterEnabled
+	}
+	if c.DedicatedMasterType != nil {
+		m["dedicated_master_type"] = *c.DedicatedMasterType
+	}
+	if c.InstanceCount != nil {
+		m["instance_count"] = *c.InstanceCount
+	}
+	if c.InstanceType != nil {
+		m["instance_type"] = *c.InstanceType
+	}
+	if c.ZoneAwarenessEnabled != nil {
+		m["zone_awareness_enabled"] = *c.ZoneAwarenessEnabled
+	}
+
+	return []map[string]interface{}{m}
+}
+
+func flattenESEBSOptions(o *elasticsearch.EBSOptions) []map[string]interface{} {
+	m := map[string]interface{}{}
+
+	if o.EBSEnabled != nil {
+		m["ebs_enabled"] = *o.EBSEnabled
+	}
+	if o.Iops != nil {
+		m["iops"] = *o.Iops
+	}
+	if o.VolumeSize != nil {
+		m["volume_size"] = *o.VolumeSize
+	}
+	if o.VolumeType != nil {
+		m["volume_type"] = *o.VolumeType
+	}
+
+	return []map[string]interface{}{m}
+}
+
+func expandESEBSOptions(m map[string]interface{}) *elasticsearch.EBSOptions {
+	options := elasticsearch.EBSOptions{}
+
+	if v, ok := m["ebs_enabled"]; ok {
+		options.EBSEnabled = aws.Bool(v.(bool))
+	}
+	if v, ok := m["iops"]; ok && v.(int) > 0 {
+		options.Iops = aws.Int64(int64(v.(int)))
+	}
+	if v, ok := m["volume_size"]; ok && v.(int) > 0 {
+		options.VolumeSize = aws.Int64(int64(v.(int)))
+	}
+	if v, ok := m["volume_type"]; ok && v.(string) != "" {
+		options.VolumeType = aws.String(v.(string))
+	}
+
+	return &options
+}
+
+func expandConfigRecordingGroup(configured []interface{}) *configservice.RecordingGroup {
+	recordingGroup := configservice.RecordingGroup{}
+	group := configured[0].(map[string]interface{})
+
+	if v, ok := group["all_supported"]; ok {
+		recordingGroup.AllSupported = aws.Bool(v.(bool))
+	}
+
+	if v, ok := group["include_global_resource_types"]; ok {
+		recordingGroup.IncludeGlobalResourceTypes = aws.Bool(v.(bool))
+	}
+
+	if v, ok := group["resource_types"]; ok {
+		recordingGroup.ResourceTypes = expandStringList(v.(*schema.Set).List())
+	}
+	return &recordingGroup
+}
+
+func flattenConfigRecordingGroup(g *configservice.RecordingGroup) []map[string]interface{} {
+	m := make(map[string]interface{}, 1)
+
+	if g.AllSupported != nil {
+		m["all_supported"] = *g.AllSupported
+	}
+
+	if g.IncludeGlobalResourceTypes != nil {
+		m["include_global_resource_types"] = *g.IncludeGlobalResourceTypes
+	}
+
+	if len(g.ResourceTypes) > 0 {
+		m["resource_types"] = schema.NewSet(schema.HashString, flattenStringList(g.ResourceTypes))
+	}
+
+	return []map[string]interface{}{m}
+}
+
+func flattenConfigSnapshotDeliveryProperties(p *configservice.ConfigSnapshotDeliveryProperties) []map[string]interface{} {
+	m := make(map[string]interface{}, 0)
+
+	if p.DeliveryFrequency != nil {
+		m["delivery_frequency"] = *p.DeliveryFrequency
+	}
+
+	return []map[string]interface{}{m}
+}
+
+func pointersMapToStringList(pointers map[string]*string) map[string]interface{} {
+	list := make(map[string]interface{}, len(pointers))
+	for i, v := range pointers {
+		list[i] = *v
+	}
+	return list
+}
+
+func stringMapToPointers(m map[string]interface{}) map[string]*string {
+	list := make(map[string]*string, len(m))
+	for i, v := range m {
+		list[i] = aws.String(v.(string))
+	}
+	return list
+}
+
+func flattenDSVpcSettings(
+	s *directoryservice.DirectoryVpcSettingsDescription) []map[string]interface{} {
+	settings := make(map[string]interface{}, 0)
+
+	if s == nil {
+		return nil
+	}
+
+	settings["subnet_ids"] = schema.NewSet(schema.HashString, flattenStringList(s.SubnetIds))
+	settings["vpc_id"] = *s.VpcId
+
+	return []map[string]interface{}{settings}
+}
+
+func flattenLambdaEnvironment(lambdaEnv *lambda.EnvironmentResponse) []interface{} {
+	envs := make(map[string]interface{})
+	en := make(map[string]string)
+
+	if lambdaEnv == nil {
+		return nil
+	}
+
+	for k, v := range lambdaEnv.Variables {
+		en[k] = *v
+	}
+	if len(en) > 0 {
+		envs["variables"] = en
+	}
+
+	return []interface{}{envs}
+}
+
+func flattenLambdaVpcConfigResponse(s *lambda.VpcConfigResponse) []map[string]interface{} {
+	settings := make(map[string]interface{}, 0)
+
+	if s == nil {
+		return nil
+	}
+
+	var emptyVpc bool
+	if s.VpcId == nil || *s.VpcId == "" {
+		emptyVpc = true
+	}
+	if len(s.SubnetIds) == 0 && len(s.SecurityGroupIds) == 0 && emptyVpc {
+		return nil
+	}
+
+	settings["subnet_ids"] = schema.NewSet(schema.HashString, flattenStringList(s.SubnetIds))
+	settings["security_group_ids"] = schema.NewSet(schema.HashString, flattenStringList(s.SecurityGroupIds))
+	if s.VpcId != nil {
+		settings["vpc_id"] = *s.VpcId
+	}
+
+	return []map[string]interface{}{settings}
+}
+
+func flattenDSConnectSettings(
+	customerDnsIps []*string,
+	s *directoryservice.DirectoryConnectSettingsDescription) []map[string]interface{} {
+	if s == nil {
+		return nil
+	}
+
+	settings := make(map[string]interface{}, 0)
+
+	settings["customer_dns_ips"] = schema.NewSet(schema.HashString, flattenStringList(customerDnsIps))
+	settings["connect_ips"] = schema.NewSet(schema.HashString, flattenStringList(s.ConnectIps))
+	settings["customer_username"] = *s.CustomerUserName
+	settings["subnet_ids"] = schema.NewSet(schema.HashString, flattenStringList(s.SubnetIds))
+	settings["vpc_id"] = *s.VpcId
+
+	return []map[string]interface{}{settings}
+}
+
+func expandCloudFormationParameters(params map[string]interface{}) []*cloudformation.Parameter {
+	var cfParams []*cloudformation.Parameter
+	for k, v := range params {
+		cfParams = append(cfParams, &cloudformation.Parameter{
+			ParameterKey:   aws.String(k),
+			ParameterValue: aws.String(v.(string)),
+		})
+	}
+
+	return cfParams
+}
+
+// flattenCloudFormationParameters flattens a list of
+// *cloudformation.Parameter and returns only the parameters that are
+// present in the original configuration, to avoid clashing with default values
+func 
flattenCloudFormationParameters(cfParams []*cloudformation.Parameter, + originalParams map[string]interface{}) map[string]interface{} { + params := make(map[string]interface{}, len(cfParams)) + for _, p := range cfParams { + _, isConfigured := originalParams[*p.ParameterKey] + if isConfigured { + params[*p.ParameterKey] = *p.ParameterValue + } + } + return params +} + +func flattenAllCloudFormationParameters(cfParams []*cloudformation.Parameter) map[string]interface{} { + params := make(map[string]interface{}, len(cfParams)) + for _, p := range cfParams { + params[*p.ParameterKey] = *p.ParameterValue + } + return params +} + +func expandCloudFormationTags(tags map[string]interface{}) []*cloudformation.Tag { + var cfTags []*cloudformation.Tag + for k, v := range tags { + cfTags = append(cfTags, &cloudformation.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + }) + } + return cfTags +} + +func flattenCloudFormationTags(cfTags []*cloudformation.Tag) map[string]string { + tags := make(map[string]string, len(cfTags)) + for _, t := range cfTags { + tags[*t.Key] = *t.Value + } + return tags +} + +func flattenCloudFormationOutputs(cfOutputs []*cloudformation.Output) map[string]string { + outputs := make(map[string]string, len(cfOutputs)) + for _, o := range cfOutputs { + outputs[*o.OutputKey] = *o.OutputValue + } + return outputs +} + +func flattenAsgSuspendedProcesses(list []*autoscaling.SuspendedProcess) []string { + strs := make([]string, 0, len(list)) + for _, r := range list { + if r.ProcessName != nil { + strs = append(strs, *r.ProcessName) + } + } + return strs +} + +func flattenAsgEnabledMetrics(list []*autoscaling.EnabledMetric) []string { + strs := make([]string, 0, len(list)) + for _, r := range list { + if r.Metric != nil { + strs = append(strs, *r.Metric) + } + } + return strs +} + +func flattenKinesisShardLevelMetrics(list []*kinesis.EnhancedMetrics) []string { + if len(list) == 0 { + return []string{} + } + strs := make([]string, 0, len(list[0].ShardLevelMetrics)) + for _, s := range list[0].ShardLevelMetrics { + strs = append(strs, *s) + } + return strs +} + +func flattenApiGatewayStageKeys(keys []*string) []map[string]interface{} { + stageKeys := make([]map[string]interface{}, 0, len(keys)) + for _, o := range keys { + key := make(map[string]interface{}) + parts := strings.Split(*o, "/") + key["stage_name"] = parts[1] + key["rest_api_id"] = parts[0] + + stageKeys = append(stageKeys, key) + } + return stageKeys +} + +func expandApiGatewayStageKeys(d *schema.ResourceData) []*apigateway.StageKey { + var stageKeys []*apigateway.StageKey + + if stageKeyData, ok := d.GetOk("stage_key"); ok { + params := stageKeyData.(*schema.Set).List() + for k := range params { + data := params[k].(map[string]interface{}) + stageKeys = append(stageKeys, &apigateway.StageKey{ + RestApiId: aws.String(data["rest_api_id"].(string)), + StageName: aws.String(data["stage_name"].(string)), + }) + } + } + + return stageKeys +} + +func expandApiGatewayRequestResponseModelOperations(d *schema.ResourceData, key string, prefix string) []*apigateway.PatchOperation { + operations := make([]*apigateway.PatchOperation, 0) + + oldModels, newModels := d.GetChange(key) + oldModelMap := oldModels.(map[string]interface{}) + newModelMap := newModels.(map[string]interface{}) + + for k, _ := range oldModelMap { + operation := apigateway.PatchOperation{ + Op: aws.String("remove"), + Path: aws.String(fmt.Sprintf("/%s/%s", prefix, strings.Replace(k, "/", "~1", -1))), + } + + for nK, nV := range newModelMap { + if nK 
== k { + operation.Op = aws.String("replace") + operation.Value = aws.String(nV.(string)) + } + } + + operations = append(operations, &operation) + } + + for nK, nV := range newModelMap { + exists := false + for k, _ := range oldModelMap { + if k == nK { + exists = true + } + } + if !exists { + operation := apigateway.PatchOperation{ + Op: aws.String("add"), + Path: aws.String(fmt.Sprintf("/%s/%s", prefix, strings.Replace(nK, "/", "~1", -1))), + Value: aws.String(nV.(string)), + } + operations = append(operations, &operation) + } + } + + return operations +} + +func deprecatedExpandApiGatewayMethodParametersJSONOperations(d *schema.ResourceData, key string, prefix string) ([]*apigateway.PatchOperation, error) { + operations := make([]*apigateway.PatchOperation, 0) + oldParameters, newParameters := d.GetChange(key) + oldParametersMap := make(map[string]interface{}) + newParametersMap := make(map[string]interface{}) + + if err := json.Unmarshal([]byte(oldParameters.(string)), &oldParametersMap); err != nil { + err := fmt.Errorf("Error unmarshaling old %s: %s", key, err) + return operations, err + } + + if err := json.Unmarshal([]byte(newParameters.(string)), &newParametersMap); err != nil { + err := fmt.Errorf("Error unmarshaling new %s: %s", key, err) + return operations, err + } + + for k, _ := range oldParametersMap { + operation := apigateway.PatchOperation{ + Op: aws.String("remove"), + Path: aws.String(fmt.Sprintf("/%s/%s", prefix, k)), + } + + for nK, nV := range newParametersMap { + if nK == k { + operation.Op = aws.String("replace") + operation.Value = aws.String(strconv.FormatBool(nV.(bool))) + } + } + + operations = append(operations, &operation) + } + + for nK, nV := range newParametersMap { + exists := false + for k, _ := range oldParametersMap { + if k == nK { + exists = true + } + } + if !exists { + operation := apigateway.PatchOperation{ + Op: aws.String("add"), + Path: aws.String(fmt.Sprintf("/%s/%s", prefix, nK)), + Value: aws.String(strconv.FormatBool(nV.(bool))), + } + operations = append(operations, &operation) + } + } + + return operations, nil +} + +func expandApiGatewayMethodParametersOperations(d *schema.ResourceData, key string, prefix string) ([]*apigateway.PatchOperation, error) { + operations := make([]*apigateway.PatchOperation, 0) + + oldParameters, newParameters := d.GetChange(key) + oldParametersMap := oldParameters.(map[string]interface{}) + newParametersMap := newParameters.(map[string]interface{}) + + for k, _ := range oldParametersMap { + operation := apigateway.PatchOperation{ + Op: aws.String("remove"), + Path: aws.String(fmt.Sprintf("/%s/%s", prefix, k)), + } + + for nK, nV := range newParametersMap { + b, ok := nV.(bool) + if !ok { + value, _ := strconv.ParseBool(nV.(string)) + b = value + } + if nK == k { + operation.Op = aws.String("replace") + operation.Value = aws.String(strconv.FormatBool(b)) + } + } + + operations = append(operations, &operation) + } + + for nK, nV := range newParametersMap { + exists := false + for k, _ := range oldParametersMap { + if k == nK { + exists = true + } + } + if !exists { + b, ok := nV.(bool) + if !ok { + value, _ := strconv.ParseBool(nV.(string)) + b = value + } + operation := apigateway.PatchOperation{ + Op: aws.String("add"), + Path: aws.String(fmt.Sprintf("/%s/%s", prefix, nK)), + Value: aws.String(strconv.FormatBool(b)), + } + operations = append(operations, &operation) + } + } + + return operations, nil +} + +func expandApiGatewayStageKeyOperations(d *schema.ResourceData) []*apigateway.PatchOperation { + 
operations := make([]*apigateway.PatchOperation, 0) + + prev, curr := d.GetChange("stage_key") + prevList := prev.(*schema.Set).List() + currList := curr.(*schema.Set).List() + + for i := range prevList { + p := prevList[i].(map[string]interface{}) + exists := false + + for j := range currList { + c := currList[j].(map[string]interface{}) + if c["rest_api_id"].(string) == p["rest_api_id"].(string) && c["stage_name"].(string) == p["stage_name"].(string) { + exists = true + } + } + + if !exists { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("remove"), + Path: aws.String("/stages"), + Value: aws.String(fmt.Sprintf("%s/%s", p["rest_api_id"].(string), p["stage_name"].(string))), + }) + } + } + + for i := range currList { + c := currList[i].(map[string]interface{}) + exists := false + + for j := range prevList { + p := prevList[j].(map[string]interface{}) + if c["rest_api_id"].(string) == p["rest_api_id"].(string) && c["stage_name"].(string) == p["stage_name"].(string) { + exists = true + } + } + + if !exists { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("add"), + Path: aws.String("/stages"), + Value: aws.String(fmt.Sprintf("%s/%s", c["rest_api_id"].(string), c["stage_name"].(string))), + }) + } + } + + return operations +} + +func expandCloudWachLogMetricTransformations(m map[string]interface{}) []*cloudwatchlogs.MetricTransformation { + transformation := cloudwatchlogs.MetricTransformation{ + MetricName: aws.String(m["name"].(string)), + MetricNamespace: aws.String(m["namespace"].(string)), + MetricValue: aws.String(m["value"].(string)), + } + + return []*cloudwatchlogs.MetricTransformation{&transformation} +} + +func flattenCloudWachLogMetricTransformations(ts []*cloudwatchlogs.MetricTransformation) map[string]string { + m := make(map[string]string, 0) + + m["name"] = *ts[0].MetricName + m["namespace"] = *ts[0].MetricNamespace + m["value"] = *ts[0].MetricValue + + return m +} + +func flattenBeanstalkAsg(list []*elasticbeanstalk.AutoScalingGroup) []string { + strs := make([]string, 0, len(list)) + for _, r := range list { + if r.Name != nil { + strs = append(strs, *r.Name) + } + } + return strs +} + +func flattenBeanstalkInstances(list []*elasticbeanstalk.Instance) []string { + strs := make([]string, 0, len(list)) + for _, r := range list { + if r.Id != nil { + strs = append(strs, *r.Id) + } + } + return strs +} + +func flattenBeanstalkLc(list []*elasticbeanstalk.LaunchConfiguration) []string { + strs := make([]string, 0, len(list)) + for _, r := range list { + if r.Name != nil { + strs = append(strs, *r.Name) + } + } + return strs +} + +func flattenBeanstalkElb(list []*elasticbeanstalk.LoadBalancer) []string { + strs := make([]string, 0, len(list)) + for _, r := range list { + if r.Name != nil { + strs = append(strs, *r.Name) + } + } + return strs +} + +func flattenBeanstalkSqs(list []*elasticbeanstalk.Queue) []string { + strs := make([]string, 0, len(list)) + for _, r := range list { + if r.URL != nil { + strs = append(strs, *r.URL) + } + } + return strs +} + +func flattenBeanstalkTrigger(list []*elasticbeanstalk.Trigger) []string { + strs := make([]string, 0, len(list)) + for _, r := range list { + if r.Name != nil { + strs = append(strs, *r.Name) + } + } + return strs +} + +// There are several parts of the AWS API that will sort lists of strings, +// causing diffs inbetween resources that use lists. This avoids a bit of +// code duplication for pre-sorts that can be used for things like hash +// functions, etc. 
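+//
+// For example (illustrative only):
+//
+//	sortInterfaceSlice([]interface{}{"b", "a", "c"})
+//	// => []interface{}{"a", "b", "c"}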
+func sortInterfaceSlice(in []interface{}) []interface{} { + a := []string{} + b := []interface{}{} + for _, v := range in { + a = append(a, v.(string)) + } + + sort.Strings(a) + + for _, v := range a { + b = append(b, v) + } + + return b +} + +// This function sorts List A to look like a list found in the tf file. +func sortListBasedonTFFile(in []string, d *schema.ResourceData, listName string) ([]string, error) { + if attributeCount, ok := d.Get(listName + ".#").(int); ok { + for i := 0; i < attributeCount; i++ { + currAttributeId := d.Get(listName + "." + strconv.Itoa(i)) + for j := 0; j < len(in); j++ { + if currAttributeId == in[j] { + in[i], in[j] = in[j], in[i] + } + } + } + return in, nil + } + return in, fmt.Errorf("Could not find list: %s", listName) +} + +func flattenApiGatewayThrottleSettings(settings *apigateway.ThrottleSettings) []map[string]interface{} { + result := make([]map[string]interface{}, 0, 1) + + if settings != nil { + r := make(map[string]interface{}) + if settings.BurstLimit != nil { + r["burst_limit"] = *settings.BurstLimit + } + + if settings.RateLimit != nil { + r["rate_limit"] = *settings.RateLimit + } + + result = append(result, r) + } + + return result +} + +// TODO: refactor some of these helper functions and types in the terraform/helper packages + +// getStringPtr returns a *string version of the value taken from m, where m +// can be a map[string]interface{} or a *schema.ResourceData. If the key isn't +// present or is empty, getNilString returns nil. +func getStringPtr(m interface{}, key string) *string { + switch m := m.(type) { + case map[string]interface{}: + v := m[key] + + if v == nil { + return nil + } + + s := v.(string) + if s == "" { + return nil + } + + return &s + + case *schema.ResourceData: + if v, ok := m.GetOk(key); ok { + if v == nil || v.(string) == "" { + return nil + } + s := v.(string) + return &s + } + + default: + panic("unknown type in getStringPtr") + } + + return nil +} + +// getStringPtrList returns a []*string version of the map value. If the key +// isn't present, getNilStringList returns nil. +func getStringPtrList(m map[string]interface{}, key string) []*string { + if v, ok := m[key]; ok { + var stringList []*string + for _, i := range v.([]interface{}) { + s := i.(string) + stringList = append(stringList, &s) + } + + return stringList + } + + return nil +} + +// a convenience wrapper type for the schema.Set map[string]interface{} +// Set operations only alter the underlying map if the value is not nil +type setMap map[string]interface{} + +// SetString sets m[key] = *value only if `value != nil` +func (s setMap) SetString(key string, value *string) { + if value == nil { + return + } + + s[key] = *value +} + +// SetStringMap sets key to value as a map[string]interface{}, stripping any nil +// values. The value parameter can be a map[string]interface{}, a +// map[string]*string, or a map[string]string. +func (s setMap) SetStringMap(key string, value interface{}) { + // because these methods are meant to be chained without intermediate + // checks for nil, we are likely to get interfaces with dynamic types but + // a nil value. 
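+	// Note that reflect.Value.IsNil panics for kinds that can never be nil,
+	// so only map, pointer, slice, or interface values should reach here.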
+ if reflect.ValueOf(value).IsNil() { + return + } + + m := make(map[string]interface{}) + + switch value := value.(type) { + case map[string]string: + for k, v := range value { + m[k] = v + } + case map[string]*string: + for k, v := range value { + if v == nil { + continue + } + m[k] = *v + } + case map[string]interface{}: + for k, v := range value { + if v == nil { + continue + } + + switch v := v.(type) { + case string: + m[k] = v + case *string: + if v != nil { + m[k] = *v + } + default: + panic(fmt.Sprintf("unknown type for SetString: %T", v)) + } + } + } + + // catch the case where the interface wasn't nil, but we had no non-nil values + if len(m) > 0 { + s[key] = m + } +} + +// Set assigns value to s[key] if value isn't nil +func (s setMap) Set(key string, value interface{}) { + if reflect.ValueOf(value).IsNil() { + return + } + + s[key] = value +} + +// Map returns the raw map type for a shorter type conversion +func (s setMap) Map() map[string]interface{} { + return map[string]interface{}(s) +} + +// MapList returns the map[string]interface{} as a single element in a slice to +// match the schema.Set data type used for structs. +func (s setMap) MapList() []map[string]interface{} { + return []map[string]interface{}{s.Map()} +} + +// Takes the result of flatmap.Expand for an array of policy attributes and +// returns ELB API compatible objects +func expandPolicyAttributes(configured []interface{}) ([]*elb.PolicyAttribute, error) { + attributes := make([]*elb.PolicyAttribute, 0, len(configured)) + + // Loop over our configured attributes and create + // an array of aws-sdk-go compatible objects + for _, lRaw := range configured { + data := lRaw.(map[string]interface{}) + + a := &elb.PolicyAttribute{ + AttributeName: aws.String(data["name"].(string)), + AttributeValue: aws.String(data["value"].(string)), + } + + attributes = append(attributes, a) + + } + + return attributes, nil +} + +// Flattens an array of PolicyAttributes into a []interface{} +func flattenPolicyAttributes(list []*elb.PolicyAttributeDescription) []interface{} { + attributes := []interface{}{} + for _, attrdef := range list { + attribute := map[string]string{ + "name": *attrdef.AttributeName, + "value": *attrdef.AttributeValue, + } + + attributes = append(attributes, attribute) + + } + + return attributes +} + +func flattenConfigRuleSource(source *configservice.Source) []interface{} { + var result []interface{} + m := make(map[string]interface{}) + m["owner"] = *source.Owner + m["source_identifier"] = *source.SourceIdentifier + if len(source.SourceDetails) > 0 { + m["source_detail"] = schema.NewSet(configRuleSourceDetailsHash, flattenConfigRuleSourceDetails(source.SourceDetails)) + } + result = append(result, m) + return result +} + +func flattenConfigRuleSourceDetails(details []*configservice.SourceDetail) []interface{} { + var items []interface{} + for _, d := range details { + m := make(map[string]interface{}) + if d.MessageType != nil { + m["message_type"] = *d.MessageType + } + if d.EventSource != nil { + m["event_source"] = *d.EventSource + } + if d.MaximumExecutionFrequency != nil { + m["maximum_execution_frequency"] = *d.MaximumExecutionFrequency + } + + items = append(items, m) + } + + return items +} + +func expandConfigRuleSource(configured []interface{}) *configservice.Source { + cfg := configured[0].(map[string]interface{}) + source := configservice.Source{ + Owner: aws.String(cfg["owner"].(string)), + SourceIdentifier: aws.String(cfg["source_identifier"].(string)), + } + if details, ok := 
cfg["source_detail"]; ok { + source.SourceDetails = expandConfigRuleSourceDetails(details.(*schema.Set)) + } + return &source +} + +func expandConfigRuleSourceDetails(configured *schema.Set) []*configservice.SourceDetail { + var results []*configservice.SourceDetail + + for _, item := range configured.List() { + detail := item.(map[string]interface{}) + src := configservice.SourceDetail{} + + if msgType, ok := detail["message_type"].(string); ok && msgType != "" { + src.MessageType = aws.String(msgType) + } + if eventSource, ok := detail["event_source"].(string); ok && eventSource != "" { + src.EventSource = aws.String(eventSource) + } + if maxExecFreq, ok := detail["maximum_execution_frequency"].(string); ok && maxExecFreq != "" { + src.MaximumExecutionFrequency = aws.String(maxExecFreq) + } + + results = append(results, &src) + } + + return results +} + +func flattenConfigRuleScope(scope *configservice.Scope) []interface{} { + var items []interface{} + + m := make(map[string]interface{}) + if scope.ComplianceResourceId != nil { + m["compliance_resource_id"] = *scope.ComplianceResourceId + } + if scope.ComplianceResourceTypes != nil { + m["compliance_resource_types"] = schema.NewSet(schema.HashString, flattenStringList(scope.ComplianceResourceTypes)) + } + if scope.TagKey != nil { + m["tag_key"] = *scope.TagKey + } + if scope.TagValue != nil { + m["tag_value"] = *scope.TagValue + } + + items = append(items, m) + return items +} + +func expandConfigRuleScope(configured map[string]interface{}) *configservice.Scope { + scope := &configservice.Scope{} + + if v, ok := configured["compliance_resource_id"].(string); ok && v != "" { + scope.ComplianceResourceId = aws.String(v) + } + if v, ok := configured["compliance_resource_types"]; ok { + l := v.(*schema.Set) + if l.Len() > 0 { + scope.ComplianceResourceTypes = expandStringList(l.List()) + } + } + if v, ok := configured["tag_key"].(string); ok && v != "" { + scope.TagKey = aws.String(v) + } + if v, ok := configured["tag_value"].(string); ok && v != "" { + scope.TagValue = aws.String(v) + } + + return scope +} + +// Takes a value containing JSON string and passes it through +// the JSON parser to normalize it, returns either a parsing +// error or normalized JSON string. +func normalizeJsonString(jsonString interface{}) (string, error) { + var j interface{} + + if jsonString == nil || jsonString.(string) == "" { + return "", nil + } + + s := jsonString.(string) + + err := json.Unmarshal([]byte(s), &j) + if err != nil { + return s, err + } + + // The error is intentionally ignored here to allow empty policies to passthrough validation. + // This covers any interpolated values + bytes, _ := json.Marshal(j) + + return string(bytes[:]), nil +} + +// Takes a value containing YAML string and passes it through +// the YAML parser. Returns either a parsing +// error or original YAML string. 
+func checkYamlString(yamlString interface{}) (string, error) { + var y interface{} + + if yamlString == nil || yamlString.(string) == "" { + return "", nil + } + + s := yamlString.(string) + + err := yaml.Unmarshal([]byte(s), &y) + if err != nil { + return s, err + } + + return s, nil +} + +func normalizeCloudFormationTemplate(templateString interface{}) (string, error) { + if looksLikeJsonString(templateString) { + return normalizeJsonString(templateString) + } else { + return checkYamlString(templateString) + } +} + +func flattenInspectorTags(cfTags []*cloudformation.Tag) map[string]string { + tags := make(map[string]string, len(cfTags)) + for _, t := range cfTags { + tags[*t.Key] = *t.Value + } + return tags +} + +func flattenApiGatewayUsageApiStages(s []*apigateway.ApiStage) []map[string]interface{} { + stages := make([]map[string]interface{}, 0) + + for _, bd := range s { + if bd.ApiId != nil && bd.Stage != nil { + stage := make(map[string]interface{}) + stage["api_id"] = *bd.ApiId + stage["stage"] = *bd.Stage + + stages = append(stages, stage) + } + } + + if len(stages) > 0 { + return stages + } + + return nil +} + +func flattenApiGatewayUsagePlanThrottling(s *apigateway.ThrottleSettings) []map[string]interface{} { + settings := make(map[string]interface{}, 0) + + if s == nil { + return nil + } + + if s.BurstLimit != nil { + settings["burst_limit"] = *s.BurstLimit + } + + if s.RateLimit != nil { + settings["rate_limit"] = *s.RateLimit + } + + return []map[string]interface{}{settings} +} + +func flattenApiGatewayUsagePlanQuota(s *apigateway.QuotaSettings) []map[string]interface{} { + settings := make(map[string]interface{}, 0) + + if s == nil { + return nil + } + + if s.Limit != nil { + settings["limit"] = *s.Limit + } + + if s.Offset != nil { + settings["offset"] = *s.Offset + } + + if s.Period != nil { + settings["period"] = *s.Period + } + + return []map[string]interface{}{settings} +} + +func buildApiGatewayInvokeURL(restApiId, region, stageName string) string { + return fmt.Sprintf("https://%s.execute-api.%s.amazonaws.com/%s", + restApiId, region, stageName) +} + +func buildApiGatewayExecutionARN(restApiId, region, accountId string) (string, error) { + if accountId == "" { + return "", fmt.Errorf("Unable to build execution ARN for %s as account ID is missing", + restApiId) + } + return fmt.Sprintf("arn:aws:execute-api:%s:%s:%s", + region, accountId, restApiId), nil +} + +func expandCognitoSupportedLoginProviders(config map[string]interface{}) map[string]*string { + m := map[string]*string{} + for k, v := range config { + s := v.(string) + m[k] = &s + } + return m +} + +func flattenCognitoSupportedLoginProviders(config map[string]*string) map[string]string { + m := map[string]string{} + for k, v := range config { + m[k] = *v + } + return m +} + +func expandCognitoIdentityProviders(s *schema.Set) []*cognitoidentity.Provider { + ips := make([]*cognitoidentity.Provider, 0) + + for _, v := range s.List() { + s := v.(map[string]interface{}) + + ip := &cognitoidentity.Provider{} + + if sv, ok := s["client_id"].(string); ok { + ip.ClientId = aws.String(sv) + } + + if sv, ok := s["provider_name"].(string); ok { + ip.ProviderName = aws.String(sv) + } + + if sv, ok := s["server_side_token_check"].(bool); ok { + ip.ServerSideTokenCheck = aws.Bool(sv) + } + + ips = append(ips, ip) + } + + return ips +} + +func flattenCognitoIdentityProviders(ips []*cognitoidentity.Provider) []map[string]interface{} { + values := make([]map[string]interface{}, 0) + + for _, v := range ips { + ip := 
make(map[string]interface{}) + + if v == nil { + return nil + } + + if v.ClientId != nil { + ip["client_id"] = *v.ClientId + } + + if v.ProviderName != nil { + ip["provider_name"] = *v.ProviderName + } + + if v.ServerSideTokenCheck != nil { + ip["server_side_token_check"] = *v.ServerSideTokenCheck + } + + values = append(values, ip) + } + + return values +} + +func buildLambdaInvokeArn(lambdaArn, region string) string { + apiVersion := "2015-03-31" + return fmt.Sprintf("arn:aws:apigateway:%s:lambda:path/%s/functions/%s/invocations", + region, apiVersion, lambdaArn) +} + +func sliceContainsMap(l []interface{}, m map[string]interface{}) (int, bool) { + for i, t := range l { + if reflect.DeepEqual(m, t.(map[string]interface{})) { + return i, true + } + } + + return -1, false +} + +func expandAwsSsmTargets(d *schema.ResourceData) []*ssm.Target { + var targets []*ssm.Target + + targetConfig := d.Get("targets").([]interface{}) + + for _, tConfig := range targetConfig { + config := tConfig.(map[string]interface{}) + + target := &ssm.Target{ + Key: aws.String(config["key"].(string)), + Values: expandStringList(config["values"].([]interface{})), + } + + targets = append(targets, target) + } + + return targets +} + +func flattenAwsSsmTargets(targets []*ssm.Target) []map[string]interface{} { + if len(targets) == 0 { + return nil + } + + result := make([]map[string]interface{}, 0, len(targets)) + target := targets[0] + + t := make(map[string]interface{}) + t["key"] = *target.Key + t["values"] = flattenStringList(target.Values) + + result = append(result, t) + + return result +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags.go new file mode 100644 index 000000000..46438c0fd --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags.go @@ -0,0 +1,407 @@ +package aws + +import ( + "log" + "regexp" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/elbv2" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +// tagsSchema returns the schema to use for tags. 
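+// The attribute is a plain schema.TypeMap of strings; the Computed variant
+// below additionally allows the value to be assigned by AWS rather than
+// only by configuration.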
+// +func tagsSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + } +} + +func tagsSchemaComputed() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Computed: true, + } +} + +func setElbV2Tags(conn *elbv2.ELBV2, d *schema.ResourceData) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffElbV2Tags(tagsFromMapELBv2(o), tagsFromMapELBv2(n)) + + // Set tags + if len(remove) > 0 { + var tagKeys []*string + for _, tag := range remove { + tagKeys = append(tagKeys, tag.Key) + } + log.Printf("[DEBUG] Removing tags: %#v from %s", remove, d.Id()) + _, err := conn.RemoveTags(&elbv2.RemoveTagsInput{ + ResourceArns: []*string{aws.String(d.Id())}, + TagKeys: tagKeys, + }) + if err != nil { + return err + } + } + if len(create) > 0 { + log.Printf("[DEBUG] Creating tags: %s for %s", create, d.Id()) + _, err := conn.AddTags(&elbv2.AddTagsInput{ + ResourceArns: []*string{aws.String(d.Id())}, + Tags: create, + }) + if err != nil { + return err + } + } + } + + return nil +} + +func setVolumeTags(conn *ec2.EC2, d *schema.ResourceData) error { + if d.HasChange("volume_tags") { + oraw, nraw := d.GetChange("volume_tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTags(tagsFromMap(o), tagsFromMap(n)) + + volumeIds, err := getAwsInstanceVolumeIds(conn, d) + if err != nil { + return err + } + + if len(remove) > 0 { + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + log.Printf("[DEBUG] Removing volume tags: %#v from %s", remove, d.Id()) + _, err := conn.DeleteTags(&ec2.DeleteTagsInput{ + Resources: volumeIds, + Tags: remove, + }) + if err != nil { + ec2err, ok := err.(awserr.Error) + if ok && strings.Contains(ec2err.Code(), ".NotFound") { + return resource.RetryableError(err) // retry + } + return resource.NonRetryableError(err) + } + return nil + }) + if err != nil { + return err + } + } + if len(create) > 0 { + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + log.Printf("[DEBUG] Creating vol tags: %s for %s", create, d.Id()) + _, err := conn.CreateTags(&ec2.CreateTagsInput{ + Resources: volumeIds, + Tags: create, + }) + if err != nil { + ec2err, ok := err.(awserr.Error) + if ok && strings.Contains(ec2err.Code(), ".NotFound") { + return resource.RetryableError(err) // retry + } + return resource.NonRetryableError(err) + } + return nil + }) + if err != nil { + return err + } + } + } + + return nil +} + +// setTags is a helper to set the tags for a resource. 
It expects the +// tags field to be named "tags" +func setTags(conn *ec2.EC2, d *schema.ResourceData) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTags(tagsFromMap(o), tagsFromMap(n)) + + // Set tags + if len(remove) > 0 { + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + log.Printf("[DEBUG] Removing tags: %#v from %s", remove, d.Id()) + _, err := conn.DeleteTags(&ec2.DeleteTagsInput{ + Resources: []*string{aws.String(d.Id())}, + Tags: remove, + }) + if err != nil { + ec2err, ok := err.(awserr.Error) + if ok && strings.Contains(ec2err.Code(), ".NotFound") { + return resource.RetryableError(err) // retry + } + return resource.NonRetryableError(err) + } + return nil + }) + if err != nil { + return err + } + } + if len(create) > 0 { + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + log.Printf("[DEBUG] Creating tags: %s for %s", create, d.Id()) + _, err := conn.CreateTags(&ec2.CreateTagsInput{ + Resources: []*string{aws.String(d.Id())}, + Tags: create, + }) + if err != nil { + ec2err, ok := err.(awserr.Error) + if ok && strings.Contains(ec2err.Code(), ".NotFound") { + return resource.RetryableError(err) // retry + } + return resource.NonRetryableError(err) + } + return nil + }) + if err != nil { + return err + } + } + } + + return nil +} + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTags(oldTags, newTags []*ec2.Tag) ([]*ec2.Tag, []*ec2.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[*t.Key] = *t.Value + } + + // Build the list of what to remove + var remove []*ec2.Tag + for _, t := range oldTags { + old, ok := create[*t.Key] + if !ok || old != *t.Value { + remove = append(remove, t) + } + } + + return tagsFromMap(create), remove +} + +// tagsFromMap returns the tags for the given map of data. +func tagsFromMap(m map[string]interface{}) []*ec2.Tag { + result := make([]*ec2.Tag, 0, len(m)) + for k, v := range m { + t := &ec2.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + if !tagIgnored(t) { + result = append(result, t) + } + } + + return result +} + +// tagsToMap turns the list of tags into a map. +func tagsToMap(ts []*ec2.Tag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + if !tagIgnored(t) { + result[*t.Key] = *t.Value + } + } + + return result +} + +func diffElbV2Tags(oldTags, newTags []*elbv2.Tag) ([]*elbv2.Tag, []*elbv2.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[*t.Key] = *t.Value + } + + // Build the list of what to remove + var remove []*elbv2.Tag + for _, t := range oldTags { + old, ok := create[*t.Key] + if !ok || old != *t.Value { + // Delete it! + remove = append(remove, t) + } + } + + return tagsFromMapELBv2(create), remove +} + +// tagsToMapELBv2 turns the list of tags into a map. +func tagsToMapELBv2(ts []*elbv2.Tag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + if !tagIgnoredELBv2(t) { + result[*t.Key] = *t.Value + } + } + + return result +} + +// tagsFromMapELBv2 returns the tags for the given map of data. 
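+// Keys matching the "^aws:" pattern are dropped via tagIgnoredELBv2, so a
+// map such as {"Name": "a", "aws:cloudformation:stack-id": "x"} yields a
+// single tag.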
+func tagsFromMapELBv2(m map[string]interface{}) []*elbv2.Tag { + var result []*elbv2.Tag + for k, v := range m { + t := &elbv2.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + if !tagIgnoredELBv2(t) { + result = append(result, t) + } + } + + return result +} + +// tagIgnored compares a tag against a list of strings and checks if it should +// be ignored or not +func tagIgnored(t *ec2.Tag) bool { + filter := []string{"^aws:"} + for _, v := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) + if r, _ := regexp.MatchString(v, *t.Key); r == true { + log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) + return true + } + } + return false +} + +// and for ELBv2 as well +func tagIgnoredELBv2(t *elbv2.Tag) bool { + filter := []string{"^aws:"} + for _, v := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) + if r, _ := regexp.MatchString(v, *t.Key); r == true { + log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) + return true + } + } + return false +} + +// tagsToMapDynamoDb turns the list of tags into a map for dynamoDB +func tagsToMapDynamoDb(ts []*dynamodb.Tag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + result[*t.Key] = *t.Value + } + return result +} + +// tagsFromMapDynamoDb returns the tags for a given map +func tagsFromMapDynamoDb(m map[string]interface{}) []*dynamodb.Tag { + result := make([]*dynamodb.Tag, 0, len(m)) + for k, v := range m { + t := &dynamodb.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + result = append(result, t) + } + return result +} + +// setTagsDynamoDb is a helper to set the tags for a dynamoDB resource +// This is needed because dynamodb requires a completely different set and delete +// method from the ec2 tag resource handling. Also the `UntagResource` method +// for dynamoDB only requires a list of tag keys, instead of the full map of keys. 
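+//
+// A typical call site inside a resource update function looks roughly like
+// this (illustrative sketch; it assumes the resource exposes "arn" and
+// "tags" attributes, which this helper reads internally):
+//
+//	if err := setTagsDynamoDb(conn, d); err != nil {
+//		return err
+//	}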
+func setTagsDynamoDb(conn *dynamodb.DynamoDB, d *schema.ResourceData) error { + if d.HasChange("tags") { + arn := d.Get("arn").(string) + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsDynamoDb(tagsFromMapDynamoDb(o), tagsFromMapDynamoDb(n)) + + // Set tags + if len(remove) > 0 { + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + log.Printf("[DEBUG] Removing tags: %#v from %s", remove, d.Id()) + _, err := conn.UntagResource(&dynamodb.UntagResourceInput{ + ResourceArn: aws.String(arn), + TagKeys: remove, + }) + if err != nil { + ec2err, ok := err.(awserr.Error) + if ok && strings.Contains(ec2err.Code(), "ResourceNotFoundException") { + return resource.RetryableError(err) // retry + } + return resource.NonRetryableError(err) + } + return nil + }) + if err != nil { + return err + } + } + if len(create) > 0 { + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + log.Printf("[DEBUG] Creating tags: %s for %s", create, d.Id()) + _, err := conn.TagResource(&dynamodb.TagResourceInput{ + ResourceArn: aws.String(arn), + Tags: create, + }) + if err != nil { + ec2err, ok := err.(awserr.Error) + if ok && strings.Contains(ec2err.Code(), "ResourceNotFoundException") { + return resource.RetryableError(err) // retry + } + return resource.NonRetryableError(err) + } + return nil + }) + if err != nil { + return err + } + } + } + + return nil +} + +// diffTagsDynamoDb takes a local set of dynamodb tags and the ones found remotely +// and returns the set of tags that must be created as a map, and returns a list of tag keys +// that must be destroyed. +func diffTagsDynamoDb(oldTags, newTags []*dynamodb.Tag) ([]*dynamodb.Tag, []*string) { + create := make(map[string]interface{}) + for _, t := range newTags { + create[*t.Key] = *t.Value + } + + var remove []*string + for _, t := range oldTags { + // Verify the old tag is not a tag we're currently attempting to create + old, ok := create[*t.Key] + if !ok || old != *t.Value { + remove = append(remove, t.Key) + } + } + return tagsFromMapDynamoDb(create), remove +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsBeanstalk.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsBeanstalk.go new file mode 100644 index 000000000..7b85d6116 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsBeanstalk.go @@ -0,0 +1,74 @@ +package aws + +import ( + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/elasticbeanstalk" +) + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTagsBeanstalk(oldTags, newTags []*elasticbeanstalk.Tag) ([]*elasticbeanstalk.Tag, []*elasticbeanstalk.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[*t.Key] = *t.Value + } + + // Build the list of what to remove + var remove []*elasticbeanstalk.Tag + for _, t := range oldTags { + old, ok := create[*t.Key] + if !ok || old != *t.Value { + // Delete it! + remove = append(remove, t) + } + } + + return tagsFromMapBeanstalk(create), remove +} + +// tagsFromMap returns the tags for the given map of data. 
+func tagsFromMapBeanstalk(m map[string]interface{}) []*elasticbeanstalk.Tag { + var result []*elasticbeanstalk.Tag + for k, v := range m { + t := &elasticbeanstalk.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + if !tagIgnoredBeanstalk(t) { + result = append(result, t) + } + } + + return result +} + +// tagsToMap turns the list of tags into a map. +func tagsToMapBeanstalk(ts []*elasticbeanstalk.Tag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + if !tagIgnoredBeanstalk(t) { + result[*t.Key] = *t.Value + } + } + + return result +} + +// compare a tag against a list of strings and checks if it should +// be ignored or not +func tagIgnoredBeanstalk(t *elasticbeanstalk.Tag) bool { + filter := []string{"^aws:"} + for _, v := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) + if r, _ := regexp.MatchString(v, *t.Key); r == true { + log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) + return true + } + } + return false +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCloudFront.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCloudFront.go new file mode 100644 index 000000000..d2b60c73c --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCloudFront.go @@ -0,0 +1,98 @@ +package aws + +import ( + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudfront" + "github.com/hashicorp/terraform/helper/schema" +) + +func setTagsCloudFront(conn *cloudfront.CloudFront, d *schema.ResourceData, arn string) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsCloudFront(tagsFromMapCloudFront(o), tagsFromMapCloudFront(n)) + + if len(remove) > 0 { + log.Printf("[DEBUG] Removing tags: %s", remove) + k := make([]*string, 0, len(remove)) + for _, t := range remove { + k = append(k, t.Key) + } + + _, err := conn.UntagResource(&cloudfront.UntagResourceInput{ + Resource: aws.String(arn), + TagKeys: &cloudfront.TagKeys{ + Items: k, + }, + }) + if err != nil { + return err + } + } + + if len(create) > 0 { + log.Printf("[DEBUG] Creating tags: %s", create) + _, err := conn.TagResource(&cloudfront.TagResourceInput{ + Resource: aws.String(arn), + Tags: &cloudfront.Tags{ + Items: create, + }, + }) + if err != nil { + return err + } + } + + } + + return nil +} +func diffTagsCloudFront(oldTags, newTags *cloudfront.Tags) ([]*cloudfront.Tag, []*cloudfront.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags.Items { + create[*t.Key] = *t.Value + } + + // Build the list of what to remove + var remove []*cloudfront.Tag + for _, t := range oldTags.Items { + old, ok := create[*t.Key] + if !ok || old != *t.Value { + // Delete it! 
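+			// A tag whose value changed appears in both lists: it is
+			// removed here under its old value and re-created from the
+			// new map, so removals must be applied before additions.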
+ remove = append(remove, t) + } + } + + createTags := tagsFromMapCloudFront(create) + return createTags.Items, remove +} + +func tagsFromMapCloudFront(m map[string]interface{}) *cloudfront.Tags { + result := make([]*cloudfront.Tag, 0, len(m)) + for k, v := range m { + result = append(result, &cloudfront.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + }) + } + + tags := &cloudfront.Tags{ + Items: result, + } + + return tags +} + +func tagsToMapCloudFront(ts *cloudfront.Tags) map[string]string { + result := make(map[string]string) + + for _, t := range ts.Items { + result[*t.Key] = *t.Value + } + + return result +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCloudtrail.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCloudtrail.go new file mode 100644 index 000000000..b4302ddd1 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCloudtrail.go @@ -0,0 +1,112 @@ +package aws + +import ( + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudtrail" + "github.com/hashicorp/terraform/helper/schema" +) + +// setTags is a helper to set the tags for a resource. It expects the +// tags field to be named "tags" +func setTagsCloudtrail(conn *cloudtrail.CloudTrail, d *schema.ResourceData) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsCloudtrail(tagsFromMapCloudtrail(o), tagsFromMapCloudtrail(n)) + + // Set tags + if len(remove) > 0 { + input := cloudtrail.RemoveTagsInput{ + ResourceId: aws.String(d.Get("arn").(string)), + TagsList: remove, + } + log.Printf("[DEBUG] Removing CloudTrail tags: %s", input) + _, err := conn.RemoveTags(&input) + if err != nil { + return err + } + } + if len(create) > 0 { + input := cloudtrail.AddTagsInput{ + ResourceId: aws.String(d.Get("arn").(string)), + TagsList: create, + } + log.Printf("[DEBUG] Adding CloudTrail tags: %s", input) + _, err := conn.AddTags(&input) + if err != nil { + return err + } + } + } + + return nil +} + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTagsCloudtrail(oldTags, newTags []*cloudtrail.Tag) ([]*cloudtrail.Tag, []*cloudtrail.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[*t.Key] = *t.Value + } + + // Build the list of what to remove + var remove []*cloudtrail.Tag + for _, t := range oldTags { + old, ok := create[*t.Key] + if !ok || old != *t.Value { + // Delete it! + remove = append(remove, t) + } + } + + return tagsFromMapCloudtrail(create), remove +} + +// tagsFromMap returns the tags for the given map of data. +func tagsFromMapCloudtrail(m map[string]interface{}) []*cloudtrail.Tag { + var result []*cloudtrail.Tag + for k, v := range m { + t := &cloudtrail.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + if !tagIgnoredCloudtrail(t) { + result = append(result, t) + } + } + + return result +} + +// tagsToMap turns the list of tags into a map. 
+func tagsToMapCloudtrail(ts []*cloudtrail.Tag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + if !tagIgnoredCloudtrail(t) { + result[*t.Key] = *t.Value + } + } + + return result +} + +// compare a tag against a list of strings and checks if it should +// be ignored or not +func tagIgnoredCloudtrail(t *cloudtrail.Tag) bool { + filter := []string{"^aws:"} + for _, v := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) + if r, _ := regexp.MatchString(v, *t.Key); r == true { + log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) + return true + } + } + return false +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCodeBuild.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCodeBuild.go new file mode 100644 index 000000000..3302d7426 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsCodeBuild.go @@ -0,0 +1,67 @@ +package aws + +import ( + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/codebuild" +) + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTagsCodeBuild(oldTags, newTags []*codebuild.Tag) ([]*codebuild.Tag, []*codebuild.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[*t.Key] = *t.Value + } + + // Build the list of what to remove + var remove []*codebuild.Tag + for _, t := range oldTags { + old, ok := create[*t.Key] + if !ok || old != *t.Value { + // Delete it! + remove = append(remove, t) + } + } + + return tagsFromMapCodeBuild(create), remove +} + +func tagsFromMapCodeBuild(m map[string]interface{}) []*codebuild.Tag { + result := []*codebuild.Tag{} + for k, v := range m { + result = append(result, &codebuild.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + }) + } + + return result +} + +func tagsToMapCodeBuild(ts []*codebuild.Tag) map[string]string { + result := map[string]string{} + for _, t := range ts { + result[*t.Key] = *t.Value + } + + return result +} + +// compare a tag against a list of strings and checks if it should +// be ignored or not +func tagIgnoredCodeBuild(t *codebuild.Tag) bool { + filter := []string{"^aws:"} + for _, v := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) + if r, _ := regexp.MatchString(v, *t.Key); r == true { + log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) + return true + } + } + return false +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsEC.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsEC.go new file mode 100644 index 000000000..b9b22af9c --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsEC.go @@ -0,0 +1,115 @@ +package aws + +import ( + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/hashicorp/terraform/helper/schema" +) + +// setTags is a helper to set the tags for a resource. 
It expects the +// tags field to be named "tags" +func setTagsEC(conn *elasticache.ElastiCache, d *schema.ResourceData, arn string) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsEC(tagsFromMapEC(o), tagsFromMapEC(n)) + + // Set tags + if len(remove) > 0 { + log.Printf("[DEBUG] Removing tags: %#v", remove) + k := make([]*string, len(remove), len(remove)) + for i, t := range remove { + k[i] = t.Key + } + + _, err := conn.RemoveTagsFromResource(&elasticache.RemoveTagsFromResourceInput{ + ResourceName: aws.String(arn), + TagKeys: k, + }) + if err != nil { + return err + } + } + if len(create) > 0 { + log.Printf("[DEBUG] Creating tags: %#v", create) + _, err := conn.AddTagsToResource(&elasticache.AddTagsToResourceInput{ + ResourceName: aws.String(arn), + Tags: create, + }) + if err != nil { + return err + } + } + } + + return nil +} + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTagsEC(oldTags, newTags []*elasticache.Tag) ([]*elasticache.Tag, []*elasticache.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[*t.Key] = *t.Value + } + + // Build the list of what to remove + var remove []*elasticache.Tag + for _, t := range oldTags { + old, ok := create[*t.Key] + if !ok || old != *t.Value { + // Delete it! + remove = append(remove, t) + } + } + + return tagsFromMapEC(create), remove +} + +// tagsFromMap returns the tags for the given map of data. +func tagsFromMapEC(m map[string]interface{}) []*elasticache.Tag { + result := make([]*elasticache.Tag, 0, len(m)) + for k, v := range m { + t := &elasticache.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + if !tagIgnoredEC(t) { + result = append(result, t) + } + } + + return result +} + +// tagsToMap turns the list of tags into a map. +func tagsToMapEC(ts []*elasticache.Tag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + if !tagIgnoredEC(t) { + result[*t.Key] = *t.Value + } + } + + return result +} + +// compare a tag against a list of strings and checks if it should +// be ignored or not +func tagIgnoredEC(t *elasticache.Tag) bool { + filter := []string{"^aws:"} + for _, v := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) + if r, _ := regexp.MatchString(v, *t.Key); r == true { + log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) + return true + } + } + return false +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsEFS.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsEFS.go new file mode 100644 index 000000000..b61973165 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsEFS.go @@ -0,0 +1,114 @@ +package aws + +import ( + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/efs" + "github.com/hashicorp/terraform/helper/schema" +) + +// setTags is a helper to set the tags for a resource. 
It expects the +// tags field to be named "tags" +func setTagsEFS(conn *efs.EFS, d *schema.ResourceData) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsEFS(tagsFromMapEFS(o), tagsFromMapEFS(n)) + + // Set tags + if len(remove) > 0 { + log.Printf("[DEBUG] Removing tags: %#v", remove) + k := make([]*string, 0, len(remove)) + for _, t := range remove { + k = append(k, t.Key) + } + _, err := conn.DeleteTags(&efs.DeleteTagsInput{ + FileSystemId: aws.String(d.Id()), + TagKeys: k, + }) + if err != nil { + return err + } + } + if len(create) > 0 { + log.Printf("[DEBUG] Creating tags: %#v", create) + _, err := conn.CreateTags(&efs.CreateTagsInput{ + FileSystemId: aws.String(d.Id()), + Tags: create, + }) + if err != nil { + return err + } + } + } + + return nil +} + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTagsEFS(oldTags, newTags []*efs.Tag) ([]*efs.Tag, []*efs.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[*t.Key] = *t.Value + } + + // Build the list of what to remove + var remove []*efs.Tag + for _, t := range oldTags { + old, ok := create[*t.Key] + if !ok || old != *t.Value { + // Delete it! + remove = append(remove, t) + } + } + + return tagsFromMapEFS(create), remove +} + +// tagsFromMap returns the tags for the given map of data. +func tagsFromMapEFS(m map[string]interface{}) []*efs.Tag { + var result []*efs.Tag + for k, v := range m { + t := &efs.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + if !tagIgnoredEFS(t) { + result = append(result, t) + } + } + + return result +} + +// tagsToMap turns the list of tags into a map. +func tagsToMapEFS(ts []*efs.Tag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + if !tagIgnoredEFS(t) { + result[*t.Key] = *t.Value + } + } + + return result +} + +// compare a tag against a list of strings and checks if it should +// be ignored or not +func tagIgnoredEFS(t *efs.Tag) bool { + filter := []string{"^aws:"} + for _, v := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) + if r, _ := regexp.MatchString(v, *t.Key); r == true { + log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) + return true + } + } + return false +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsELB.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsELB.go new file mode 100644 index 000000000..081de9cc1 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsELB.go @@ -0,0 +1,114 @@ +package aws + +import ( + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/elb" + "github.com/hashicorp/terraform/helper/schema" +) + +// setTags is a helper to set the tags for a resource. 
It expects the +// tags field to be named "tags" +func setTagsELB(conn *elb.ELB, d *schema.ResourceData) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsELB(tagsFromMapELB(o), tagsFromMapELB(n)) + + // Set tags + if len(remove) > 0 { + log.Printf("[DEBUG] Removing tags: %#v", remove) + k := make([]*elb.TagKeyOnly, 0, len(remove)) + for _, t := range remove { + k = append(k, &elb.TagKeyOnly{Key: t.Key}) + } + _, err := conn.RemoveTags(&elb.RemoveTagsInput{ + LoadBalancerNames: []*string{aws.String(d.Get("name").(string))}, + Tags: k, + }) + if err != nil { + return err + } + } + if len(create) > 0 { + log.Printf("[DEBUG] Creating tags: %#v", create) + _, err := conn.AddTags(&elb.AddTagsInput{ + LoadBalancerNames: []*string{aws.String(d.Get("name").(string))}, + Tags: create, + }) + if err != nil { + return err + } + } + } + + return nil +} + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTagsELB(oldTags, newTags []*elb.Tag) ([]*elb.Tag, []*elb.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[*t.Key] = *t.Value + } + + // Build the list of what to remove + var remove []*elb.Tag + for _, t := range oldTags { + old, ok := create[*t.Key] + if !ok || old != *t.Value { + // Delete it! + remove = append(remove, t) + } + } + + return tagsFromMapELB(create), remove +} + +// tagsFromMap returns the tags for the given map of data. +func tagsFromMapELB(m map[string]interface{}) []*elb.Tag { + var result []*elb.Tag + for k, v := range m { + t := &elb.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + if !tagIgnoredELB(t) { + result = append(result, t) + } + } + + return result +} + +// tagsToMap turns the list of tags into a map. +func tagsToMapELB(ts []*elb.Tag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + if !tagIgnoredELB(t) { + result[*t.Key] = *t.Value + } + } + + return result +} + +// compare a tag against a list of strings and checks if it should +// be ignored or not +func tagIgnoredELB(t *elb.Tag) bool { + filter := []string{"^aws:"} + for _, v := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) + if r, _ := regexp.MatchString(v, *t.Key); r == true { + log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) + return true + } + } + return false +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsGeneric.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsGeneric.go new file mode 100644 index 000000000..d494a4972 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsGeneric.go @@ -0,0 +1,69 @@ +package aws + +import ( + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" +) + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. 
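+//
+// For example (illustrative only):
+//
+//	create, remove := diffTagsGeneric(
+//		map[string]interface{}{"Name": "a", "Env": "dev"},
+//		map[string]interface{}{"Name": "a", "Env": "prod"},
+//	)
+//	// create covers every desired tag; remove holds only the stale
+//	// {"Env": "dev"} entry, because "Name" is unchanged.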
+func diffTagsGeneric(oldTags, newTags map[string]interface{}) (map[string]*string, map[string]*string) {
+	// First, we're creating everything we have
+	create := make(map[string]*string)
+	for k, v := range newTags {
+		create[k] = aws.String(v.(string))
+	}
+
+	// Build the map of what to remove
+	remove := make(map[string]*string)
+	for k, v := range oldTags {
+		old, ok := create[k]
+		if !ok || aws.StringValue(old) != v.(string) {
+			// Delete it!
+			remove[k] = aws.String(v.(string))
+		}
+	}
+
+	return create, remove
+}
+
+// tagsFromMap returns the tags for the given map of data.
+func tagsFromMapGeneric(m map[string]interface{}) map[string]*string {
+	result := make(map[string]*string)
+	for k, v := range m {
+		if !tagIgnoredGeneric(k) {
+			result[k] = aws.String(v.(string))
+		}
+	}
+
+	return result
+}
+
+// tagsToMap turns the tags into a map.
+func tagsToMapGeneric(ts map[string]*string) map[string]string {
+	result := make(map[string]string)
+	for k, v := range ts {
+		if !tagIgnoredGeneric(k) {
+			result[k] = aws.StringValue(v)
+		}
+	}
+
+	return result
+}
+
+// tagIgnoredGeneric compares a tag key against a list of patterns and
+// reports whether it should be ignored.
+func tagIgnoredGeneric(k string) bool {
+	filter := []string{"^aws:"}
+	for _, v := range filter {
+		log.Printf("[DEBUG] Matching %v with %v\n", v, k)
+		if r, _ := regexp.MatchString(v, k); r {
+			log.Printf("[DEBUG] Found AWS specific tag %s, ignoring.\n", k)
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsInspector.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsInspector.go
new file mode 100644
index 000000000..ef18f33c2
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsInspector.go
@@ -0,0 +1,74 @@
+package aws
+
+import (
+	"log"
+	"regexp"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/inspector"
+)
+
+// diffTags takes our tags locally and the ones remotely and returns
+// the set of tags that must be created, and the set of tags that must
+// be destroyed.
+func diffTagsInspector(oldTags, newTags []*inspector.ResourceGroupTag) ([]*inspector.ResourceGroupTag, []*inspector.ResourceGroupTag) {
+	// First, we're creating everything we have
+	create := make(map[string]interface{})
+	for _, t := range newTags {
+		create[*t.Key] = *t.Value
+	}
+
+	// Build the list of what to remove
+	var remove []*inspector.ResourceGroupTag
+	for _, t := range oldTags {
+		old, ok := create[*t.Key]
+		if !ok || old != *t.Value {
+			// Delete it!
+			remove = append(remove, t)
+		}
+	}
+
+	return tagsFromMapInspector(create), remove
+}
+
+// tagsFromMap returns the tags for the given map of data.
+func tagsFromMapInspector(m map[string]interface{}) []*inspector.ResourceGroupTag {
+	var result []*inspector.ResourceGroupTag
+	for k, v := range m {
+		t := &inspector.ResourceGroupTag{
+			Key:   aws.String(k),
+			Value: aws.String(v.(string)),
+		}
+		if !tagIgnoredInspector(t) {
+			result = append(result, t)
+		}
+	}
+
+	return result
+}
+
+// tagsToMap turns the list of tags into a map.
+func tagsToMapInspector(ts []*inspector.ResourceGroupTag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + if !tagIgnoredInspector(t) { + result[*t.Key] = *t.Value + } + } + + return result +} + +// compare a tag against a list of strings and checks if it should +// be ignored or not +func tagIgnoredInspector(t *inspector.ResourceGroupTag) bool { + filter := []string{"^aws:"} + for _, v := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) + if r, _ := regexp.MatchString(v, *t.Key); r == true { + log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) + return true + } + } + return false +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsKMS.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsKMS.go new file mode 100644 index 000000000..4e918414e --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsKMS.go @@ -0,0 +1,115 @@ +package aws + +import ( + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/kms" + "github.com/hashicorp/terraform/helper/schema" +) + +// setTags is a helper to set the tags for a resource. It expects the +// tags field to be named "tags" +func setTagsKMS(conn *kms.KMS, d *schema.ResourceData, keyId string) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsKMS(tagsFromMapKMS(o), tagsFromMapKMS(n)) + + // Set tags + if len(remove) > 0 { + log.Printf("[DEBUG] Removing tags: %#v", remove) + k := make([]*string, len(remove), len(remove)) + for i, t := range remove { + k[i] = t.TagKey + } + + _, err := conn.UntagResource(&kms.UntagResourceInput{ + KeyId: aws.String(keyId), + TagKeys: k, + }) + if err != nil { + return err + } + } + if len(create) > 0 { + log.Printf("[DEBUG] Creating tags: %#v", create) + _, err := conn.TagResource(&kms.TagResourceInput{ + KeyId: aws.String(keyId), + Tags: create, + }) + if err != nil { + return err + } + } + } + + return nil +} + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTagsKMS(oldTags, newTags []*kms.Tag) ([]*kms.Tag, []*kms.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[aws.StringValue(t.TagKey)] = aws.StringValue(t.TagValue) + } + + // Build the list of what to remove + var remove []*kms.Tag + for _, t := range oldTags { + old, ok := create[aws.StringValue(t.TagKey)] + if !ok || old != aws.StringValue(t.TagValue) { + // Delete it! + remove = append(remove, t) + } + } + + return tagsFromMapKMS(create), remove +} + +// tagsFromMap returns the tags for the given map of data. +func tagsFromMapKMS(m map[string]interface{}) []*kms.Tag { + result := make([]*kms.Tag, 0, len(m)) + for k, v := range m { + t := &kms.Tag{ + TagKey: aws.String(k), + TagValue: aws.String(v.(string)), + } + if !tagIgnoredKMS(t) { + result = append(result, t) + } + } + + return result +} + +// tagsToMap turns the list of tags into a map. 
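+// KMS names its tag fields TagKey/TagValue rather than the Key/Value pair
+// used by most other services, hence the aws.StringValue conversions below.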
+func tagsToMapKMS(ts []*kms.Tag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + if !tagIgnoredKMS(t) { + result[aws.StringValue(t.TagKey)] = aws.StringValue(t.TagValue) + } + } + + return result +} + +// compare a tag against a list of strings and checks if it should +// be ignored or not +func tagIgnoredKMS(t *kms.Tag) bool { + filter := []string{"^aws:"} + for _, v := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", v, *t.TagKey) + if r, _ := regexp.MatchString(v, *t.TagKey); r == true { + log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.TagKey, *t.TagValue) + return true + } + } + return false +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsLambda.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsLambda.go new file mode 100644 index 000000000..28aa25121 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsLambda.go @@ -0,0 +1,50 @@ +package aws + +import ( + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/lambda" + "github.com/hashicorp/terraform/helper/schema" +) + +// setTags is a helper to set the tags for a resource. It expects the +// tags field to be named "tags" +func setTagsLambda(conn *lambda.Lambda, d *schema.ResourceData, arn string) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsGeneric(o, n) + + // Set tags + if len(remove) > 0 { + log.Printf("[DEBUG] Removing tags: %#v", remove) + keys := make([]*string, 0, len(remove)) + for k := range remove { + keys = append(keys, aws.String(k)) + } + + _, err := conn.UntagResource(&lambda.UntagResourceInput{ + Resource: aws.String(arn), + TagKeys: keys, + }) + if err != nil { + return err + } + } + if len(create) > 0 { + log.Printf("[DEBUG] Creating tags: %#v", create) + + _, err := conn.TagResource(&lambda.TagResourceInput{ + Resource: aws.String(arn), + Tags: create, + }) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsRDS.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsRDS.go new file mode 100644 index 000000000..2d6411348 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsRDS.go @@ -0,0 +1,133 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/rds" + "github.com/hashicorp/terraform/helper/schema" +) + +// setTags is a helper to set the tags for a resource. 
It expects the +// tags field to be named "tags" +func setTagsRDS(conn *rds.RDS, d *schema.ResourceData, arn string) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsRDS(tagsFromMapRDS(o), tagsFromMapRDS(n)) + + // Set tags + if len(remove) > 0 { + log.Printf("[DEBUG] Removing tags: %s", remove) + k := make([]*string, len(remove), len(remove)) + for i, t := range remove { + k[i] = t.Key + } + + _, err := conn.RemoveTagsFromResource(&rds.RemoveTagsFromResourceInput{ + ResourceName: aws.String(arn), + TagKeys: k, + }) + if err != nil { + return err + } + } + if len(create) > 0 { + log.Printf("[DEBUG] Creating tags: %s", create) + _, err := conn.AddTagsToResource(&rds.AddTagsToResourceInput{ + ResourceName: aws.String(arn), + Tags: create, + }) + if err != nil { + return err + } + } + } + + return nil +} + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTagsRDS(oldTags, newTags []*rds.Tag) ([]*rds.Tag, []*rds.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[*t.Key] = *t.Value + } + + // Build the list of what to remove + var remove []*rds.Tag + for _, t := range oldTags { + old, ok := create[*t.Key] + if !ok || old != *t.Value { + // Delete it! + remove = append(remove, t) + } + } + + return tagsFromMapRDS(create), remove +} + +// tagsFromMap returns the tags for the given map of data. +func tagsFromMapRDS(m map[string]interface{}) []*rds.Tag { + result := make([]*rds.Tag, 0, len(m)) + for k, v := range m { + t := &rds.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + if !tagIgnoredRDS(t) { + result = append(result, t) + } + } + + return result +} + +// tagsToMap turns the list of tags into a map. 
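+// As with the other per-service helpers, AWS-managed tags are dropped, so
+// (illustrative example) {Key: "env", Value: "prod"} survives the round
+// trip while {Key: "aws:cloudformation:stack-name"} does not.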
+func tagsToMapRDS(ts []*rds.Tag) map[string]string {
+	result := make(map[string]string)
+	for _, t := range ts {
+		if !tagIgnoredRDS(t) {
+			result[*t.Key] = *t.Value
+		}
+	}
+
+	return result
+}
+
+func saveTagsRDS(conn *rds.RDS, d *schema.ResourceData, arn string) error {
+	resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{
+		ResourceName: aws.String(arn),
+	})
+
+	if err != nil {
+		return fmt.Errorf("Error retrieving tags for ARN: %s", arn)
+	}
+
+	var dt []*rds.Tag
+	if len(resp.TagList) > 0 {
+		dt = resp.TagList
+	}
+
+	return d.Set("tags", tagsToMapRDS(dt))
+}
+
+// compare a tag against a list of strings and checks if it should
+// be ignored or not
+func tagIgnoredRDS(t *rds.Tag) bool {
+	filter := []string{"^aws:"}
+	for _, v := range filter {
+		log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key)
+		if r, _ := regexp.MatchString(v, *t.Key); r == true {
+			log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value)
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsRedshift.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsRedshift.go
new file mode 100644
index 000000000..715e82045
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tagsRedshift.go
@@ -0,0 +1,108 @@
+package aws
+
+import (
+	"log"
+	"regexp"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/redshift"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func setTagsRedshift(conn *redshift.Redshift, d *schema.ResourceData, arn string) error {
+	if d.HasChange("tags") {
+		oraw, nraw := d.GetChange("tags")
+		o := oraw.(map[string]interface{})
+		n := nraw.(map[string]interface{})
+		create, remove := diffTagsRedshift(tagsFromMapRedshift(o), tagsFromMapRedshift(n))
+
+		// Set tags
+		if len(remove) > 0 {
+			log.Printf("[DEBUG] Removing tags: %#v", remove)
+			k := make([]*string, len(remove), len(remove))
+			for i, t := range remove {
+				k[i] = t.Key
+			}
+
+			_, err := conn.DeleteTags(&redshift.DeleteTagsInput{
+				ResourceName: aws.String(arn),
+				TagKeys:      k,
+			})
+			if err != nil {
+				return err
+			}
+		}
+		if len(create) > 0 {
+			log.Printf("[DEBUG] Creating tags: %#v", create)
+			_, err := conn.CreateTags(&redshift.CreateTagsInput{
+				ResourceName: aws.String(arn),
+				Tags:         create,
+			})
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func diffTagsRedshift(oldTags, newTags []*redshift.Tag) ([]*redshift.Tag, []*redshift.Tag) {
+	// First, we're creating everything we have
+	create := make(map[string]interface{})
+	for _, t := range newTags {
+		create[*t.Key] = *t.Value
+	}
+
+	// Build the list of what to remove
+	var remove []*redshift.Tag
+	for _, t := range oldTags {
+		old, ok := create[*t.Key]
+		if !ok || old != *t.Value {
+			// Delete it!
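+			// (either the key is gone from the new map or its value changed,
+			// so the old tag must be removed before the new set is applied)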
+ remove = append(remove, t) + } + } + + return tagsFromMapRedshift(create), remove +} + +func tagsFromMapRedshift(m map[string]interface{}) []*redshift.Tag { + result := make([]*redshift.Tag, 0, len(m)) + for k, v := range m { + t := &redshift.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + if !tagIgnoredRedshift(t) { + result = append(result, t) + } + } + + return result +} + +func tagsToMapRedshift(ts []*redshift.Tag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + if !tagIgnoredRedshift(t) { + result[*t.Key] = *t.Value + } + } + + return result +} + +// compare a tag against a list of strings and checks if it should +// be ignored or not +func tagIgnoredRedshift(t *redshift.Tag) bool { + filter := []string{"^aws:"} + for _, v := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) + if r, _ := regexp.MatchString(v, *t.Key); r == true { + log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) + return true + } + } + return false +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_dms.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_dms.go new file mode 100644 index 000000000..c88050059 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_dms.go @@ -0,0 +1,91 @@ +package aws + +import ( + "github.com/aws/aws-sdk-go/aws" + dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" + "github.com/hashicorp/terraform/helper/schema" +) + +func dmsTagsToMap(tags []*dms.Tag) map[string]string { + result := make(map[string]string) + + for _, tag := range tags { + result[*tag.Key] = *tag.Value + } + + return result +} + +func dmsTagsFromMap(m map[string]interface{}) []*dms.Tag { + result := make([]*dms.Tag, 0, len(m)) + + for k, v := range m { + result = append(result, &dms.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + }) + } + + return result +} + +func dmsDiffTags(oldTags, newTags []*dms.Tag) ([]*dms.Tag, []*dms.Tag) { + create := make(map[string]interface{}) + for _, t := range newTags { + create[*t.Key] = *t.Value + } + + remove := []*dms.Tag{} + for _, t := range oldTags { + v, ok := create[*t.Key] + if !ok || v != *t.Value { + remove = append(remove, t) + } + } + + return dmsTagsFromMap(create), remove +} + +func dmsGetTagKeys(tags []*dms.Tag) []*string { + keys := []*string{} + + for _, tag := range tags { + keys = append(keys, tag.Key) + } + + return keys +} + +func dmsSetTags(arn string, d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).dmsconn + + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + + add, remove := dmsDiffTags(dmsTagsFromMap(o), dmsTagsFromMap(n)) + + if len(remove) > 0 { + _, err := conn.RemoveTagsFromResource(&dms.RemoveTagsFromResourceInput{ + ResourceArn: aws.String(arn), + TagKeys: dmsGetTagKeys(remove), + }) + if err != nil { + return err + } + } + + if len(add) > 0 { + _, err := conn.AddTagsToResource(&dms.AddTagsToResourceInput{ + ResourceArn: aws.String(arn), + Tags: add, + }) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_elasticsearchservice.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_elasticsearchservice.go new file mode 100644 index 000000000..e585d1afa --- /dev/null +++ 
b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_elasticsearchservice.go @@ -0,0 +1,114 @@ +package aws + +import ( + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" + "github.com/hashicorp/terraform/helper/schema" +) + +// setTags is a helper to set the tags for a resource. It expects the +// tags field to be named "tags" +func setTagsElasticsearchService(conn *elasticsearch.ElasticsearchService, d *schema.ResourceData, arn string) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsElasticsearchService(tagsFromMapElasticsearchService(o), tagsFromMapElasticsearchService(n)) + + // Set tags + if len(remove) > 0 { + log.Printf("[DEBUG] Removing tags: %#v", remove) + k := make([]*string, 0, len(remove)) + for _, t := range remove { + k = append(k, t.Key) + } + _, err := conn.RemoveTags(&elasticsearch.RemoveTagsInput{ + ARN: aws.String(arn), + TagKeys: k, + }) + if err != nil { + return err + } + } + if len(create) > 0 { + log.Printf("[DEBUG] Creating tags: %#v", create) + _, err := conn.AddTags(&elasticsearch.AddTagsInput{ + ARN: aws.String(arn), + TagList: create, + }) + if err != nil { + return err + } + } + } + + return nil +} + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTagsElasticsearchService(oldTags, newTags []*elasticsearch.Tag) ([]*elasticsearch.Tag, []*elasticsearch.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[*t.Key] = *t.Value + } + + // Build the list of what to remove + var remove []*elasticsearch.Tag + for _, t := range oldTags { + old, ok := create[*t.Key] + if !ok || old != *t.Value { + // Delete it! + remove = append(remove, t) + } + } + + return tagsFromMapElasticsearchService(create), remove +} + +// tagsFromMap returns the tags for the given map of data. +func tagsFromMapElasticsearchService(m map[string]interface{}) []*elasticsearch.Tag { + var result []*elasticsearch.Tag + for k, v := range m { + t := &elasticsearch.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + if !tagIgnoredElasticsearchService(t) { + result = append(result, t) + } + } + + return result +} + +// tagsToMap turns the list of tags into a map. 
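+// The inverse of tagsFromMapElasticsearchService above; round-tripping a
+// map through both helpers is lossy only for "aws:"-prefixed keys, which
+// tagIgnoredElasticsearchService filters in both directions.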
+func tagsToMapElasticsearchService(ts []*elasticsearch.Tag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + if !tagIgnoredElasticsearchService(t) { + result[*t.Key] = *t.Value + } + } + + return result +} + +// compare a tag against a list of strings and checks if it should +// be ignored or not +func tagIgnoredElasticsearchService(t *elasticsearch.Tag) bool { + filter := []string{"^aws:"} + for _, v := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) + if r, _ := regexp.MatchString(v, *t.Key); r == true { + log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) + return true + } + } + return false +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_kinesis.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_kinesis.go new file mode 100644 index 000000000..a5622e95d --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_kinesis.go @@ -0,0 +1,125 @@ +package aws + +import ( + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/kinesis" + "github.com/hashicorp/terraform/helper/schema" +) + +// setTags is a helper to set the tags for a resource. It expects the +// tags field to be named "tags" +func setTagsKinesis(conn *kinesis.Kinesis, d *schema.ResourceData) error { + + sn := d.Get("name").(string) + + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsKinesis(tagsFromMapKinesis(o), tagsFromMapKinesis(n)) + + // Set tags + if len(remove) > 0 { + log.Printf("[DEBUG] Removing tags: %#v", remove) + k := make([]*string, len(remove), len(remove)) + for i, t := range remove { + k[i] = t.Key + } + + _, err := conn.RemoveTagsFromStream(&kinesis.RemoveTagsFromStreamInput{ + StreamName: aws.String(sn), + TagKeys: k, + }) + if err != nil { + return err + } + } + + if len(create) > 0 { + + log.Printf("[DEBUG] Creating tags: %#v", create) + t := make(map[string]*string) + for _, tag := range create { + t[*tag.Key] = tag.Value + } + + _, err := conn.AddTagsToStream(&kinesis.AddTagsToStreamInput{ + StreamName: aws.String(sn), + Tags: t, + }) + if err != nil { + return err + } + } + } + + return nil +} + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTagsKinesis(oldTags, newTags []*kinesis.Tag) ([]*kinesis.Tag, []*kinesis.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[*t.Key] = *t.Value + } + + // Build the list of what to remove + var remove []*kinesis.Tag + for _, t := range oldTags { + old, ok := create[*t.Key] + if !ok || old != *t.Value { + // Delete it! + remove = append(remove, t) + } + } + + return tagsFromMapKinesis(create), remove +} + +// tagsFromMap returns the tags for the given map of data. +func tagsFromMapKinesis(m map[string]interface{}) []*kinesis.Tag { + var result []*kinesis.Tag + for k, v := range m { + t := &kinesis.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + if !tagIgnoredKinesis(t) { + result = append(result, t) + } + } + + return result +} + +// tagsToMap turns the list of tags into a map. 
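+// Unlike the slice-based tag APIs above, Kinesis' AddTagsToStream takes a
+// map[string]*string, which is why setTagsKinesis converts the created
+// tags back into a map before issuing the API call.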
+func tagsToMapKinesis(ts []*kinesis.Tag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + if !tagIgnoredKinesis(t) { + result[*t.Key] = *t.Value + } + } + + return result +} + +// compare a tag against a list of strings and checks if it should +// be ignored or not +func tagIgnoredKinesis(t *kinesis.Tag) bool { + filter := []string{"^aws:"} + for _, v := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) + if r, _ := regexp.MatchString(v, *t.Key); r == true { + log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) + return true + } + } + return false +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_route53.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_route53.go new file mode 100644 index 000000000..372167291 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/tags_route53.go @@ -0,0 +1,111 @@ +package aws + +import ( + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/route53" + "github.com/hashicorp/terraform/helper/schema" +) + +// setTags is a helper to set the tags for a resource. It expects the +// tags field to be named "tags" +func setTagsR53(conn *route53.Route53, d *schema.ResourceData, resourceType string) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsR53(tagsFromMapR53(o), tagsFromMapR53(n)) + + // Set tags + r := make([]*string, len(remove)) + for i, t := range remove { + r[i] = t.Key + } + log.Printf("[DEBUG] Changing tags: \n\tadding: %#v\n\tremoving:%#v", create, remove) + req := &route53.ChangeTagsForResourceInput{ + ResourceId: aws.String(d.Id()), + ResourceType: aws.String(resourceType), + } + + if len(create) > 0 { + req.AddTags = create + } + if len(r) > 0 { + req.RemoveTagKeys = r + } + + _, err := conn.ChangeTagsForResource(req) + if err != nil { + return err + } + } + + return nil +} + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTagsR53(oldTags, newTags []*route53.Tag) ([]*route53.Tag, []*route53.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[*t.Key] = *t.Value + } + + // Build the list of what to remove + var remove []*route53.Tag + for _, t := range oldTags { + old, ok := create[*t.Key] + if !ok || old != *t.Value { + // Delete it! + remove = append(remove, t) + } + } + + return tagsFromMapR53(create), remove +} + +// tagsFromMap returns the tags for the given map of data. +func tagsFromMapR53(m map[string]interface{}) []*route53.Tag { + result := make([]*route53.Tag, 0, len(m)) + for k, v := range m { + t := &route53.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + if !tagIgnoredRoute53(t) { + result = append(result, t) + } + } + + return result +} + +// tagsToMap turns the list of tags into a map. 
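+// Route 53 differs from the services above in that a single
+// ChangeTagsForResource call carries both AddTags and RemoveTagKeys, so
+// setTagsR53 issues one request rather than separate tag/untag calls.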
+func tagsToMapR53(ts []*route53.Tag) map[string]string {
+	result := make(map[string]string)
+	for _, t := range ts {
+		if !tagIgnoredRoute53(t) {
+			result[*t.Key] = *t.Value
+		}
+	}
+
+	return result
+}
+
+// compare a tag against a list of strings and checks if it should
+// be ignored or not
+func tagIgnoredRoute53(t *route53.Tag) bool {
+	filter := []string{"^aws:"}
+	for _, v := range filter {
+		log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key)
+		if r, _ := regexp.MatchString(v, *t.Key); r == true {
+			log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value)
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/utils.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/utils.go
new file mode 100644
index 000000000..bfca044cf
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/utils.go
@@ -0,0 +1,42 @@
+package aws
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"reflect"
+	"regexp"
+)
+
+// base64Encode encodes data using base64.StdEncoding.EncodeToString unless
+// the input is already base64 encoded, in which case the original input is
+// returned unchanged.
+func base64Encode(data []byte) string {
+	// Check whether the data is already Base64 encoded; don't double-encode
+	if isBase64Encoded(data) {
+		return string(data)
+	}
+	// data has not been encoded; encode and return
+	return base64.StdEncoding.EncodeToString(data)
+}
+
+// isBase64Encoded reports whether data decodes cleanly as base64. Note this
+// is a heuristic: some plain strings also happen to be valid base64.
+func isBase64Encoded(data []byte) bool {
+	_, err := base64.StdEncoding.DecodeString(string(data))
+	return err == nil
+}
+
+func looksLikeJsonString(s interface{}) bool {
+	return regexp.MustCompile(`^\s*{`).MatchString(s.(string))
+}
+
+// jsonBytesEqual compares two JSON documents structurally, ignoring
+// formatting and key order.
+func jsonBytesEqual(b1, b2 []byte) bool {
+	var o1 interface{}
+	if err := json.Unmarshal(b1, &o1); err != nil {
+		return false
+	}
+
+	var o2 interface{}
+	if err := json.Unmarshal(b2, &o2); err != nil {
+		return false
+	}
+
+	return reflect.DeepEqual(o1, o2)
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/validators.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/validators.go
new file mode 100644
index 000000000..8102b456a
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/validators.go
@@ -0,0 +1,1351 @@
+package aws
+
+import (
+	"fmt"
+	"net"
+	"net/url"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/service/apigateway"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func validateRdsIdentifier(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"only lowercase alphanumeric characters and hyphens allowed in %q", k))
+	}
+	if !regexp.MustCompile(`^[a-z]`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"first character of %q must be a letter", k))
+	}
+	if regexp.MustCompile(`--`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot contain two consecutive hyphens", k))
+	}
+	if regexp.MustCompile(`-$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot end with a hyphen", k))
+	}
+	return
+}
+
+func validateRdsIdentifierPrefix(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"only lowercase
alphanumeric characters and hyphens allowed in %q", k)) + } + if !regexp.MustCompile(`^[a-z]`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "first character of %q must be a letter", k)) + } + if regexp.MustCompile(`--`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q cannot contain two consecutive hyphens", k)) + } + return +} + +func validateElastiCacheClusterId(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if (len(value) < 1) || (len(value) > 20) { + errors = append(errors, fmt.Errorf( + "%q (%q) must contain from 1 to 20 alphanumeric characters or hyphens", k, value)) + } + if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only lowercase alphanumeric characters and hyphens allowed in %q (%q)", k, value)) + } + if !regexp.MustCompile(`^[a-z]`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "first character of %q (%q) must be a letter", k, value)) + } + if regexp.MustCompile(`--`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q (%q) cannot contain two consecutive hyphens", k, value)) + } + if regexp.MustCompile(`-$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q (%q) cannot end with a hyphen", k, value)) + } + return +} + +func validateASGScheduleTimestamp(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + _, err := time.Parse(awsAutoscalingScheduleTimeLayout, value) + if err != nil { + errors = append(errors, fmt.Errorf( + "%q cannot be parsed as iso8601 Timestamp Format", value)) + } + + return +} + +// validateTagFilters confirms the "value" component of a tag filter is one of +// AWS's three allowed types. +func validateTagFilters(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "KEY_ONLY" && value != "VALUE_ONLY" && value != "KEY_AND_VALUE" { + errors = append(errors, fmt.Errorf( + "%q must be one of \"KEY_ONLY\", \"VALUE_ONLY\", or \"KEY_AND_VALUE\"", k)) + } + return +} + +func validateDbParamGroupName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only lowercase alphanumeric characters and hyphens allowed in %q", k)) + } + if !regexp.MustCompile(`^[a-z]`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "first character of %q must be a letter", k)) + } + if regexp.MustCompile(`--`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q cannot contain two consecutive hyphens", k)) + } + if regexp.MustCompile(`-$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q cannot end with a hyphen", k)) + } + if len(value) > 255 { + errors = append(errors, fmt.Errorf( + "%q cannot be greater than 255 characters", k)) + } + return +} + +func validateDbParamGroupNamePrefix(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only lowercase alphanumeric characters and hyphens allowed in %q", k)) + } + if !regexp.MustCompile(`^[a-z]`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "first character of %q must be a letter", k)) + } + if regexp.MustCompile(`--`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q cannot contain two consecutive hyphens", k)) + } + if len(value) > 255 { + errors = append(errors, fmt.Errorf( + "%q cannot be 
greater than 255 characters", k))
+	}
+	return
+}
+
+func validateStreamViewType(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	viewTypes := map[string]bool{
+		"KEYS_ONLY":          true,
+		"NEW_IMAGE":          true,
+		"OLD_IMAGE":          true,
+		"NEW_AND_OLD_IMAGES": true,
+	}
+
+	if !viewTypes[value] {
+		errors = append(errors, fmt.Errorf("%q must be a valid DynamoDB StreamViewType", k))
+	}
+	return
+}
+
+func validateElbName(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if len(value) == 0 {
+		return // short-circuit
+	}
+	if len(value) > 32 {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot be longer than 32 characters: %q", k, value))
+	}
+	if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"only alphanumeric characters and hyphens allowed in %q: %q",
+			k, value))
+	}
+	if regexp.MustCompile(`^-`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot begin with a hyphen: %q", k, value))
+	}
+	if regexp.MustCompile(`-$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot end with a hyphen: %q", k, value))
+	}
+	return
+}
+
+func validateElbNamePrefix(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"only alphanumeric characters and hyphens allowed in %q: %q",
+			k, value))
+	}
+	if len(value) > 6 {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot be longer than 6 characters: %q", k, value))
+	}
+	if regexp.MustCompile(`^-`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot begin with a hyphen: %q", k, value))
+	}
+	return
+}
+
+func validateEcrRepositoryName(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if len(value) < 2 {
+		errors = append(errors, fmt.Errorf(
+			"%q must be at least 2 characters long: %q", k, value))
+	}
+	if len(value) > 256 {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot be longer than 256 characters: %q", k, value))
+	}
+
+	// http://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_CreateRepository.html
+	pattern := `^(?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)*[a-z0-9]+(?:[._-][a-z0-9]+)*$`
+	if !regexp.MustCompile(pattern).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"%q doesn't comply with restrictions (%q): %q",
+			k, pattern, value))
+	}
+
+	return
+}
+
+func validateCloudWatchEventRuleName(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if len(value) > 64 {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot be longer than 64 characters: %q", k, value))
+	}
+
+	// http://docs.aws.amazon.com/AmazonCloudWatchEvents/latest/APIReference/API_PutRule.html
+	pattern := `^[\.\-_A-Za-z0-9]+$`
+	if !regexp.MustCompile(pattern).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"%q doesn't comply with restrictions (%q): %q",
+			k, pattern, value))
+	}
+
+	return
+}
+
+func validateMaxLength(length int) schema.SchemaValidateFunc {
+	return func(v interface{}, k string) (ws []string, errors []error) {
+		value := v.(string)
+		if len(value) > length {
+			errors = append(errors, fmt.Errorf(
+				"%q cannot be longer than %d characters: %q", k, length, value))
+		}
+		return
+	}
+}
+
+func validateIntegerInRange(min, max int) schema.SchemaValidateFunc {
+	return func(v interface{}, k string) (ws []string, errors []error) {
+		value := v.(int)
+		if value < min {
+			errors = append(errors, fmt.Errorf(
+				"%q
cannot be lower than %d: %d", k, min, value)) + } + if value > max { + errors = append(errors, fmt.Errorf( + "%q cannot be higher than %d: %d", k, max, value)) + } + return + } +} + +func validateCloudWatchEventTargetId(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 64 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 64 characters: %q", k, value)) + } + + // http://docs.aws.amazon.com/AmazonCloudWatchEvents/latest/APIReference/API_Target.html + pattern := `^[\.\-_A-Za-z0-9]+$` + if !regexp.MustCompile(pattern).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q doesn't comply with restrictions (%q): %q", + k, pattern, value)) + } + + return +} + +func validateLambdaFunctionName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 140 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 140 characters: %q", k, value)) + } + // http://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html + pattern := `^(arn:[\w-]+:lambda:)?([a-z]{2}-[a-z]+-\d{1}:)?(\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:(\$LATEST|[a-zA-Z0-9-_]+))?$` + if !regexp.MustCompile(pattern).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q doesn't comply with restrictions (%q): %q", + k, pattern, value)) + } + + return +} + +func validateLambdaQualifier(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 128 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 128 characters: %q", k, value)) + } + // http://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html + pattern := `^[a-zA-Z0-9$_-]+$` + if !regexp.MustCompile(pattern).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q doesn't comply with restrictions (%q): %q", + k, pattern, value)) + } + + return +} + +func validateLambdaPermissionAction(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + // http://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html + pattern := `^(lambda:[*]|lambda:[a-zA-Z]+|[*])$` + if !regexp.MustCompile(pattern).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q doesn't comply with restrictions (%q): %q", + k, pattern, value)) + } + + return +} + +func validateAwsAccountId(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + // http://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html + pattern := `^\d{12}$` + if !regexp.MustCompile(pattern).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q doesn't look like AWS Account ID (exactly 12 digits): %q", + k, value)) + } + + return +} + +func validateArn(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if value == "" { + return + } + + // http://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html + pattern := `^arn:[\w-]+:([a-zA-Z0-9\-])+:([a-z]{2}-(gov-)?[a-z]+-\d{1})?:(\d{12})?:(.*)$` + if !regexp.MustCompile(pattern).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q doesn't look like a valid ARN (%q): %q", + k, pattern, value)) + } + + return +} + +func validatePolicyStatementId(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if len(value) > 100 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 100 characters: %q", k, value)) + } + + // http://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html + pattern := `^[a-zA-Z0-9-_]+$` + if 
!regexp.MustCompile(pattern).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q doesn't look like a valid statement ID (%q): %q", + k, pattern, value)) + } + + return +} + +// validateCIDRNetworkAddress ensures that the string value is a valid CIDR that +// represents a network address - it adds an error otherwise +func validateCIDRNetworkAddress(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + _, ipnet, err := net.ParseCIDR(value) + if err != nil { + errors = append(errors, fmt.Errorf( + "%q must contain a valid CIDR, got error parsing: %s", k, err)) + return + } + + if ipnet == nil || value != ipnet.String() { + errors = append(errors, fmt.Errorf( + "%q must contain a valid network CIDR, got %q", k, value)) + } + + return +} + +func validateHTTPMethod(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + validMethods := map[string]bool{ + "ANY": true, + "DELETE": true, + "GET": true, + "HEAD": true, + "OPTIONS": true, + "PATCH": true, + "POST": true, + "PUT": true, + } + + if _, ok := validMethods[value]; !ok { + errors = append(errors, fmt.Errorf( + "%q contains an invalid method %q. Valid methods are either %q, %q, %q, %q, %q, %q, %q, or %q.", + k, value, "ANY", "DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT")) + } + return +} + +func validateLogMetricFilterName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if len(value) > 512 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 512 characters: %q", k, value)) + } + + // http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutMetricFilter.html + pattern := `^[^:*]+$` + if !regexp.MustCompile(pattern).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q isn't a valid log metric name (must not contain colon nor asterisk): %q", + k, value)) + } + + return +} + +func validateLogMetricFilterTransformationName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if len(value) > 255 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 255 characters: %q", k, value)) + } + + // http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_MetricTransformation.html + pattern := `^[^:*$]*$` + if !regexp.MustCompile(pattern).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q isn't a valid log metric transformation name (must not contain"+ + " colon, asterisk nor dollar sign): %q", + k, value)) + } + + return +} + +func validateLogGroupName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if len(value) > 512 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 512 characters: %q", k, value)) + } + + // http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateLogGroup.html + pattern := `^[\.\-_/#A-Za-z0-9]+$` + if !regexp.MustCompile(pattern).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q isn't a valid log group name (alphanumeric characters, underscores,"+ + " hyphens, slashes, hash signs and dots are allowed): %q", + k, value)) + } + + return +} + +func validateLogGroupNamePrefix(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if len(value) > 483 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 483 characters: %q", k, value)) + } + + // http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateLogGroup.html + pattern := `^[\.\-_/#A-Za-z0-9]+$` + if 
!regexp.MustCompile(pattern).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q isn't a valid log group name (alphanumeric characters, underscores,"+ + " hyphens, slashes, hash signs and dots are allowed): %q", + k, value)) + } + + return +} + +func validateS3BucketLifecycleTimestamp(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + _, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", value)) + if err != nil { + errors = append(errors, fmt.Errorf( + "%q cannot be parsed as RFC3339 Timestamp Format", value)) + } + + return +} + +func validateS3BucketLifecycleStorageClass(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != s3.TransitionStorageClassStandardIa && value != s3.TransitionStorageClassGlacier { + errors = append(errors, fmt.Errorf( + "%q must be one of '%q', '%q'", k, s3.TransitionStorageClassStandardIa, s3.TransitionStorageClassGlacier)) + } + + return +} + +func validateS3BucketReplicationRuleId(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 255 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 255 characters: %q", k, value)) + } + + return +} + +func validateS3BucketReplicationRulePrefix(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 1024 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 1024 characters: %q", k, value)) + } + + return +} + +func validateS3BucketReplicationDestinationStorageClass(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != s3.StorageClassStandard && value != s3.StorageClassStandardIa && value != s3.StorageClassReducedRedundancy { + errors = append(errors, fmt.Errorf( + "%q must be one of '%q', '%q' or '%q'", k, s3.StorageClassStandard, s3.StorageClassStandardIa, s3.StorageClassReducedRedundancy)) + } + + return +} + +func validateS3BucketReplicationRuleStatus(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != s3.ReplicationRuleStatusEnabled && value != s3.ReplicationRuleStatusDisabled { + errors = append(errors, fmt.Errorf( + "%q must be one of '%q' or '%q'", k, s3.ReplicationRuleStatusEnabled, s3.ReplicationRuleStatusDisabled)) + } + + return +} + +func validateS3BucketLifecycleRuleId(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 255 { + errors = append(errors, fmt.Errorf( + "%q cannot exceed 255 characters", k)) + } + return +} + +func validateDbEventSubscriptionName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only alphanumeric characters and hyphens allowed in %q", k)) + } + if len(value) > 255 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 255 characters", k)) + } + return +} + +func validateApiGatewayIntegrationPassthroughBehavior(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "WHEN_NO_MATCH" && value != "WHEN_NO_TEMPLATES" && value != "NEVER" { + errors = append(errors, fmt.Errorf( + "%q must be one of 'WHEN_NO_MATCH', 'WHEN_NO_TEMPLATES', 'NEVER'", k)) + } + return +} + +func validateJsonString(v interface{}, k string) (ws []string, errors []error) { + if _, err := normalizeJsonString(v); err != nil { + errors = append(errors, fmt.Errorf("%q contains an invalid JSON: %s", k, err)) + } 
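+	// ws stays empty: these validators only ever report hard errors, never
+	// warnings, per the (ws []string, errors []error) return contract of
+	// schema.SchemaValidateFunc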
+	return
+}
+
+func validateIAMPolicyJson(v interface{}, k string) (ws []string, errors []error) {
+	// IAM Policy documents need to be valid JSON, and pass legacy parsing
+	value := v.(string)
+	if len(value) < 1 {
+		errors = append(errors, fmt.Errorf("%q contains an invalid JSON policy", k))
+		return
+	}
+	if value[:1] != "{" {
+		errors = append(errors, fmt.Errorf("%q contains an invalid JSON policy", k))
+		return
+	}
+	if _, err := normalizeJsonString(v); err != nil {
+		errors = append(errors, fmt.Errorf("%q contains an invalid JSON: %s", k, err))
+	}
+	return
+}
+
+func validateCloudFormationTemplate(v interface{}, k string) (ws []string, errors []error) {
+	if looksLikeJsonString(v) {
+		if _, err := normalizeJsonString(v); err != nil {
+			errors = append(errors, fmt.Errorf("%q contains an invalid JSON: %s", k, err))
+		}
+	} else {
+		if _, err := checkYamlString(v); err != nil {
+			errors = append(errors, fmt.Errorf("%q contains an invalid YAML: %s", k, err))
+		}
+	}
+	return
+}
+
+func validateApiGatewayIntegrationType(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+
+	validTypes := map[string]bool{
+		"AWS":        true,
+		"AWS_PROXY":  true,
+		"HTTP":       true,
+		"HTTP_PROXY": true,
+		"MOCK":       true,
+	}
+
+	if _, ok := validTypes[value]; !ok {
+		errors = append(errors, fmt.Errorf(
+			"%q contains an invalid integration type %q. Valid types are either %q, %q, %q, %q, or %q.",
+			k, value, "AWS", "AWS_PROXY", "HTTP", "HTTP_PROXY", "MOCK"))
+	}
+	return
+}
+
+func validateApiGatewayIntegrationContentHandling(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+
+	validTypes := map[string]bool{
+		"CONVERT_TO_BINARY": true,
+		"CONVERT_TO_TEXT":   true,
+	}
+
+	if _, ok := validTypes[value]; !ok {
+		errors = append(errors, fmt.Errorf(
+			"%q contains an invalid content handling type %q. Valid types are either %q or %q.",
+			k, value, "CONVERT_TO_BINARY", "CONVERT_TO_TEXT"))
+	}
+	return
+}
+
+func validateSQSQueueName(v interface{}, k string) (errors []error) {
+	value := v.(string)
+	if len(value) > 80 {
+		errors = append(errors, fmt.Errorf("%q cannot be longer than 80 characters", k))
+	}
+
+	if !regexp.MustCompile(`^[0-9A-Za-z-_]+$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf("only alphanumeric characters, hyphens and underscores allowed in %q", k))
+	}
+	return
+}
+
+func validateSQSFifoQueueName(v interface{}, k string) (errors []error) {
+	value := v.(string)
+
+	if len(value) > 80 {
+		errors = append(errors, fmt.Errorf("%q cannot be longer than 80 characters", k))
+	}
+
+	if !regexp.MustCompile(`^[0-9A-Za-z-_.]+$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf("only alphanumeric characters, hyphens, underscores and periods allowed in %q", k))
+	}
+
+	if regexp.MustCompile(`^[^a-zA-Z0-9-_]`).MatchString(value) {
+		errors = append(errors, fmt.Errorf("FIFO queue name must start with one of these characters [a-zA-Z0-9-_]: %v", value))
+	}
+
+	if !regexp.MustCompile(`\.fifo$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf("FIFO queue name must end with \".fifo\": %v", value))
+	}
+
+	return
+}
+
+func validateSNSSubscriptionProtocol(v interface{}, k string) (ws []string, errors []error) {
+	value := strings.ToLower(v.(string))
+	forbidden := []string{"email", "sms"}
+	for _, f := range forbidden {
+		if strings.Contains(value, f) {
+			errors = append(
+				errors,
+				fmt.Errorf("Unsupported protocol (%s) for SNS Topic", value),
+			)
+		}
+	}
+	return
+}
+
+func validateSecurityRuleType(v interface{}, k string) (ws []string, errors []error) {
+	value := strings.ToLower(v.(string))
+
+	validTypes := map[string]bool{
+		"ingress": true,
+		"egress":  true,
+	}
+
+	if _, ok := validTypes[value]; !ok {
+		errors = append(errors, fmt.Errorf(
+			"%q contains an invalid Security Group Rule type %q. 
Valid types are either %q or %q.", + k, value, "ingress", "egress")) + } + return +} + +func validateOnceAWeekWindowFormat(v interface{}, k string) (ws []string, errors []error) { + // valid time format is "ddd:hh24:mi" + validTimeFormat := "(sun|mon|tue|wed|thu|fri|sat):([0-1][0-9]|2[0-3]):([0-5][0-9])" + validTimeFormatConsolidated := "^(" + validTimeFormat + "-" + validTimeFormat + "|)$" + + value := strings.ToLower(v.(string)) + if !regexp.MustCompile(validTimeFormatConsolidated).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must satisfy the format of \"ddd:hh24:mi-ddd:hh24:mi\".", k)) + } + return +} + +func validateOnceADayWindowFormat(v interface{}, k string) (ws []string, errors []error) { + // valid time format is "hh24:mi" + validTimeFormat := "([0-1][0-9]|2[0-3]):([0-5][0-9])" + validTimeFormatConsolidated := "^(" + validTimeFormat + "-" + validTimeFormat + "|)$" + + value := v.(string) + if !regexp.MustCompile(validTimeFormatConsolidated).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must satisfy the format of \"hh24:mi-hh24:mi\".", k)) + } + return +} + +func validateRoute53RecordType(v interface{}, k string) (ws []string, errors []error) { + // Valid Record types + // SOA, A, TXT, NS, CNAME, MX, NAPTR, PTR, SRV, SPF, AAAA + validTypes := map[string]struct{}{ + "SOA": {}, + "A": {}, + "TXT": {}, + "NS": {}, + "CNAME": {}, + "MX": {}, + "NAPTR": {}, + "PTR": {}, + "SRV": {}, + "SPF": {}, + "AAAA": {}, + } + + value := v.(string) + if _, ok := validTypes[value]; !ok { + errors = append(errors, fmt.Errorf( + "%q must be one of [SOA, A, TXT, NS, CNAME, MX, NAPTR, PTR, SRV, SPF, AAAA]", k)) + } + return +} + +// Validates that ECS Placement Constraints are set correctly +// Takes type, and expression as strings +func validateAwsEcsPlacementConstraint(constType, constExpr string) error { + switch constType { + case "distinctInstance": + // Expression can be nil for distinctInstance + return nil + case "memberOf": + if constExpr == "" { + return fmt.Errorf("Expression cannot be nil for 'memberOf' type") + } + default: + return fmt.Errorf("Unknown type provided: %q", constType) + } + return nil +} + +// Validates that an Ecs placement strategy is set correctly +// Takes type, and field as strings +func validateAwsEcsPlacementStrategy(stratType, stratField string) error { + switch stratType { + case "random": + // random does not need the field attribute set, could error, but it isn't read at the API level + return nil + case "spread": + // For the spread placement strategy, valid values are instanceId + // (or host, which has the same effect), or any platform or custom attribute + // that is applied to a container instance + // stratField is already cased to a string + return nil + case "binpack": + if stratField != "cpu" && stratField != "memory" { + return fmt.Errorf("Binpack type requires the field attribute to be either 'cpu' or 'memory'. Got: %s", + stratField) + } + default: + return fmt.Errorf("Unknown type %s. 
Must be one of 'random', 'spread', or 'binpack'.", stratType) + } + return nil +} + +func validateAwsEmrEbsVolumeType(v interface{}, k string) (ws []string, errors []error) { + validTypes := map[string]struct{}{ + "gp2": {}, + "io1": {}, + "standard": {}, + } + + value := v.(string) + + if _, ok := validTypes[value]; !ok { + errors = append(errors, fmt.Errorf( + "%q must be one of ['gp2', 'io1', 'standard']", k)) + } + return +} + +func validateSfnActivityName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 80 { + errors = append(errors, fmt.Errorf("%q cannot be longer than 80 characters", k)) + } + + return +} + +func validateSfnStateMachineDefinition(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 1048576 { + errors = append(errors, fmt.Errorf("%q cannot be longer than 1048576 characters", k)) + } + return +} + +func validateSfnStateMachineName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 80 { + errors = append(errors, fmt.Errorf("%q cannot be longer than 80 characters", k)) + } + + if !regexp.MustCompile(`^[a-zA-Z0-9-_]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must be composed with only these characters [a-zA-Z0-9-_]: %v", k, value)) + } + return +} + +func validateDmsCertificateId(v interface{}, k string) (ws []string, es []error) { + val := v.(string) + + if len(val) > 255 { + es = append(es, fmt.Errorf("%q must not be longer than 255 characters", k)) + } + if !regexp.MustCompile("^[a-zA-Z][a-zA-Z0-9-]+$").MatchString(val) { + es = append(es, fmt.Errorf("%q must start with a letter, only contain alphanumeric characters and hyphens", k)) + } + if strings.Contains(val, "--") { + es = append(es, fmt.Errorf("%q must not contain consecutive hyphens", k)) + } + if strings.HasSuffix(val, "-") { + es = append(es, fmt.Errorf("%q must not end in a hyphen", k)) + } + + return +} + +func validateDmsEndpointId(v interface{}, k string) (ws []string, es []error) { + val := v.(string) + + if len(val) > 255 { + es = append(es, fmt.Errorf("%q must not be longer than 255 characters", k)) + } + if !regexp.MustCompile("^[a-zA-Z][a-zA-Z0-9-]+$").MatchString(val) { + es = append(es, fmt.Errorf("%q must start with a letter, only contain alphanumeric characters and hyphens", k)) + } + if strings.Contains(val, "--") { + es = append(es, fmt.Errorf("%q must not contain consecutive hyphens", k)) + } + if strings.HasSuffix(val, "-") { + es = append(es, fmt.Errorf("%q must not end in a hyphen", k)) + } + + return +} + +func validateDmsReplicationInstanceId(v interface{}, k string) (ws []string, es []error) { + val := v.(string) + + if len(val) > 63 { + es = append(es, fmt.Errorf("%q must not be longer than 63 characters", k)) + } + if !regexp.MustCompile("^[a-zA-Z][a-zA-Z0-9-]+$").MatchString(val) { + es = append(es, fmt.Errorf("%q must start with a letter, only contain alphanumeric characters and hyphens", k)) + } + if strings.Contains(val, "--") { + es = append(es, fmt.Errorf("%q must not contain consecutive hyphens", k)) + } + if strings.HasSuffix(val, "-") { + es = append(es, fmt.Errorf("%q must not end in a hyphen", k)) + } + + return +} + +func validateDmsReplicationSubnetGroupId(v interface{}, k string) (ws []string, es []error) { + val := v.(string) + + if val == "default" { + es = append(es, fmt.Errorf("%q must not be default", k)) + } + if len(val) > 255 { + es = append(es, fmt.Errorf("%q must not be longer than 255 characters", 
k)) + } + if !regexp.MustCompile(`^[a-zA-Z0-9. _-]+$`).MatchString(val) { + es = append(es, fmt.Errorf("%q must only contain alphanumeric characters, periods, spaces, underscores and hyphens", k)) + } + + return +} + +func validateDmsReplicationTaskId(v interface{}, k string) (ws []string, es []error) { + val := v.(string) + + if len(val) > 255 { + es = append(es, fmt.Errorf("%q must not be longer than 255 characters", k)) + } + if !regexp.MustCompile("^[a-zA-Z][a-zA-Z0-9-]+$").MatchString(val) { + es = append(es, fmt.Errorf("%q must start with a letter, only contain alphanumeric characters and hyphens", k)) + } + if strings.Contains(val, "--") { + es = append(es, fmt.Errorf("%q must not contain consecutive hyphens", k)) + } + if strings.HasSuffix(val, "-") { + es = append(es, fmt.Errorf("%q must not end in a hyphen", k)) + } + + return +} + +func validateAppautoscalingScalableDimension(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + dimensions := map[string]bool{ + "ecs:service:DesiredCount": true, + "ec2:spot-fleet-request:TargetCapacity": true, + "elasticmapreduce:instancegroup:InstanceCount": true, + } + + if !dimensions[value] { + errors = append(errors, fmt.Errorf("%q must be a valid scalable dimension value: %q", k, value)) + } + return +} + +func validateAppautoscalingServiceNamespace(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + namespaces := map[string]bool{ + "ecs": true, + "ec2": true, + "elasticmapreduce": true, + } + + if !namespaces[value] { + errors = append(errors, fmt.Errorf("%q must be a valid service namespace value: %q", k, value)) + } + return +} + +func validateConfigRuleSourceOwner(v interface{}, k string) (ws []string, errors []error) { + validOwners := []string{ + "CUSTOM_LAMBDA", + "AWS", + } + owner := v.(string) + for _, o := range validOwners { + if owner == o { + return + } + } + errors = append(errors, fmt.Errorf( + "%q contains an invalid owner %q. Valid owners are %q.", + k, owner, validOwners)) + return +} + +func validateConfigExecutionFrequency(v interface{}, k string) (ws []string, errors []error) { + validFrequencies := []string{ + "One_Hour", + "Three_Hours", + "Six_Hours", + "Twelve_Hours", + "TwentyFour_Hours", + } + frequency := v.(string) + for _, f := range validFrequencies { + if frequency == f { + return + } + } + errors = append(errors, fmt.Errorf( + "%q contains an invalid frequency %q. 
Valid frequencies are %q.", + k, frequency, validFrequencies)) + return +} + +func validateAccountAlias(v interface{}, k string) (ws []string, es []error) { + val := v.(string) + + if (len(val) < 3) || (len(val) > 63) { + es = append(es, fmt.Errorf("%q must contain from 3 to 63 alphanumeric characters or hyphens", k)) + } + if !regexp.MustCompile("^[a-z0-9][a-z0-9-]+$").MatchString(val) { + es = append(es, fmt.Errorf("%q must start with an alphanumeric character and only contain lowercase alphanumeric characters and hyphens", k)) + } + if strings.Contains(val, "--") { + es = append(es, fmt.Errorf("%q must not contain consecutive hyphens", k)) + } + if strings.HasSuffix(val, "-") { + es = append(es, fmt.Errorf("%q must not end in a hyphen", k)) + } + return +} + +func validateApiGatewayApiKeyValue(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) < 30 { + errors = append(errors, fmt.Errorf( + "%q must be at least 30 characters long", k)) + } + if len(value) > 128 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 128 characters", k)) + } + return +} + +func validateIamRolePolicyName(v interface{}, k string) (ws []string, errors []error) { + // https://github.com/boto/botocore/blob/2485f5c/botocore/data/iam/2010-05-08/service-2.json#L8291-L8296 + value := v.(string) + if len(value) > 128 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 128 characters", k)) + } + if !regexp.MustCompile("^[\\w+=,.@-]+$").MatchString(value) { + errors = append(errors, fmt.Errorf("%q must match [\\w+=,.@-]", k)) + } + return +} + +func validateIamRolePolicyNamePrefix(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 100 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 100 characters", k)) + } + if !regexp.MustCompile("^[\\w+=,.@-]+$").MatchString(value) { + errors = append(errors, fmt.Errorf("%q must match [\\w+=,.@-]", k)) + } + return +} + +func validateApiGatewayUsagePlanQuotaSettingsPeriod(v interface{}, k string) (ws []string, errors []error) { + validPeriods := []string{ + apigateway.QuotaPeriodTypeDay, + apigateway.QuotaPeriodTypeWeek, + apigateway.QuotaPeriodTypeMonth, + } + period := v.(string) + for _, f := range validPeriods { + if period == f { + return + } + } + errors = append(errors, fmt.Errorf( + "%q contains an invalid period %q. 
Valid period are %q.", + k, period, validPeriods)) + return +} + +func validateApiGatewayUsagePlanQuotaSettings(v map[string]interface{}) (errors []error) { + period := v["period"].(string) + offset := v["offset"].(int) + + if period == apigateway.QuotaPeriodTypeDay && offset != 0 { + errors = append(errors, fmt.Errorf("Usage Plan quota offset must be zero in the DAY period")) + } + + if period == apigateway.QuotaPeriodTypeWeek && (offset < 0 || offset > 6) { + errors = append(errors, fmt.Errorf("Usage Plan quota offset must be between 0 and 6 inclusive in the WEEK period")) + } + + if period == apigateway.QuotaPeriodTypeMonth && (offset < 0 || offset > 27) { + errors = append(errors, fmt.Errorf("Usage Plan quota offset must be between 0 and 27 inclusive in the MONTH period")) + } + + return +} + +func validateDbSubnetGroupName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[ .0-9a-z-_]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only lowercase alphanumeric characters, hyphens, underscores, periods, and spaces allowed in %q", k)) + } + if len(value) > 255 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 255 characters", k)) + } + if regexp.MustCompile(`(?i)^default$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q is not allowed as %q", "Default", k)) + } + return +} + +func validateDbSubnetGroupNamePrefix(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[ .0-9a-z-_]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only lowercase alphanumeric characters, hyphens, underscores, periods, and spaces allowed in %q", k)) + } + if len(value) > 229 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 229 characters", k)) + } + return +} + +func validateDbOptionGroupName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[a-z]`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "first character of %q must be a letter", k)) + } + if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only lowercase alphanumeric characters and hyphens allowed in %q", k)) + } + if regexp.MustCompile(`--`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q cannot contain two consecutive hyphens", k)) + } + if regexp.MustCompile(`-$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q cannot end with a hyphen", k)) + } + if len(value) > 255 { + errors = append(errors, fmt.Errorf( + "%q cannot be greater than 255 characters", k)) + } + return +} + +func validateDbOptionGroupNamePrefix(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[a-z]`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "first character of %q must be a letter", k)) + } + if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only alphanumeric characters and hyphens allowed in %q", k)) + } + if regexp.MustCompile(`--`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q cannot contain two consecutive hyphens", k)) + } + if len(value) > 229 { + errors = append(errors, fmt.Errorf( + "%q cannot be greater than 229 characters", k)) + } + return +} + +func validateAwsAlbTargetGroupName(v interface{}, k string) (ws []string, errors []error) { + name := v.(string) + if len(name) > 32 { + errors 
+		errors = append(errors, fmt.Errorf("%q (%q) cannot be longer than '32' characters", k, name))
+	}
+	return
+}
+
+func validateAwsAlbTargetGroupNamePrefix(v interface{}, k string) (ws []string, errors []error) {
+	name := v.(string)
+	if len(name) > 6 {
+		errors = append(errors, fmt.Errorf("%q (%q) cannot be longer than '6' characters", k, name))
+	}
+	return
+}
+
+func validateOpenIdURL(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	u, err := url.Parse(value)
+	if err != nil {
+		errors = append(errors, fmt.Errorf("%q has to be a valid URL", k))
+		return
+	}
+	if u.Scheme != "https" {
+		errors = append(errors, fmt.Errorf("%q has to use HTTPS scheme (i.e. begin with https://)", k))
+	}
+	if len(u.Query()) > 0 {
+		errors = append(errors, fmt.Errorf("%q cannot contain query parameters per the OIDC standard", k))
+	}
+	return
+}
+
+func validateAwsKmsName(v interface{}, k string) (ws []string, es []error) {
+	value := v.(string)
+	if !regexp.MustCompile(`^(alias\/)[a-zA-Z0-9:/_-]+$`).MatchString(value) {
+		es = append(es, fmt.Errorf(
+			"%q must begin with 'alias/' and be comprised of only [a-zA-Z0-9:/_-]", k))
+	}
+	return
+}
+
+func validateCognitoIdentityPoolName(v interface{}, k string) (ws []string, errors []error) {
+	val := v.(string)
+	if !regexp.MustCompile("^[\\w _]+$").MatchString(val) {
+		errors = append(errors, fmt.Errorf("%q must contain only alphanumeric characters, underscores and spaces", k))
+	}
+
+	return
+}
+
+func validateCognitoProviderDeveloperName(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if len(value) > 100 {
+		errors = append(errors, fmt.Errorf("%q cannot be longer than 100 characters", k))
+	}
+
+	if !regexp.MustCompile("^[\\w._-]+$").MatchString(value) {
+		errors = append(errors, fmt.Errorf("%q must contain only alphanumeric characters, dots, underscores and hyphens", k))
+	}
+
+	return
+}
+
+func validateCognitoSupportedLoginProviders(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if len(value) < 1 {
+		errors = append(errors, fmt.Errorf("%q cannot be less than 1 character", k))
+	}
+
+	if len(value) > 128 {
+		errors = append(errors, fmt.Errorf("%q cannot be longer than 128 characters", k))
+	}
+
+	if !regexp.MustCompile("^[\\w.;_/-]+$").MatchString(value) {
+		errors = append(errors, fmt.Errorf("%q must contain only alphanumeric characters, dots, semicolons, underscores, slashes and hyphens", k))
+	}
+
+	return
+}
+
+func validateCognitoIdentityProvidersClientId(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if len(value) < 1 {
+		errors = append(errors, fmt.Errorf("%q cannot be less than 1 character", k))
+	}
+
+	if len(value) > 128 {
+		errors = append(errors, fmt.Errorf("%q cannot be longer than 128 characters", k))
+	}
+
+	if !regexp.MustCompile("^[\\w_]+$").MatchString(value) {
+		errors = append(errors, fmt.Errorf("%q must contain only alphanumeric characters and underscores", k))
+	}
+
+	return
+}
+
+func validateCognitoIdentityProvidersProviderName(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if len(value) < 1 {
+		errors = append(errors, fmt.Errorf("%q cannot be less than 1 character", k))
+	}
+
+	if len(value) > 128 {
+		errors = append(errors, fmt.Errorf("%q cannot be longer than 128 characters", k))
+	}
+
+	if !regexp.MustCompile("^[\\w._:/-]+$").MatchString(value) {
+		errors = append(errors, fmt.Errorf("%q must contain only alphanumeric characters, dots, underscores, colons, slashes and hyphens", k))
+	}
+
+	return
+}
+
+func validateWafMetricName(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if !regexp.MustCompile(`^[0-9A-Za-z]+$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"Only alphanumeric characters allowed in %q: %q",
+			k, value))
+	}
+	return
+}
+
+func validateIamRoleDescription(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+
+	if len(value) > 1000 {
+		errors = append(errors, fmt.Errorf("%q cannot be longer than 1000 characters", k))
+	}
+
+	if !regexp.MustCompile(`^[\p{L}\p{M}\p{Z}\p{S}\p{N}\p{P}]*$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"Only alphanumeric & accented characters allowed in %q: %q (Must satisfy regular expression pattern: [\\p{L}\\p{M}\\p{Z}\\p{S}\\p{N}\\p{P}]*)",
+			k, value))
+	}
+	return
+}
+
+func validateSsmParameterType(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	types := map[string]bool{
+		"String":       true,
+		"StringList":   true,
+		"SecureString": true,
+	}
+
+	if !types[value] {
+		errors = append(errors, fmt.Errorf("Parameter type %s is invalid. Valid types are String, StringList or SecureString", value))
+	}
+	return
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/waf_token_handlers.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/waf_token_handlers.go
new file mode 100644
index 000000000..ac99f0950
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/waf_token_handlers.go
@@ -0,0 +1,49 @@
+package aws
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/waf"
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/terraform/helper/resource"
+)
+
+type WafRetryer struct {
+	Connection *waf.WAF
+	Region     string
+}
+
+type withTokenFunc func(token *string) (interface{}, error)
+
+func (t *WafRetryer) RetryWithToken(f withTokenFunc) (interface{}, error) {
+	awsMutexKV.Lock(t.Region)
+	defer awsMutexKV.Unlock(t.Region)
+
+	var out interface{}
+	err := resource.Retry(15*time.Minute, func() *resource.RetryError {
+		var err error
+		var tokenOut *waf.GetChangeTokenOutput
+
+		tokenOut, err = t.Connection.GetChangeToken(&waf.GetChangeTokenInput{})
+		if err != nil {
+			return resource.NonRetryableError(errwrap.Wrapf("Failed to acquire change token: {{err}}", err))
+		}
+
+		out, err = f(tokenOut.ChangeToken)
+		if err != nil {
+			awsErr, ok := err.(awserr.Error)
+			if ok && awsErr.Code() == "WAFStaleDataException" {
+				return resource.RetryableError(err)
+			}
+			return resource.NonRetryableError(err)
+		}
+		return nil
+	})
+
+	return out, err
+}
+
+func newWafRetryer(conn *waf.WAF, region string) *WafRetryer {
+	return &WafRetryer{Connection: conn, Region: region}
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-aws/aws/wafregionl_token_handlers.go b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/wafregionl_token_handlers.go
new file mode 100644
index 000000000..da3d8b58f
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-aws/aws/wafregionl_token_handlers.go
@@ -0,0 +1,50 @@
+package aws
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/waf"
+	"github.com/aws/aws-sdk-go/service/wafregional"
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/terraform/helper/resource"
+)
+
+type WafRegionalRetryer struct {
+	Connection *wafregional.WAFRegional
+	Region     string
+}
+
+type withRegionalTokenFunc
func(token *string) (interface{}, error) + +func (t *WafRegionalRetryer) RetryWithToken(f withRegionalTokenFunc) (interface{}, error) { + awsMutexKV.Lock(t.Region) + defer awsMutexKV.Unlock(t.Region) + + var out interface{} + err := resource.Retry(15*time.Minute, func() *resource.RetryError { + var err error + var tokenOut *waf.GetChangeTokenOutput + + tokenOut, err = t.Connection.GetChangeToken(&waf.GetChangeTokenInput{}) + if err != nil { + return resource.NonRetryableError(errwrap.Wrapf("Failed to acquire change token: {{err}}", err)) + } + + out, err = f(tokenOut.ChangeToken) + if err != nil { + awsErr, ok := err.(awserr.Error) + if ok && awsErr.Code() == "WAFStaleDataException" { + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) + + return out, err +} + +func newWafRegionalRetryer(conn *wafregional.WAFRegional, region string) *WafRegionalRetryer { + return &WafRegionalRetryer{Connection: conn, Region: region} +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/LICENSE b/vendor/github.com/terraform-providers/terraform-provider-openstack/LICENSE new file mode 100644 index 000000000..a612ad981 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. 
"Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. 
Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. 
Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. 
This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. 
diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/config.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/config.go new file mode 100644 index 000000000..8a2ced2ec --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/config.go @@ -0,0 +1,203 @@ +package openstack + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "net/http" + "os" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack" + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth" + "github.com/hashicorp/terraform/helper/pathorcontents" + "github.com/hashicorp/terraform/terraform" +) + +type Config struct { + CACertFile string + ClientCertFile string + ClientKeyFile string + DomainID string + DomainName string + EndpointType string + IdentityEndpoint string + Insecure bool + Password string + Swauth bool + TenantID string + TenantName string + Token string + Username string + UserID string + + osClient *gophercloud.ProviderClient +} + +func (c *Config) loadAndValidate() error { + validEndpoint := false + validEndpoints := []string{ + "internal", "internalURL", + "admin", "adminURL", + "public", "publicURL", + "", + } + + for _, endpoint := range validEndpoints { + if c.EndpointType == endpoint { + validEndpoint = true + } + } + + if !validEndpoint { + return fmt.Errorf("Invalid endpoint type provided") + } + + ao := gophercloud.AuthOptions{ + DomainID: c.DomainID, + DomainName: c.DomainName, + IdentityEndpoint: c.IdentityEndpoint, + Password: c.Password, + TenantID: c.TenantID, + TenantName: c.TenantName, + TokenID: c.Token, + Username: c.Username, + UserID: c.UserID, + } + + client, err := openstack.NewClient(ao.IdentityEndpoint) + if err != nil { + return err + } + + // Set UserAgent + client.UserAgent.Prepend(terraform.UserAgentString()) + + config := &tls.Config{} + if c.CACertFile != "" { + caCert, _, err := pathorcontents.Read(c.CACertFile) + if err != nil { + return fmt.Errorf("Error reading CA Cert: %s", err) + } + + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM([]byte(caCert)) + config.RootCAs = caCertPool + } + + if c.Insecure { + config.InsecureSkipVerify = true + } + + if c.ClientCertFile != "" && c.ClientKeyFile != "" { + clientCert, _, err := pathorcontents.Read(c.ClientCertFile) + if err != nil { + return fmt.Errorf("Error reading Client Cert: %s", err) + } + clientKey, _, err := pathorcontents.Read(c.ClientKeyFile) + if err != nil { + return fmt.Errorf("Error reading Client Key: %s", err) + } + + cert, err := tls.X509KeyPair([]byte(clientCert), []byte(clientKey)) + if err != nil { + return err + } + + config.Certificates = []tls.Certificate{cert} + config.BuildNameToCertificate() + } + + // if OS_DEBUG is set, log the requests and responses + var osDebug bool + if os.Getenv("OS_DEBUG") != "" { + osDebug = true + } + + transport := &http.Transport{Proxy: http.ProxyFromEnvironment, TLSClientConfig: config} + client.HTTPClient = http.Client{ + Transport: &LogRoundTripper{ + Rt: transport, + OsDebug: osDebug, + }, + } + + // If using Swift Authentication, there's no need to validate authentication normally. 
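+	// Editorial sketch (not part of the upstream file): when Swauth is set,
+	// Keystone is skipped entirely here and the credentials are only exercised
+	// lazily by the object storage client below. A hypothetical config taking
+	// this path (endpoint and credentials are made up):
+	//
+	//	c := &Config{
+	//		IdentityEndpoint: "https://swift.example.com/auth/v1.0",
+	//		Username:         "test:tester",
+	//		Password:         "testing",
+	//		Swauth:           true,
+	//	}
+	//	if err := c.loadAndValidate(); err != nil {
+	//		// handle error
+	//	}
+	//	objectClient, err := c.objectStorageV1Client("RegionOne")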
+ if !c.Swauth { + err = openstack.Authenticate(client, ao) + if err != nil { + return err + } + } + + c.osClient = client + + return nil +} + +func (c *Config) blockStorageV1Client(region string) (*gophercloud.ServiceClient, error) { + return openstack.NewBlockStorageV1(c.osClient, gophercloud.EndpointOpts{ + Region: region, + Availability: c.getEndpointType(), + }) +} + +func (c *Config) blockStorageV2Client(region string) (*gophercloud.ServiceClient, error) { + return openstack.NewBlockStorageV2(c.osClient, gophercloud.EndpointOpts{ + Region: region, + Availability: c.getEndpointType(), + }) +} + +func (c *Config) computeV2Client(region string) (*gophercloud.ServiceClient, error) { + return openstack.NewComputeV2(c.osClient, gophercloud.EndpointOpts{ + Region: region, + Availability: c.getEndpointType(), + }) +} + +func (c *Config) dnsV2Client(region string) (*gophercloud.ServiceClient, error) { + return openstack.NewDNSV2(c.osClient, gophercloud.EndpointOpts{ + Region: region, + Availability: c.getEndpointType(), + }) +} + +func (c *Config) imageV2Client(region string) (*gophercloud.ServiceClient, error) { + return openstack.NewImageServiceV2(c.osClient, gophercloud.EndpointOpts{ + Region: region, + Availability: c.getEndpointType(), + }) +} + +func (c *Config) networkingV2Client(region string) (*gophercloud.ServiceClient, error) { + return openstack.NewNetworkV2(c.osClient, gophercloud.EndpointOpts{ + Region: region, + Availability: c.getEndpointType(), + }) +} + +func (c *Config) objectStorageV1Client(region string) (*gophercloud.ServiceClient, error) { + // If Swift Authentication is being used, return a swauth client. + if c.Swauth { + return swauth.NewObjectStorageV1(c.osClient, swauth.AuthOpts{ + User: c.Username, + Key: c.Password, + }) + } + + return openstack.NewObjectStorageV1(c.osClient, gophercloud.EndpointOpts{ + Region: region, + Availability: c.getEndpointType(), + }) +} + +func (c *Config) getEndpointType() gophercloud.Availability { + if c.EndpointType == "internal" || c.EndpointType == "internalURL" { + return gophercloud.AvailabilityInternal + } + if c.EndpointType == "admin" || c.EndpointType == "adminURL" { + return gophercloud.AvailabilityAdmin + } + return gophercloud.AvailabilityPublic +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_images_image_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_images_image_v2.go new file mode 100644 index 000000000..da03b2be7 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_images_image_v2.go @@ -0,0 +1,255 @@ +package openstack + +import ( + "fmt" + "log" + "sort" + + "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images" + "github.com/gophercloud/gophercloud/pagination" + + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceImagesImageV2() *schema.Resource { + return &schema.Resource{ + Read: dataSourceImagesImageV2Read, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "visibility": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "owner": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "size_min": { + Type: schema.TypeInt, + 
Optional: true,
+				ForceNew: true,
+			},
+
+			"size_max": {
+				Type:     schema.TypeInt,
+				Optional: true,
+				ForceNew: true,
+			},
+
+			"sort_key": {
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+				Default:  "name",
+			},
+
+			"sort_direction": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				ForceNew:     true,
+				Default:      "asc",
+				ValidateFunc: dataSourceImagesImageV2SortDirection,
+			},
+
+			"tag": {
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+
+			"most_recent": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				Default:  false,
+				ForceNew: true,
+			},
+
+			// Computed values
+			"container_format": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"disk_format": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"min_disk_gb": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+
+			"min_ram_mb": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+
+			"protected": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+
+			"checksum": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"size_bytes": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+
+			"metadata": {
+				Type:     schema.TypeMap,
+				Computed: true,
+			},
+
+			"updated_at": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"file": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"schema": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+// dataSourceImagesImageV2Read performs the image lookup.
+func dataSourceImagesImageV2Read(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	imageClient, err := config.imageV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack image client: %s", err)
+	}
+
+	visibility := resourceImagesImageV2VisibilityFromString(d.Get("visibility").(string))
+
+	listOpts := images.ListOpts{
+		Name:       d.Get("name").(string),
+		Visibility: visibility,
+		Owner:      d.Get("owner").(string),
+		Status:     images.ImageStatusActive,
+		SizeMin:    int64(d.Get("size_min").(int)),
+		SizeMax:    int64(d.Get("size_max").(int)),
+		SortKey:    d.Get("sort_key").(string),
+		SortDir:    d.Get("sort_direction").(string),
+		Tag:        d.Get("tag").(string),
+	}
+
+	var allImages []images.Image
+	pager := images.List(imageClient, listOpts)
+	err = pager.EachPage(func(page pagination.Page) (bool, error) {
+		images, err := images.ExtractImages(page)
+		if err != nil {
+			return false, err
+		}
+
+		for _, i := range images {
+			allImages = append(allImages, i)
+		}
+
+		return true, nil
+	})
+
+	if err != nil {
+		return fmt.Errorf("Unable to retrieve images: %s", err)
+	}
+
+	var image images.Image
+	if len(allImages) < 1 {
+		return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
+	}
+
+	if len(allImages) > 1 {
+		recent := d.Get("most_recent").(bool)
+		log.Printf("[DEBUG] openstack_images_image: multiple results found and `most_recent` is set to: %t", recent)
+		if recent {
+			image = mostRecentImage(allImages)
+		} else {
+			return fmt.Errorf("Your query returned more than one result. Please try more " +
+				"specific search criteria, or set the `most_recent` attribute to true.")
+		}
+	} else {
+		image = allImages[0]
+	}
+
+	log.Printf("[DEBUG] openstack_images_image: Single Image found: %s", image.ID)
+	return dataSourceImagesImageV2Attributes(d, &image)
+}
+
+// dataSourceImagesImageV2Attributes populates the fields of an Image resource.
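+//
+// Editorial note: this helper also calls d.Set for keys such as "tags" and
+// "created_at" that the schema above does not declare; helper/schema appears
+// to reject writes to undeclared keys, and since the d.Set return values are
+// unchecked here, those particular values would be dropped silently.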
+func dataSourceImagesImageV2Attributes(d *schema.ResourceData, image *images.Image) error {
+	log.Printf("[DEBUG] openstack_images_image details: %#v", image)
+
+	d.SetId(image.ID)
+	d.Set("name", image.Name)
+	d.Set("tags", image.Tags)
+	d.Set("container_format", image.ContainerFormat)
+	d.Set("disk_format", image.DiskFormat)
+	d.Set("min_disk_gb", image.MinDiskGigabytes)
+	d.Set("min_ram_mb", image.MinRAMMegabytes)
+	d.Set("owner", image.Owner)
+	d.Set("protected", image.Protected)
+	d.Set("visibility", image.Visibility)
+	d.Set("checksum", image.Checksum)
+	d.Set("size_bytes", image.SizeBytes)
+	d.Set("metadata", image.Metadata)
+	d.Set("created_at", image.CreatedAt)
+	d.Set("updated_at", image.UpdatedAt)
+	d.Set("file", image.File)
+	d.Set("schema", image.Schema)
+
+	return nil
+}
+
+type imageSort []images.Image
+
+func (a imageSort) Len() int      { return len(a) }
+func (a imageSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a imageSort) Less(i, j int) bool {
+	itime := a[i].UpdatedAt
+	jtime := a[j].UpdatedAt
+	return itime.Unix() < jtime.Unix()
+}
+
+// Returns the most recent Image out of a slice of images.
+func mostRecentImage(images []images.Image) images.Image {
+	sortedImages := images
+	sort.Sort(imageSort(sortedImages))
+	return sortedImages[len(sortedImages)-1]
+}
+
+func dataSourceImagesImageV2SortDirection(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if value != "asc" && value != "desc" {
+		err := fmt.Errorf("%s must be either asc or desc", k)
+		errors = append(errors, err)
+	}
+	return
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_networking_network_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_networking_network_v2.go
new file mode 100644
index 000000000..f7615c41a
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_networking_network_v2.go
@@ -0,0 +1,117 @@
+package openstack
+
+import (
+	"fmt"
+	"log"
+	"strconv"
+
+	"github.com/hashicorp/terraform/helper/schema"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/networks"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/subnets"
+)
+
+func dataSourceNetworkingNetworkV2() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceNetworkingNetworkV2Read,
+
+		Schema: map[string]*schema.Schema{
+			"network_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+			"matching_subnet_cidr": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+			"region": &schema.Schema{
+				Type:        schema.TypeString,
+				Required:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""),
+			},
+			"tenant_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.MultiEnvDefaultFunc([]string{
+					"OS_TENANT_ID",
+					"OS_PROJECT_ID",
+				}, ""),
+				Description: descriptions["tenant_id"],
+			},
+			"admin_state_up": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"shared": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func dataSourceNetworkingNetworkV2Read(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	listOpts := networks.ListOpts{
d.Get("network_id").(string), + Name: d.Get("name").(string), + TenantID: d.Get("tenant_id").(string), + Status: "ACTIVE", + } + + pages, err := networks.List(networkingClient, listOpts).AllPages() + allNetworks, err := networks.ExtractNetworks(pages) + if err != nil { + return fmt.Errorf("Unable to retrieve networks: %s", err) + } + + var refinedNetworks []networks.Network + if cidr := d.Get("matching_subnet_cidr").(string); cidr != "" { + for _, n := range allNetworks { + for _, s := range n.Subnets { + subnet, err := subnets.Get(networkingClient, s).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + continue + } + return fmt.Errorf("Unable to retrieve network subnet: %s", err) + } + if cidr == subnet.CIDR { + refinedNetworks = append(refinedNetworks, n) + } + } + } + } else { + refinedNetworks = allNetworks + } + + if len(refinedNetworks) < 1 { + return fmt.Errorf("Your query returned no results. " + + "Please change your search criteria and try again.") + } + + if len(refinedNetworks) > 1 { + return fmt.Errorf("Your query returned more than one result." + + " Please try a more specific search criteria") + } + + network := refinedNetworks[0] + + log.Printf("[DEBUG] Retrieved Network %s: %+v", network.ID, network) + d.SetId(network.ID) + + d.Set("name", network.Name) + d.Set("admin_state_up", strconv.FormatBool(network.AdminStateUp)) + d.Set("shared", strconv.FormatBool(network.Shared)) + d.Set("tenant_id", network.TenantID) + d.Set("region", GetRegion(d)) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/provider.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/provider.go new file mode 100644 index 000000000..49a2d45ec --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/provider.go @@ -0,0 +1,247 @@ +package openstack + +import ( + "github.com/hashicorp/terraform/helper/mutexkv" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +// This is a global MutexKV for use within this plugin. +var osMutexKV = mutexkv.NewMutexKV() + +// Provider returns a schema.Provider for OpenStack. 
+func Provider() terraform.ResourceProvider {
+	return &schema.Provider{
+		Schema: map[string]*schema.Schema{
+			"auth_url": &schema.Schema{
+				Type:        schema.TypeString,
+				Required:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_AUTH_URL", nil),
+				Description: descriptions["auth_url"],
+			},
+
+			"user_name": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_USERNAME", ""),
+				Description: descriptions["user_name"],
+			},
+
+			"user_id": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_USER_ID", ""),
+				Description: descriptions["user_id"],
+			},
+
+			"tenant_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.MultiEnvDefaultFunc([]string{
+					"OS_TENANT_ID",
+					"OS_PROJECT_ID",
+				}, ""),
+				Description: descriptions["tenant_id"],
+			},
+
+			"tenant_name": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.MultiEnvDefaultFunc([]string{
+					"OS_TENANT_NAME",
+					"OS_PROJECT_NAME",
+				}, ""),
+				Description: descriptions["tenant_name"],
+			},
+
+			"password": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Sensitive:   true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_PASSWORD", ""),
+				Description: descriptions["password"],
+			},
+
+			"token": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_AUTH_TOKEN", ""),
+				Description: descriptions["token"],
+			},
+
+			"domain_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.MultiEnvDefaultFunc([]string{
+					"OS_USER_DOMAIN_ID",
+					"OS_PROJECT_DOMAIN_ID",
+					"OS_DOMAIN_ID",
+				}, ""),
+				Description: descriptions["domain_id"],
+			},
+
+			"domain_name": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.MultiEnvDefaultFunc([]string{
+					"OS_USER_DOMAIN_NAME",
+					"OS_PROJECT_DOMAIN_NAME",
+					"OS_DOMAIN_NAME",
+					"OS_DEFAULT_DOMAIN",
+				}, ""),
+				Description: descriptions["domain_name"],
+			},
+
+			"insecure": &schema.Schema{
+				Type:        schema.TypeBool,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_INSECURE", ""),
+				Description: descriptions["insecure"],
+			},
+
+			"endpoint_type": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_ENDPOINT_TYPE", ""),
+			},
+
+			"cacert_file": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_CACERT", ""),
+				Description: descriptions["cacert_file"],
+			},
+
+			"cert": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_CERT", ""),
+				Description: descriptions["cert"],
+			},
+
+			"key": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_KEY", ""),
+				Description: descriptions["key"],
+			},
+
+			"swauth": &schema.Schema{
+				Type:        schema.TypeBool,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_SWAUTH", ""),
+				Description: descriptions["swauth"],
+			},
+		},
+
+		DataSourcesMap: map[string]*schema.Resource{
+			"openstack_images_image_v2":       dataSourceImagesImageV2(),
+			"openstack_networking_network_v2": dataSourceNetworkingNetworkV2(),
+		},
+
+		ResourcesMap: map[string]*schema.Resource{
+			"openstack_blockstorage_volume_v1":        resourceBlockStorageVolumeV1(),
+			"openstack_blockstorage_volume_v2":        resourceBlockStorageVolumeV2(),
+			"openstack_blockstorage_volume_attach_v2": resourceBlockStorageVolumeAttachV2(),
+			"openstack_compute_instance_v2":
resourceComputeInstanceV2(), + "openstack_compute_keypair_v2": resourceComputeKeypairV2(), + "openstack_compute_secgroup_v2": resourceComputeSecGroupV2(), + "openstack_compute_servergroup_v2": resourceComputeServerGroupV2(), + "openstack_compute_floatingip_v2": resourceComputeFloatingIPV2(), + "openstack_compute_floatingip_associate_v2": resourceComputeFloatingIPAssociateV2(), + "openstack_compute_volume_attach_v2": resourceComputeVolumeAttachV2(), + "openstack_dns_recordset_v2": resourceDNSRecordSetV2(), + "openstack_dns_zone_v2": resourceDNSZoneV2(), + "openstack_fw_firewall_v1": resourceFWFirewallV1(), + "openstack_fw_policy_v1": resourceFWPolicyV1(), + "openstack_fw_rule_v1": resourceFWRuleV1(), + "openstack_images_image_v2": resourceImagesImageV2(), + "openstack_lb_member_v1": resourceLBMemberV1(), + "openstack_lb_monitor_v1": resourceLBMonitorV1(), + "openstack_lb_pool_v1": resourceLBPoolV1(), + "openstack_lb_vip_v1": resourceLBVipV1(), + "openstack_lb_loadbalancer_v2": resourceLoadBalancerV2(), + "openstack_lb_listener_v2": resourceListenerV2(), + "openstack_lb_pool_v2": resourcePoolV2(), + "openstack_lb_member_v2": resourceMemberV2(), + "openstack_lb_monitor_v2": resourceMonitorV2(), + "openstack_networking_network_v2": resourceNetworkingNetworkV2(), + "openstack_networking_subnet_v2": resourceNetworkingSubnetV2(), + "openstack_networking_floatingip_v2": resourceNetworkingFloatingIPV2(), + "openstack_networking_port_v2": resourceNetworkingPortV2(), + "openstack_networking_router_v2": resourceNetworkingRouterV2(), + "openstack_networking_router_interface_v2": resourceNetworkingRouterInterfaceV2(), + "openstack_networking_router_route_v2": resourceNetworkingRouterRouteV2(), + "openstack_networking_secgroup_v2": resourceNetworkingSecGroupV2(), + "openstack_networking_secgroup_rule_v2": resourceNetworkingSecGroupRuleV2(), + "openstack_objectstorage_container_v1": resourceObjectStorageContainerV1(), + }, + + ConfigureFunc: configureProvider, + } +} + +var descriptions map[string]string + +func init() { + descriptions = map[string]string{ + "auth_url": "The Identity authentication URL.", + + "user_name": "Username to login with.", + + "user_id": "User ID to login with.", + + "tenant_id": "The ID of the Tenant (Identity v2) or Project (Identity v3)\n" + + "to login with.", + + "tenant_name": "The name of the Tenant (Identity v2) or Project (Identity v3)\n" + + "to login with.", + + "password": "Password to login with.", + + "token": "Authentication token to use as an alternative to username/password.", + + "domain_id": "The ID of the Domain to scope to (Identity v3).", + + "domain_name": "The name of the Domain to scope to (Identity v3).", + + "insecure": "Trust self-signed certificates.", + + "cacert_file": "A Custom CA certificate.", + + "endpoint_type": "The catalog endpoint type to use.", + + "cert": "A client certificate to authenticate with.", + + "key": "A client private key to authenticate with.", + + "swauth": "Use Swift's authentication system instead of Keystone. 
Only used for\n" + + "interaction with Swift.", + } +} + +func configureProvider(d *schema.ResourceData) (interface{}, error) { + config := Config{ + CACertFile: d.Get("cacert_file").(string), + ClientCertFile: d.Get("cert").(string), + ClientKeyFile: d.Get("key").(string), + DomainID: d.Get("domain_id").(string), + DomainName: d.Get("domain_name").(string), + EndpointType: d.Get("endpoint_type").(string), + IdentityEndpoint: d.Get("auth_url").(string), + Insecure: d.Get("insecure").(bool), + Password: d.Get("password").(string), + Swauth: d.Get("swauth").(bool), + Token: d.Get("token").(string), + TenantID: d.Get("tenant_id").(string), + TenantName: d.Get("tenant_name").(string), + Username: d.Get("user_name").(string), + UserID: d.Get("user_id").(string), + } + + if err := config.loadAndValidate(); err != nil { + return nil, err + } + + return &config, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_attach_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_attach_v2.go new file mode 100644 index 000000000..4dd28e7bc --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_attach_v2.go @@ -0,0 +1,414 @@ +package openstack + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceBlockStorageVolumeAttachV2() *schema.Resource { + return &schema.Resource{ + Create: resourceBlockStorageVolumeAttachV2Create, + Read: resourceBlockStorageVolumeAttachV2Read, + Delete: resourceBlockStorageVolumeAttachV2Delete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + + "volume_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "instance_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Deprecated: "instance_id is no longer used in this resource", + }, + + "host_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "device": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "attach_mode": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "ro" && value != "rw" { + errors = append(errors, fmt.Errorf( + "Only 'ro' and 'rw' are supported values for 'attach_mode'")) + } + return + }, + }, + + "initiator": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "multipath": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + + "os_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "platform": &schema.Schema{ + Type: 
schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+
+			"wwpn": &schema.Schema{
+				Type:     schema.TypeList,
+				Optional: true,
+				ForceNew: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+
+			"wwnn": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+
+			// Volume attachment information
+			"data": &schema.Schema{
+				Type:      schema.TypeMap,
+				Computed:  true,
+				Sensitive: true,
+			},
+
+			"driver_volume_type": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"mount_point_base": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceBlockStorageVolumeAttachV2Create(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	client, err := config.blockStorageV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack block storage client: %s", err)
+	}
+
+	// initialize the connection
+	volumeId := d.Get("volume_id").(string)
+	connOpts := &volumeactions.InitializeConnectionOpts{}
+	if v, ok := d.GetOk("host_name"); ok {
+		connOpts.Host = v.(string)
+	}
+
+	if v, ok := d.GetOk("multipath"); ok {
+		multipath := v.(bool)
+		connOpts.Multipath = &multipath
+	}
+
+	if v, ok := d.GetOk("ip_address"); ok {
+		connOpts.IP = v.(string)
+	}
+
+	if v, ok := d.GetOk("initiator"); ok {
+		connOpts.Initiator = v.(string)
+	}
+
+	if v, ok := d.GetOk("os_type"); ok {
+		connOpts.OSType = v.(string)
+	}
+
+	if v, ok := d.GetOk("platform"); ok {
+		connOpts.Platform = v.(string)
+	}
+
+	if v, ok := d.GetOk("wwnn"); ok {
+		connOpts.Wwnns = v.(string)
+	}
+
+	if v, ok := d.GetOk("wwpn"); ok {
+		var wwpns []string
+		for _, i := range v.([]interface{}) {
+			wwpns = append(wwpns, i.(string))
+		}
+
+		connOpts.Wwpns = wwpns
+	}
+
+	connInfo, err := volumeactions.InitializeConnection(client, volumeId, connOpts).Extract()
+	if err != nil {
+		return fmt.Errorf("Unable to create connection: %s", err)
+	}
+
+	// Only uncomment this when debugging since connInfo contains sensitive information.
+	// log.Printf("[DEBUG] Volume Connection for %s: %#v", volumeId, connInfo)
+
+	// Because this information is only returned upon creation,
+	// it must be set in Create.
+	if v, ok := connInfo["data"]; ok {
+		data := make(map[string]string)
+		for key, value := range v.(map[string]interface{}) {
+			if v, ok := value.(string); ok {
+				data[key] = v
+			}
+		}
+
+		d.Set("data", data)
+	}
+
+	if v, ok := connInfo["driver_volume_type"]; ok {
+		d.Set("driver_volume_type", v)
+	}
+
+	if v, ok := connInfo["mount_point_base"]; ok {
+		d.Set("mount_point_base", v)
+	}
+
+	// Once the connection has been made, tell Cinder to mark the volume as attached.
+	attachMode, err := blockStorageVolumeAttachV2AttachMode(d.Get("attach_mode").(string))
+	if err != nil {
+		return err
+	}
+
+	attachOpts := &volumeactions.AttachOpts{
+		HostName:   d.Get("host_name").(string),
+		MountPoint: d.Get("device").(string),
+		Mode:       attachMode,
+	}
+
+	log.Printf("[DEBUG] Attachment Options: %#v", attachOpts)
+
+	if err := volumeactions.Attach(client, volumeId, attachOpts).ExtractErr(); err != nil {
+		return err
+	}
+
+	// Wait for the volume to become available.
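+	// Editorial note: Cinder reports "available" or "attaching" while the
+	// attach is in flight and "in-use" once it completes, hence the Pending
+	// and Target states in the watcher below.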
+ log.Printf("[DEBUG] Waiting for volume (%s) to become available", volumeId) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"available", "attaching"}, + Target: []string{"in-use"}, + Refresh: VolumeV2StateRefreshFunc(client, volumeId), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for volume (%s) to become ready: %s", volumeId, err) + } + + // Once the volume has been marked as attached, + // retrieve a fresh copy of it with all information now available. + volume, err := volumes.Get(client, volumeId).Extract() + if err != nil { + return err + } + + // Search for the attachmentId + var attachmentId string + hostName := d.Get("host_name").(string) + for _, attachment := range volume.Attachments { + if hostName != "" && hostName == attachment.HostName { + attachmentId = attachment.AttachmentID + } + } + + if attachmentId == "" { + return fmt.Errorf("Unable to determine attachment ID.") + } + + // The ID must be a combination of the volume and attachment ID + // since a volume ID is required to retrieve an attachment ID. + id := fmt.Sprintf("%s/%s", volumeId, attachmentId) + d.SetId(id) + + return resourceBlockStorageVolumeAttachV2Read(d, meta) +} + +func resourceBlockStorageVolumeAttachV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + client, err := config.blockStorageV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } + + volumeId, attachmentId, err := blockStorageVolumeAttachV2ParseId(d.Id()) + if err != nil { + return err + } + + volume, err := volumes.Get(client, volumeId).Extract() + if err != nil { + return err + } + + log.Printf("[DEBUG] Retrieved volume %s: %#v", d.Id(), volume) + + var attachment volumes.Attachment + for _, v := range volume.Attachments { + if attachmentId == v.AttachmentID { + attachment = v + } + } + + log.Printf("[DEBUG] Retrieved volume attachment: %#v", attachment) + + return nil +} + +func resourceBlockStorageVolumeAttachV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + client, err := config.blockStorageV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } + + volumeId, attachmentId, err := blockStorageVolumeAttachV2ParseId(d.Id()) + + // Terminate the connection + termOpts := &volumeactions.TerminateConnectionOpts{} + if v, ok := d.GetOk("host_name"); ok { + termOpts.Host = v.(string) + } + + if v, ok := d.GetOk("multipath"); ok { + multipath := v.(bool) + termOpts.Multipath = &multipath + } + + if v, ok := d.GetOk("ip_address"); ok { + termOpts.IP = v.(string) + } + + if v, ok := d.GetOk("initiator"); ok { + termOpts.Initiator = v.(string) + } + + if v, ok := d.GetOk("os_type"); ok { + termOpts.OSType = v.(string) + } + + if v, ok := d.GetOk("platform"); ok { + termOpts.Platform = v.(string) + } + + if v, ok := d.GetOk("wwnns"); ok { + termOpts.Wwnns = v.(string) + } + + if v, ok := d.GetOk("wwpns"); ok { + var wwpns []string + for _, i := range v.([]string) { + wwpns = append(wwpns, i) + } + + termOpts.Wwpns = wwpns + } + + err = volumeactions.TerminateConnection(client, volumeId, termOpts).ExtractErr() + if err != nil { + return fmt.Errorf("Error terminating volume connection %s: %s", volumeId, err) + } + + // Detach the volume + detachOpts := volumeactions.DetachOpts{ + 
AttachmentID: attachmentId, + } + + log.Printf("[DEBUG] Detachment Options: %#v", detachOpts) + + if err := volumeactions.Detach(client, volumeId, detachOpts).ExtractErr(); err != nil { + return err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"in-use", "attaching", "detaching"}, + Target: []string{"available"}, + Refresh: VolumeV2StateRefreshFunc(client, volumeId), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for volume (%s) to become available: %s", volumeId, err) + } + + return nil +} + +func blockStorageVolumeAttachV2AttachMode(v string) (volumeactions.AttachMode, error) { + var attachMode volumeactions.AttachMode + var attachError error + switch v { + case "": + attachMode = "" + case "ro": + attachMode = volumeactions.ReadOnly + case "rw": + attachMode = volumeactions.ReadWrite + default: + attachError = fmt.Errorf("Invalid attach_mode specified") + } + + return attachMode, attachError +} + +func blockStorageVolumeAttachV2ParseId(id string) (string, string, error) { + parts := strings.Split(id, "/") + if len(parts) < 2 { + return "", "", fmt.Errorf("Unable to determine attachment ID") + } + + return parts[0], parts[1], nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_v1.go new file mode 100644 index 000000000..8c84a08e8 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_v1.go @@ -0,0 +1,339 @@ +package openstack + +import ( + "bytes" + "fmt" + "log" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceBlockStorageVolumeV1() *schema.Resource { + return &schema.Resource{ + Create: resourceBlockStorageVolumeV1Create, + Read: resourceBlockStorageVolumeV1Read, + Update: resourceBlockStorageVolumeV1Update, + Delete: resourceBlockStorageVolumeV1Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + "size": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "availability_zone": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "metadata": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: false, + Computed: true, + }, + "snapshot_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "source_vol_id": 
&schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "image_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "volume_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "attachment": &schema.Schema{ + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "instance_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "device": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + }, + Set: resourceVolumeAttachmentHash, + }, + }, + } +} + +func resourceBlockStorageVolumeV1Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + blockStorageClient, err := config.blockStorageV1Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } + + createOpts := &volumes.CreateOpts{ + Description: d.Get("description").(string), + AvailabilityZone: d.Get("availability_zone").(string), + Name: d.Get("name").(string), + Size: d.Get("size").(int), + SnapshotID: d.Get("snapshot_id").(string), + SourceVolID: d.Get("source_vol_id").(string), + ImageID: d.Get("image_id").(string), + VolumeType: d.Get("volume_type").(string), + Metadata: resourceContainerMetadataV2(d), + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + v, err := volumes.Create(blockStorageClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack volume: %s", err) + } + log.Printf("[INFO] Volume ID: %s", v.ID) + + // Wait for the volume to become available. + log.Printf( + "[DEBUG] Waiting for volume (%s) to become available", + v.ID) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"downloading", "creating"}, + Target: []string{"available"}, + Refresh: VolumeV1StateRefreshFunc(blockStorageClient, v.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for volume (%s) to become ready: %s", + v.ID, err) + } + + // Store the ID now + d.SetId(v.ID) + + return resourceBlockStorageVolumeV1Read(d, meta) +} + +func resourceBlockStorageVolumeV1Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + blockStorageClient, err := config.blockStorageV1Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } + + v, err := volumes.Get(blockStorageClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "volume") + } + + log.Printf("[DEBUG] Retrieved volume %s: %+v", d.Id(), v) + + d.Set("size", v.Size) + d.Set("description", v.Description) + d.Set("availability_zone", v.AvailabilityZone) + d.Set("name", v.Name) + d.Set("snapshot_id", v.SnapshotID) + d.Set("source_vol_id", v.SourceVolID) + d.Set("volume_type", v.VolumeType) + d.Set("metadata", v.Metadata) + d.Set("region", GetRegion(d)) + + attachments := make([]map[string]interface{}, len(v.Attachments)) + for i, attachment := range v.Attachments { + attachments[i] = make(map[string]interface{}) + attachments[i]["id"] = attachment["id"] + attachments[i]["instance_id"] = attachment["server_id"] + attachments[i]["device"] = attachment["device"] + log.Printf("[DEBUG] attachment: %v", attachment) + } + 
d.Set("attachment", attachments) + + return nil +} + +func resourceBlockStorageVolumeV1Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + blockStorageClient, err := config.blockStorageV1Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } + + updateOpts := volumes.UpdateOpts{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + } + + if d.HasChange("metadata") { + updateOpts.Metadata = resourceVolumeMetadataV1(d) + } + + _, err = volumes.Update(blockStorageClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack volume: %s", err) + } + + return resourceBlockStorageVolumeV1Read(d, meta) +} + +func resourceBlockStorageVolumeV1Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + blockStorageClient, err := config.blockStorageV1Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } + + v, err := volumes.Get(blockStorageClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "volume") + } + + // make sure this volume is detached from all instances before deleting + if len(v.Attachments) > 0 { + log.Printf("[DEBUG] detaching volumes") + if computeClient, err := config.computeV2Client(GetRegion(d)); err != nil { + return err + } else { + for _, volumeAttachment := range v.Attachments { + log.Printf("[DEBUG] Attachment: %v", volumeAttachment) + if err := volumeattach.Delete(computeClient, volumeAttachment["server_id"].(string), volumeAttachment["id"].(string)).ExtractErr(); err != nil { + return err + } + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"in-use", "attaching", "detaching"}, + Target: []string{"available"}, + Refresh: VolumeV1StateRefreshFunc(blockStorageClient, d.Id()), + Timeout: 10 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for volume (%s) to become available: %s", + d.Id(), err) + } + } + } + + // It's possible that this volume was used as a boot device and is currently + // in a "deleting" state from when the instance was terminated. + // If this is true, just move on. It'll eventually delete. + if v.Status != "deleting" { + if err := volumes.Delete(blockStorageClient, d.Id()).ExtractErr(); err != nil { + return CheckDeleted(d, err, "volume") + } + } + + // Wait for the volume to delete before moving on. + log.Printf("[DEBUG] Waiting for volume (%s) to delete", d.Id()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"deleting", "downloading", "available"}, + Target: []string{"deleted"}, + Refresh: VolumeV1StateRefreshFunc(blockStorageClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for volume (%s) to delete: %s", + d.Id(), err) + } + + d.SetId("") + return nil +} + +func resourceVolumeMetadataV1(d *schema.ResourceData) map[string]string { + m := make(map[string]string) + for key, val := range d.Get("metadata").(map[string]interface{}) { + m[key] = val.(string) + } + return m +} + +// VolumeV1StateRefreshFunc returns a resource.StateRefreshFunc that is used to watch +// an OpenStack volume. 
+func VolumeV1StateRefreshFunc(client *gophercloud.ServiceClient, volumeID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + v, err := volumes.Get(client, volumeID).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + return v, "deleted", nil + } + return nil, "", err + } + + if v.Status == "error" { + return v, v.Status, fmt.Errorf("There was an error creating the volume. " + + "Please check with your cloud admin or check the Block Storage " + + "API logs to see why this error occurred.") + } + + return v, v.Status, nil + } +} + +func resourceVolumeAttachmentHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + if m["instance_id"] != nil { + buf.WriteString(fmt.Sprintf("%s-", m["instance_id"].(string))) + } + return hashcode.String(buf.String()) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_v2.go new file mode 100644 index 000000000..5944cac04 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_v2.go @@ -0,0 +1,350 @@ +package openstack + +import ( + "bytes" + "fmt" + "log" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceBlockStorageVolumeV2() *schema.Resource { + return &schema.Resource{ + Create: resourceBlockStorageVolumeV2Create, + Read: resourceBlockStorageVolumeV2Read, + Update: resourceBlockStorageVolumeV2Update, + Delete: resourceBlockStorageVolumeV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + "size": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "availability_zone": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "metadata": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: false, + Computed: true, + }, + "snapshot_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "source_vol_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "image_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "volume_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "consistency_group_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "source_replica": &schema.Schema{ + Type: schema.TypeString, + 
Optional: true, + ForceNew: true, + }, + "attachment": &schema.Schema{ + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "instance_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "device": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + }, + Set: resourceVolumeV2AttachmentHash, + }, + }, + } +} + +func resourceBlockStorageVolumeV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + blockStorageClient, err := config.blockStorageV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } + + createOpts := &volumes.CreateOpts{ + AvailabilityZone: d.Get("availability_zone").(string), + ConsistencyGroupID: d.Get("consistency_group_id").(string), + Description: d.Get("description").(string), + ImageID: d.Get("image_id").(string), + Metadata: resourceContainerMetadataV2(d), + Name: d.Get("name").(string), + Size: d.Get("size").(int), + SnapshotID: d.Get("snapshot_id").(string), + SourceReplica: d.Get("source_replica").(string), + SourceVolID: d.Get("source_vol_id").(string), + VolumeType: d.Get("volume_type").(string), + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + v, err := volumes.Create(blockStorageClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack volume: %s", err) + } + log.Printf("[INFO] Volume ID: %s", v.ID) + + // Wait for the volume to become available. + log.Printf( + "[DEBUG] Waiting for volume (%s) to become available", + v.ID) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"downloading", "creating"}, + Target: []string{"available"}, + Refresh: VolumeV2StateRefreshFunc(blockStorageClient, v.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for volume (%s) to become ready: %s", + v.ID, err) + } + + // Store the ID now + d.SetId(v.ID) + + return resourceBlockStorageVolumeV2Read(d, meta) +} + +func resourceBlockStorageVolumeV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + blockStorageClient, err := config.blockStorageV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } + + v, err := volumes.Get(blockStorageClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "volume") + } + + log.Printf("[DEBUG] Retrieved volume %s: %+v", d.Id(), v) + + d.Set("size", v.Size) + d.Set("description", v.Description) + d.Set("availability_zone", v.AvailabilityZone) + d.Set("name", v.Name) + d.Set("snapshot_id", v.SnapshotID) + d.Set("source_vol_id", v.SourceVolID) + d.Set("volume_type", v.VolumeType) + d.Set("metadata", v.Metadata) + d.Set("region", GetRegion(d)) + + attachments := make([]map[string]interface{}, len(v.Attachments)) + for i, attachment := range v.Attachments { + attachments[i] = make(map[string]interface{}) + attachments[i]["id"] = attachment.ID + attachments[i]["instance_id"] = attachment.ServerID + attachments[i]["device"] = attachment.Device + log.Printf("[DEBUG] attachment: %v", attachment) + } + d.Set("attachment", attachments) + + return nil +} + +func resourceBlockStorageVolumeV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) 
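+	// Only name, description, and metadata are updatable in place; every
+	// other attribute in this resource's schema is ForceNew, so changes to
+	// them recreate the volume instead of reaching this function.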
+ blockStorageClient, err := config.blockStorageV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } + + updateOpts := volumes.UpdateOpts{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + } + + if d.HasChange("metadata") { + updateOpts.Metadata = resourceVolumeMetadataV2(d) + } + + _, err = volumes.Update(blockStorageClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack volume: %s", err) + } + + return resourceBlockStorageVolumeV2Read(d, meta) +} + +func resourceBlockStorageVolumeV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + blockStorageClient, err := config.blockStorageV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } + + v, err := volumes.Get(blockStorageClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "volume") + } + + // make sure this volume is detached from all instances before deleting + if len(v.Attachments) > 0 { + log.Printf("[DEBUG] detaching volumes") + if computeClient, err := config.computeV2Client(GetRegion(d)); err != nil { + return err + } else { + for _, volumeAttachment := range v.Attachments { + log.Printf("[DEBUG] Attachment: %v", volumeAttachment) + if err := volumeattach.Delete(computeClient, volumeAttachment.ServerID, volumeAttachment.ID).ExtractErr(); err != nil { + return err + } + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"in-use", "attaching", "detaching"}, + Target: []string{"available"}, + Refresh: VolumeV2StateRefreshFunc(blockStorageClient, d.Id()), + Timeout: 10 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for volume (%s) to become available: %s", + d.Id(), err) + } + } + } + + // It's possible that this volume was used as a boot device and is currently + // in a "deleting" state from when the instance was terminated. + // If this is true, just move on. It'll eventually delete. + if v.Status != "deleting" { + if err := volumes.Delete(blockStorageClient, d.Id()).ExtractErr(); err != nil { + return CheckDeleted(d, err, "volume") + } + } + + // Wait for the volume to delete before moving on. + log.Printf("[DEBUG] Waiting for volume (%s) to delete", d.Id()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"deleting", "downloading", "available"}, + Target: []string{"deleted"}, + Refresh: VolumeV2StateRefreshFunc(blockStorageClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for volume (%s) to delete: %s", + d.Id(), err) + } + + d.SetId("") + return nil +} + +func resourceVolumeMetadataV2(d *schema.ResourceData) map[string]string { + m := make(map[string]string) + for key, val := range d.Get("metadata").(map[string]interface{}) { + m[key] = val.(string) + } + return m +} + +// VolumeV2StateRefreshFunc returns a resource.StateRefreshFunc that is used to watch +// an OpenStack volume. 
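+//
+// It is also reused by the openstack_blockstorage_volume_attach_v2 resource
+// to wait for a volume to reach "in-use" after an attach call.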
+func VolumeV2StateRefreshFunc(client *gophercloud.ServiceClient, volumeID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + v, err := volumes.Get(client, volumeID).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + return v, "deleted", nil + } + return nil, "", err + } + + if v.Status == "error" { + return v, v.Status, fmt.Errorf("There was an error creating the volume. " + + "Please check with your cloud admin or check the Block Storage " + + "API logs to see why this error occurred.") + } + + return v, v.Status, nil + } +} + +func resourceVolumeV2AttachmentHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + if m["instance_id"] != nil { + buf.WriteString(fmt.Sprintf("%s-", m["instance_id"].(string))) + } + return hashcode.String(buf.String()) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_floatingip_associate_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_floatingip_associate_v2.go new file mode 100644 index 000000000..963e191d6 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_floatingip_associate_v2.go @@ -0,0 +1,234 @@ +package openstack + +import ( + "fmt" + "log" + "strings" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + nfloatingips "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeFloatingIPAssociateV2() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeFloatingIPAssociateV2Create, + Read: resourceComputeFloatingIPAssociateV2Read, + Delete: resourceComputeFloatingIPAssociateV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + "floating_ip": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "instance_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "fixed_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceComputeFloatingIPAssociateV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + floatingIP := d.Get("floating_ip").(string) + fixedIP := d.Get("fixed_ip").(string) + instanceId := d.Get("instance_id").(string) + + associateOpts := floatingips.AssociateOpts{ + FloatingIP: floatingIP, + FixedIP: fixedIP, + } + log.Printf("[DEBUG] Associate Options: %#v", associateOpts) + + err = floatingips.AssociateInstance(computeClient, instanceId, associateOpts).ExtractErr() + if err != nil { + return fmt.Errorf("Error associating Floating IP: %s", err) + } + + // There's an API call to get this information, but it has been + // deprecated. The Neutron API could be used, but I'm trying not + // to mix service APIs. Therefore, a faux ID will be used. 
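+	// The composed ID therefore has the form "floatingIP/instanceId/fixedIP",
+	// e.g. (hypothetical values) "192.168.1.50/a1b2c3d4/10.0.0.5";
+	// parseComputeFloatingIPAssociateId splits it back apart on Read.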
+ id := fmt.Sprintf("%s/%s/%s", floatingIP, instanceId, fixedIP) + d.SetId(id) + + // This API call is synchronous, so Create won't return until the IP + // is attached. No need to wait for a state. + + return resourceComputeFloatingIPAssociateV2Read(d, meta) +} + +func resourceComputeFloatingIPAssociateV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + // Obtain relevant info from parsing the ID + floatingIP, instanceId, fixedIP, err := parseComputeFloatingIPAssociateId(d.Id()) + if err != nil { + return err + } + + // Now check and see whether the floating IP still exists. + // First try to do this by querying the Network API. + networkEnabled := true + networkClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + networkEnabled = false + } + + var exists bool + if networkEnabled { + log.Printf("[DEBUG] Checking for Floating IP existence via Network API") + exists, err = resourceComputeFloatingIPAssociateV2NetworkExists(networkClient, floatingIP) + } else { + log.Printf("[DEBUG] Checking for Floating IP existence via Compute API") + exists, err = resourceComputeFloatingIPAssociateV2ComputeExists(computeClient, floatingIP) + } + + if err != nil { + return err + } + + if !exists { + d.SetId("") + } + + // Next, see if the instance still exists + instance, err := servers.Get(computeClient, instanceId).Extract() + if err != nil { + if CheckDeleted(d, err, "instance") == nil { + return nil + } + } + + // Finally, check and see if the floating ip is still associated with the instance. + var associated bool + for _, networkAddresses := range instance.Addresses { + for _, element := range networkAddresses.([]interface{}) { + address := element.(map[string]interface{}) + if address["OS-EXT-IPS:type"] == "floating" && address["addr"] == floatingIP { + associated = true + } + } + } + + if !associated { + d.SetId("") + } + + // Set the attributes pulled from the composed resource ID + d.Set("floating_ip", floatingIP) + d.Set("instance_id", instanceId) + d.Set("fixed_ip", fixedIP) + d.Set("region", GetRegion(d)) + + return nil +} + +func resourceComputeFloatingIPAssociateV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + floatingIP := d.Get("floating_ip").(string) + instanceId := d.Get("instance_id").(string) + + disassociateOpts := floatingips.DisassociateOpts{ + FloatingIP: floatingIP, + } + log.Printf("[DEBUG] Disssociate Options: %#v", disassociateOpts) + + err = floatingips.DisassociateInstance(computeClient, instanceId, disassociateOpts).ExtractErr() + if err != nil { + return CheckDeleted(d, err, "floating ip association") + } + + return nil +} + +func parseComputeFloatingIPAssociateId(id string) (string, string, string, error) { + idParts := strings.Split(id, "/") + if len(idParts) < 3 { + return "", "", "", fmt.Errorf("Unable to determine floating ip association ID") + } + + floatingIP := idParts[0] + instanceId := idParts[1] + fixedIP := idParts[2] + + return floatingIP, instanceId, fixedIP, nil +} + +func resourceComputeFloatingIPAssociateV2NetworkExists(networkClient *gophercloud.ServiceClient, floatingIP string) (bool, error) { + listOpts := nfloatingips.ListOpts{ + FloatingIP: floatingIP, 
+ } + allPages, err := nfloatingips.List(networkClient, listOpts).AllPages() + if err != nil { + return false, err + } + + allFips, err := nfloatingips.ExtractFloatingIPs(allPages) + if err != nil { + return false, err + } + + if len(allFips) > 1 { + return false, fmt.Errorf("There was a problem retrieving the floating IP") + } + + if len(allFips) == 0 { + return false, nil + } + + return true, nil +} + +func resourceComputeFloatingIPAssociateV2ComputeExists(computeClient *gophercloud.ServiceClient, floatingIP string) (bool, error) { + // If the Network API isn't available, fall back to the deprecated Compute API. + allPages, err := floatingips.List(computeClient).AllPages() + if err != nil { + return false, err + } + + allFips, err := floatingips.ExtractFloatingIPs(allPages) + if err != nil { + return false, err + } + + for _, f := range allFips { + if f.IP == floatingIP { + return true, nil + } + } + + return false, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_floatingip_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_floatingip_v2.go new file mode 100644 index 000000000..96e723d5c --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_floatingip_v2.go @@ -0,0 +1,111 @@ +package openstack + +import ( + "fmt" + "log" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeFloatingIPV2() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeFloatingIPV2Create, + Read: resourceComputeFloatingIPV2Read, + Update: nil, + Delete: resourceComputeFloatingIPV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + + "pool": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_POOL_NAME", nil), + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "fixed_ip": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "instance_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeFloatingIPV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + createOpts := &floatingips.CreateOpts{ + Pool: d.Get("pool").(string), + } + log.Printf("[DEBUG] Create Options: %#v", createOpts) + newFip, err := floatingips.Create(computeClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating Floating IP: %s", err) + } + + d.SetId(newFip.ID) + + return resourceComputeFloatingIPV2Read(d, meta) +} + +func resourceComputeFloatingIPV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + fip, err := floatingips.Get(computeClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "floating ip") + } 
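+	// By provider convention, CheckDeleted turns a 404 into d.SetId("") plus
+	// a nil error, so a floating IP removed out-of-band is dropped from state
+	// rather than failing the refresh.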
+ + log.Printf("[DEBUG] Retrieved Floating IP %s: %+v", d.Id(), fip) + + d.Set("pool", fip.Pool) + d.Set("instance_id", fip.InstanceID) + d.Set("address", fip.IP) + d.Set("fixed_ip", fip.FixedIP) + d.Set("region", GetRegion(d)) + + return nil +} + +func resourceComputeFloatingIPV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + log.Printf("[DEBUG] Deleting Floating IP %s", d.Id()) + if err := floatingips.Delete(computeClient, d.Id()).ExtractErr(); err != nil { + return fmt.Errorf("Error deleting Floating IP: %s", err) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_instance_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_instance_v2.go new file mode 100644 index 000000000..4484e7c89 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_instance_v2.go @@ -0,0 +1,1626 @@ +package openstack + +import ( + "bytes" + "crypto/sha1" + "encoding/hex" + "fmt" + "log" + "os" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" + "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors" + "github.com/gophercloud/gophercloud/openstack/compute/v2/images" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeInstanceV2() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeInstanceV2Create, + Read: resourceComputeInstanceV2Read, + Update: resourceComputeInstanceV2Update, + Delete: resourceComputeInstanceV2Delete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + "image_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "image_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "flavor_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + Computed: true, + 
DefaultFunc: schema.EnvDefaultFunc("OS_FLAVOR_ID", nil), + }, + "flavor_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + Computed: true, + DefaultFunc: schema.EnvDefaultFunc("OS_FLAVOR_NAME", nil), + }, + "floating_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + Deprecated: "Use the openstack_compute_floatingip_associate_v2 resource instead", + }, + "user_data": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + // just stash the hash for state & diff comparisons + StateFunc: func(v interface{}) string { + switch v.(type) { + case string: + hash := sha1.Sum([]byte(v.(string))) + return hex.EncodeToString(hash[:]) + default: + return "" + } + }, + }, + "security_groups": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: false, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "availability_zone": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "network": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uuid": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "port": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "fixed_ip_v4": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "fixed_ip_v6": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "floating_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Deprecated: "Use the openstack_compute_floatingip_associate_v2 resource instead", + }, + "mac": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "access_network": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, + "metadata": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: false, + }, + "config_drive": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + "admin_pass": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "access_ip_v4": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: false, + }, + "access_ip_v6": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: false, + }, + "key_pair": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "block_device": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source_type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "uuid": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "volume_size": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + "destination_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "boot_index": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + "delete_on_termination": &schema.Schema{ + Type: schema.TypeBool, + 
Optional: true, + Default: false, + ForceNew: true, + }, + "guest_format": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + "volume": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Deprecated: "Use block_device or openstack_compute_volume_attach_v2 instead", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "volume_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "device": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + }, + Set: resourceComputeVolumeAttachmentHash, + }, + "scheduler_hints": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "group": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "different_host": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "same_host": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "query": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "target_cell": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "build_near_host_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, + Set: resourceComputeSchedulerHintsHash, + }, + "personality": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "file": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "content": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + Set: resourceComputeInstancePersonalityHash, + }, + "stop_before_destroy": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "force_delete": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "all_metadata": &schema.Schema{ + Type: schema.TypeMap, + Computed: true, + }, + }, + } +} + +func resourceComputeInstanceV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + var createOpts servers.CreateOptsBuilder + + // Determines the Image ID using the following rules: + // If a bootable block_device was specified, ignore the image altogether. + // If an image_id was specified, use it. + // If an image_name was specified, look up the image ID, report if error. + imageId, err := getImageIDFromConfig(computeClient, d) + if err != nil { + return err + } + + flavorId, err := getFlavorID(computeClient, d) + if err != nil { + return err + } + + // determine if block_device configuration is correct + // this includes valid combinations and required attributes + if err := checkBlockDeviceConfig(d); err != nil { + return err + } + + // check if floating IP configuration is correct + if err := checkInstanceFloatingIPs(d); err != nil { + return err + } + + // Build a list of networks with the information given upon creation. + // Error out if an invalid network configuration was used. 
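+	// getInstanceNetworks (defined later in this file) resolves each
+	// configured network name or UUID, consulting the os-tenant-networks
+	// extension when the cloud exposes it.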
+	networkDetails, err := getInstanceNetworks(computeClient, d)
+	if err != nil {
+		return err
+	}
+
+	networks := make([]servers.Network, len(networkDetails))
+	for i, net := range networkDetails {
+		networks[i] = servers.Network{
+			UUID:    net["uuid"].(string),
+			Port:    net["port"].(string),
+			FixedIP: net["fixed_ip_v4"].(string),
+		}
+	}
+
+	configDrive := d.Get("config_drive").(bool)
+
+	createOpts = &servers.CreateOpts{
+		Name:             d.Get("name").(string),
+		ImageRef:         imageId,
+		FlavorRef:        flavorId,
+		SecurityGroups:   resourceInstanceSecGroupsV2(d),
+		AvailabilityZone: d.Get("availability_zone").(string),
+		Networks:         networks,
+		Metadata:         resourceInstanceMetadataV2(d),
+		ConfigDrive:      &configDrive,
+		AdminPass:        d.Get("admin_pass").(string),
+		UserData:         []byte(d.Get("user_data").(string)),
+		Personality:      resourceInstancePersonalityV2(d),
+	}
+
+	if keyName, ok := d.Get("key_pair").(string); ok && keyName != "" {
+		createOpts = &keypairs.CreateOptsExt{
+			CreateOptsBuilder: createOpts,
+			KeyName:           keyName,
+		}
+	}
+
+	if vL, ok := d.GetOk("block_device"); ok {
+		blockDevices, err := resourceInstanceBlockDevicesV2(d, vL.([]interface{}))
+		if err != nil {
+			return err
+		}
+
+		createOpts = &bootfromvolume.CreateOptsExt{
+			CreateOptsBuilder: createOpts,
+			BlockDevice:       blockDevices,
+		}
+	}
+
+	schedulerHintsRaw := d.Get("scheduler_hints").(*schema.Set).List()
+	if len(schedulerHintsRaw) > 0 {
+		log.Printf("[DEBUG] schedulerhints: %+v", schedulerHintsRaw)
+		schedulerHints := resourceInstanceSchedulerHintsV2(d, schedulerHintsRaw[0].(map[string]interface{}))
+		createOpts = &schedulerhints.CreateOptsExt{
+			CreateOptsBuilder: createOpts,
+			SchedulerHints:    schedulerHints,
+		}
+	}
+
+	log.Printf("[DEBUG] Create Options: %#v", createOpts)
+
+	// If a block_device is used, use the bootfromvolume.Create function as it allows an empty ImageRef.
+	// Otherwise, use the normal servers.Create function.
+	var server *servers.Server
+	if _, ok := d.GetOk("block_device"); ok {
+		server, err = bootfromvolume.Create(computeClient, createOpts).Extract()
+	} else {
+		server, err = servers.Create(computeClient, createOpts).Extract()
+	}
+
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack server: %s", err)
+	}
+	log.Printf("[INFO] Instance ID: %s", server.ID)
+
+	// Store the ID now
+	d.SetId(server.ID)
+
+	// Wait for the instance to become running so we can get some attributes
+	// that aren't available until later.
+	log.Printf(
+		"[DEBUG] Waiting for instance (%s) to become running",
+		server.ID)
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"BUILD"},
+		Target:     []string{"ACTIVE"},
+		Refresh:    ServerV2StateRefreshFunc(computeClient, server.ID),
+		Timeout:    d.Timeout(schema.TimeoutCreate),
+		Delay:      10 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf(
+			"Error waiting for instance (%s) to become ready: %s",
+			server.ID, err)
+	}
+
+	// Now that the instance has been created, we need to do an early read on the
+	// networks in order to associate floating IPs
+	_, err = getInstanceNetworksAndAddresses(computeClient, d)
+	if err != nil {
+		return err
+	}
+
+	// If floating IPs were specified, associate them after the instance has launched.
+	err = associateFloatingIPsToInstance(computeClient, d)
+	if err != nil {
+		return err
+	}
+
+	// If volumes were specified, attach them after the instance has launched.
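+	// The deprecated "volume" block is serviced through the Block Storage v1
+	// API; new configurations should use block_device or the
+	// openstack_compute_volume_attach_v2 resource instead.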
+ if v, ok := d.GetOk("volume"); ok { + vols := v.(*schema.Set).List() + if blockClient, err := config.blockStorageV1Client(GetRegion(d)); err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } else { + if err := attachVolumesToInstance(computeClient, blockClient, d.Id(), vols); err != nil { + return err + } + } + } + + return resourceComputeInstanceV2Read(d, meta) +} + +func resourceComputeInstanceV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + server, err := servers.Get(computeClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "server") + } + + log.Printf("[DEBUG] Retrieved Server %s: %+v", d.Id(), server) + + d.Set("name", server.Name) + + // Get the instance network and address information + networks, err := getInstanceNetworksAndAddresses(computeClient, d) + if err != nil { + return err + } + + // Determine the best IPv4 and IPv6 addresses to access the instance with + hostv4, hostv6 := getInstanceAccessAddresses(d, networks) + + if server.AccessIPv4 != "" && hostv4 == "" { + hostv4 = server.AccessIPv4 + } + + if server.AccessIPv6 != "" && hostv6 == "" { + hostv6 = server.AccessIPv6 + } + + d.Set("network", networks) + d.Set("access_ip_v4", hostv4) + d.Set("access_ip_v6", hostv6) + + // Determine the best IP address to use for SSH connectivity. + // Prefer IPv4 over IPv6. + preferredSSHAddress := "" + if hostv4 != "" { + preferredSSHAddress = hostv4 + } else if hostv6 != "" { + preferredSSHAddress = hostv6 + } + + if preferredSSHAddress != "" { + // Initialize the connection info + d.SetConnInfo(map[string]string{ + "type": "ssh", + "host": preferredSSHAddress, + }) + } + + d.Set("all_metadata", server.Metadata) + + secGrpNames := []string{} + for _, sg := range server.SecurityGroups { + secGrpNames = append(secGrpNames, sg["name"].(string)) + } + d.Set("security_groups", secGrpNames) + + flavorId, ok := server.Flavor["id"].(string) + if !ok { + return fmt.Errorf("Error setting OpenStack server's flavor: %v", server.Flavor) + } + d.Set("flavor_id", flavorId) + + flavor, err := flavors.Get(computeClient, flavorId).Extract() + if err != nil { + return err + } + d.Set("flavor_name", flavor.Name) + + // Set the instance's image information appropriately + if err := setImageInformation(computeClient, server, d); err != nil { + return err + } + + // volume attachments + if err := getVolumeAttachments(computeClient, d); err != nil { + return err + } + + // Build a custom struct for the availability zone extension + var serverWithAZ struct { + servers.Server + availabilityzones.ServerExt + } + + // Do another Get so the above work is not disturbed. 
+ err = servers.Get(computeClient, d.Id()).ExtractInto(&serverWithAZ) + if err != nil { + return CheckDeleted(d, err, "server") + } + + // Set the availability zone + d.Set("availability_zone", serverWithAZ.AvailabilityZone) + + return nil +} + +func resourceComputeInstanceV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + var updateOpts servers.UpdateOpts + if d.HasChange("name") { + updateOpts.Name = d.Get("name").(string) + } + + if updateOpts != (servers.UpdateOpts{}) { + _, err := servers.Update(computeClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack server: %s", err) + } + } + + if d.HasChange("metadata") { + oldMetadata, newMetadata := d.GetChange("metadata") + var metadataToDelete []string + + // Determine if any metadata keys were removed from the configuration. + // Then request those keys to be deleted. + for oldKey, _ := range oldMetadata.(map[string]interface{}) { + var found bool + for newKey, _ := range newMetadata.(map[string]interface{}) { + if oldKey == newKey { + found = true + } + } + + if !found { + metadataToDelete = append(metadataToDelete, oldKey) + } + } + + for _, key := range metadataToDelete { + err := servers.DeleteMetadatum(computeClient, d.Id(), key).ExtractErr() + if err != nil { + return fmt.Errorf("Error deleting metadata (%s) from server (%s): %s", key, d.Id(), err) + } + } + + // Update existing metadata and add any new metadata. + metadataOpts := make(servers.MetadataOpts) + for k, v := range newMetadata.(map[string]interface{}) { + metadataOpts[k] = v.(string) + } + + _, err := servers.UpdateMetadata(computeClient, d.Id(), metadataOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack server (%s) metadata: %s", d.Id(), err) + } + } + + if d.HasChange("security_groups") { + oldSGRaw, newSGRaw := d.GetChange("security_groups") + oldSGSet := oldSGRaw.(*schema.Set) + newSGSet := newSGRaw.(*schema.Set) + secgroupsToAdd := newSGSet.Difference(oldSGSet) + secgroupsToRemove := oldSGSet.Difference(newSGSet) + + log.Printf("[DEBUG] Security groups to add: %v", secgroupsToAdd) + + log.Printf("[DEBUG] Security groups to remove: %v", secgroupsToRemove) + + for _, g := range secgroupsToRemove.List() { + err := secgroups.RemoveServer(computeClient, d.Id(), g.(string)).ExtractErr() + if err != nil && err.Error() != "EOF" { + if _, ok := err.(gophercloud.ErrDefault404); ok { + continue + } + + return fmt.Errorf("Error removing security group (%s) from OpenStack server (%s): %s", g, d.Id(), err) + } else { + log.Printf("[DEBUG] Removed security group (%s) from instance (%s)", g, d.Id()) + } + } + + for _, g := range secgroupsToAdd.List() { + err := secgroups.AddServer(computeClient, d.Id(), g.(string)).ExtractErr() + if err != nil && err.Error() != "EOF" { + return fmt.Errorf("Error adding security group (%s) to OpenStack server (%s): %s", g, d.Id(), err) + } + log.Printf("[DEBUG] Added security group (%s) to instance (%s)", g, d.Id()) + } + } + + if d.HasChange("admin_pass") { + if newPwd, ok := d.Get("admin_pass").(string); ok { + err := servers.ChangeAdminPassword(computeClient, d.Id(), newPwd).ExtractErr() + if err != nil { + return fmt.Errorf("Error changing admin password of OpenStack server (%s): %s", d.Id(), err) + } + } + } + + if d.HasChange("floating_ip") { + oldFIP, newFIP := 
d.GetChange("floating_ip") + log.Printf("[DEBUG] Old Floating IP: %v", oldFIP) + log.Printf("[DEBUG] New Floating IP: %v", newFIP) + if oldFIP.(string) != "" { + log.Printf("[DEBUG] Attempting to disassociate %s from %s", oldFIP, d.Id()) + if err := disassociateFloatingIPFromInstance(computeClient, oldFIP.(string), d.Id(), ""); err != nil { + return fmt.Errorf("Error disassociating Floating IP during update: %s", err) + } + } + + if newFIP.(string) != "" { + log.Printf("[DEBUG] Attempting to associate %s to %s", newFIP, d.Id()) + if err := associateFloatingIPToInstance(computeClient, newFIP.(string), d.Id(), ""); err != nil { + return fmt.Errorf("Error associating Floating IP during update: %s", err) + } + } + } + + if d.HasChange("network") { + oldNetworks, newNetworks := d.GetChange("network") + oldNetworkList := oldNetworks.([]interface{}) + newNetworkList := newNetworks.([]interface{}) + for i, oldNet := range oldNetworkList { + var oldFIP, newFIP string + var oldFixedIP, newFixedIP string + + if oldNetRaw, ok := oldNet.(map[string]interface{}); ok { + oldFIP = oldNetRaw["floating_ip"].(string) + oldFixedIP = oldNetRaw["fixed_ip_v4"].(string) + } + + if len(newNetworkList) > i { + if newNetRaw, ok := newNetworkList[i].(map[string]interface{}); ok { + newFIP = newNetRaw["floating_ip"].(string) + newFixedIP = newNetRaw["fixed_ip_v4"].(string) + } + } + + // Only changes to the floating IP are supported + if oldFIP != "" && oldFIP != newFIP { + log.Printf("[DEBUG] Attempting to disassociate %s from %s", oldFIP, d.Id()) + if err := disassociateFloatingIPFromInstance(computeClient, oldFIP, d.Id(), oldFixedIP); err != nil { + return fmt.Errorf("Error disassociating Floating IP during update: %s", err) + } + } + + if newFIP != "" && oldFIP != newFIP { + log.Printf("[DEBUG] Attempting to associate %s to %s", newFIP, d.Id()) + if err := associateFloatingIPToInstance(computeClient, newFIP, d.Id(), newFixedIP); err != nil { + return fmt.Errorf("Error associating Floating IP during update: %s", err) + } + } + } + } + + if d.HasChange("volume") { + // old attachments and new attachments + oldAttachments, newAttachments := d.GetChange("volume") + // for each old attachment, detach the volume + oldAttachmentSet := oldAttachments.(*schema.Set).List() + + log.Printf("[DEBUG] Attempting to detach the following volumes: %#v", oldAttachmentSet) + if blockClient, err := config.blockStorageV1Client(GetRegion(d)); err != nil { + return err + } else { + if err := detachVolumesFromInstance(computeClient, blockClient, d.Id(), oldAttachmentSet); err != nil { + return err + } + } + + // for each new attachment, attach the volume + newAttachmentSet := newAttachments.(*schema.Set).List() + if blockClient, err := config.blockStorageV1Client(GetRegion(d)); err != nil { + return err + } else { + if err := attachVolumesToInstance(computeClient, blockClient, d.Id(), newAttachmentSet); err != nil { + return err + } + } + + d.SetPartial("volume") + } + + if d.HasChange("flavor_id") || d.HasChange("flavor_name") { + var newFlavorId string + var err error + if d.HasChange("flavor_id") { + newFlavorId = d.Get("flavor_id").(string) + } else { + newFlavorName := d.Get("flavor_name").(string) + newFlavorId, err = flavors.IDFromName(computeClient, newFlavorName) + if err != nil { + return err + } + } + + resizeOpts := &servers.ResizeOpts{ + FlavorRef: newFlavorId, + } + log.Printf("[DEBUG] Resize configuration: %#v", resizeOpts) + err = servers.Resize(computeClient, d.Id(), resizeOpts).ExtractErr() + if err != nil { + return 
fmt.Errorf("Error resizing OpenStack server: %s", err) + } + + // Wait for the instance to finish resizing. + log.Printf("[DEBUG] Waiting for instance (%s) to finish resizing", d.Id()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"RESIZE"}, + Target: []string{"VERIFY_RESIZE"}, + Refresh: ServerV2StateRefreshFunc(computeClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutUpdate), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for instance (%s) to resize: %s", d.Id(), err) + } + + // Confirm resize. + log.Printf("[DEBUG] Confirming resize") + err = servers.ConfirmResize(computeClient, d.Id()).ExtractErr() + if err != nil { + return fmt.Errorf("Error confirming resize of OpenStack server: %s", err) + } + + stateConf = &resource.StateChangeConf{ + Pending: []string{"VERIFY_RESIZE"}, + Target: []string{"ACTIVE"}, + Refresh: ServerV2StateRefreshFunc(computeClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutUpdate), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for instance (%s) to confirm resize: %s", d.Id(), err) + } + } + + return resourceComputeInstanceV2Read(d, meta) +} + +func resourceComputeInstanceV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + // Make sure all volumes are detached before deleting + volumes := d.Get("volume") + if volumeSet, ok := volumes.(*schema.Set); ok { + volumeList := volumeSet.List() + if len(volumeList) > 0 { + log.Printf("[DEBUG] Attempting to detach the following volumes: %#v", volumeList) + if blockClient, err := config.blockStorageV1Client(GetRegion(d)); err != nil { + return err + } else { + if err := detachVolumesFromInstance(computeClient, blockClient, d.Id(), volumeList); err != nil { + return err + } + } + } + } + + if d.Get("stop_before_destroy").(bool) { + err = startstop.Stop(computeClient, d.Id()).ExtractErr() + if err != nil { + log.Printf("[WARN] Error stopping OpenStack instance: %s", err) + } else { + stopStateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE"}, + Target: []string{"SHUTOFF"}, + Refresh: ServerV2StateRefreshFunc(computeClient, d.Id()), + Timeout: 3 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + log.Printf("[DEBUG] Waiting for instance (%s) to stop", d.Id()) + _, err = stopStateConf.WaitForState() + if err != nil { + log.Printf("[WARN] Error waiting for instance (%s) to stop: %s, proceeding to delete", d.Id(), err) + } + } + } + + if d.Get("force_delete").(bool) { + log.Printf("[DEBUG] Force deleting OpenStack Instance %s", d.Id()) + err = servers.ForceDelete(computeClient, d.Id()).ExtractErr() + if err != nil { + return fmt.Errorf("Error deleting OpenStack server: %s", err) + } + } else { + log.Printf("[DEBUG] Deleting OpenStack Instance %s", d.Id()) + err = servers.Delete(computeClient, d.Id()).ExtractErr() + if err != nil { + return fmt.Errorf("Error deleting OpenStack server: %s", err) + } + } + + // Wait for the instance to delete before moving on. 
+ log.Printf("[DEBUG] Waiting for instance (%s) to delete", d.Id()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE", "SHUTOFF"}, + Target: []string{"DELETED", "SOFT_DELETED"}, + Refresh: ServerV2StateRefreshFunc(computeClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for instance (%s) to delete: %s", + d.Id(), err) + } + + d.SetId("") + return nil +} + +// ServerV2StateRefreshFunc returns a resource.StateRefreshFunc that is used to watch +// an OpenStack instance. +func ServerV2StateRefreshFunc(client *gophercloud.ServiceClient, instanceID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + s, err := servers.Get(client, instanceID).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + return s, "DELETED", nil + } + return nil, "", err + } + + return s, s.Status, nil + } +} + +func resourceInstanceSecGroupsV2(d *schema.ResourceData) []string { + rawSecGroups := d.Get("security_groups").(*schema.Set).List() + secgroups := make([]string, len(rawSecGroups)) + for i, raw := range rawSecGroups { + secgroups[i] = raw.(string) + } + return secgroups +} + +// getInstanceNetworks collects instance network information from different sources +// and aggregates it all together. +func getInstanceNetworksAndAddresses(computeClient *gophercloud.ServiceClient, d *schema.ResourceData) ([]map[string]interface{}, error) { + server, err := servers.Get(computeClient, d.Id()).Extract() + + if err != nil { + return nil, CheckDeleted(d, err, "server") + } + + networkDetails, err := getInstanceNetworks(computeClient, d) + addresses := getInstanceAddresses(server.Addresses) + if err != nil { + return nil, err + } + + // if there are no networkDetails, make networks at least a length of 1 + networkLength := 1 + if len(networkDetails) > 0 { + networkLength = len(networkDetails) + } + networks := make([]map[string]interface{}, networkLength) + + // Loop through all networks and addresses, + // merge relevant address details. + if len(networkDetails) == 0 { + for netName, n := range addresses { + networks[0] = map[string]interface{}{ + "name": netName, + "fixed_ip_v4": n["fixed_ip_v4"], + "fixed_ip_v6": n["fixed_ip_v6"], + "floating_ip": n["floating_ip"], + "mac": n["mac"], + } + } + } else { + for i, net := range networkDetails { + n := addresses[net["name"].(string)] + + networks[i] = map[string]interface{}{ + "uuid": networkDetails[i]["uuid"], + "name": networkDetails[i]["name"], + "port": networkDetails[i]["port"], + "fixed_ip_v4": n["fixed_ip_v4"], + "fixed_ip_v6": n["fixed_ip_v6"], + "floating_ip": n["floating_ip"], + "mac": n["mac"], + "access_network": networkDetails[i]["access_network"], + } + } + } + + log.Printf("[DEBUG] networks: %+v", networks) + + return networks, nil +} + +func getInstanceNetworks(computeClient *gophercloud.ServiceClient, d *schema.ResourceData) ([]map[string]interface{}, error) { + rawNetworks := d.Get("network").([]interface{}) + newNetworks := make([]map[string]interface{}, 0, len(rawNetworks)) + var tenantnet tenantnetworks.Network + + tenantNetworkExt := true + for _, raw := range rawNetworks { + // Not sure what causes this, but it is a possibility (see GH-2323). + // Since we call this function to reconcile what we'll save in the + // state anyways, we just ignore it. 
+ if raw == nil { + continue + } + + rawMap := raw.(map[string]interface{}) + + // Both a floating IP and a port cannot be specified + if fip, ok := rawMap["floating_ip"].(string); ok { + if port, ok := rawMap["port"].(string); ok { + if fip != "" && port != "" { + return nil, fmt.Errorf("Only one of a floating IP or port may be specified per network.") + } + } + } + + allPages, err := tenantnetworks.List(computeClient).AllPages() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] os-tenant-networks disabled") + tenantNetworkExt = false + } + + log.Printf("[DEBUG] Err looks like: %+v", err) + if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { + if errCode.Actual == 403 { + log.Printf("[DEBUG] os-tenant-networks disabled.") + tenantNetworkExt = false + } else { + log.Printf("[DEBUG] unexpected os-tenant-networks error: %s", err) + tenantNetworkExt = false + } + } + } + + // In some cases, a call to os-tenant-networks might work, + // but the response is invalid. Catch this during extraction. + networkList := []tenantnetworks.Network{} + if tenantNetworkExt { + networkList, err = tenantnetworks.ExtractNetworks(allPages) + if err != nil { + log.Printf("[DEBUG] error extracting os-tenant-networks results: %s", err) + tenantNetworkExt = false + } + } + + networkID := "" + networkName := "" + if tenantNetworkExt { + for _, network := range networkList { + if network.Name == rawMap["name"] { + tenantnet = network + } + if network.ID == rawMap["uuid"] { + tenantnet = network + } + } + + networkID = tenantnet.ID + networkName = tenantnet.Name + } else { + networkID = rawMap["uuid"].(string) + networkName = rawMap["name"].(string) + } + + newNetworks = append(newNetworks, map[string]interface{}{ + "uuid": networkID, + "name": networkName, + "port": rawMap["port"].(string), + "fixed_ip_v4": rawMap["fixed_ip_v4"].(string), + "access_network": rawMap["access_network"].(bool), + }) + } + + log.Printf("[DEBUG] networks: %+v", newNetworks) + return newNetworks, nil +} + +func getInstanceAddresses(addresses map[string]interface{}) map[string]map[string]interface{} { + addrs := make(map[string]map[string]interface{}) + for n, networkAddresses := range addresses { + addrs[n] = make(map[string]interface{}) + for _, element := range networkAddresses.([]interface{}) { + address := element.(map[string]interface{}) + if address["OS-EXT-IPS:type"] == "floating" { + addrs[n]["floating_ip"] = address["addr"] + } else { + if address["version"].(float64) == 4 { + addrs[n]["fixed_ip_v4"] = address["addr"].(string) + } else { + addrs[n]["fixed_ip_v6"] = fmt.Sprintf("[%s]", address["addr"].(string)) + } + } + if mac, ok := address["OS-EXT-IPS-MAC:mac_addr"]; ok { + addrs[n]["mac"] = mac.(string) + } + } + } + + log.Printf("[DEBUG] Addresses: %+v", addresses) + + return addrs +} + +func getInstanceAccessAddresses(d *schema.ResourceData, networks []map[string]interface{}) (string, string) { + var hostv4, hostv6 string + + // Start with a global floating IP + floatingIP := d.Get("floating_ip").(string) + if floatingIP != "" { + hostv4 = floatingIP + } + + // Loop through all networks + // If the network has a valid floating, fixed v4, or fixed v6 address + // and hostv4 or hostv6 is not set, set hostv4/hostv6. + // If the network is an "access_network" overwrite hostv4/hostv6. 
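+	//
+	// A hypothetical example of the precedence (addresses invented for
+	// illustration only):
+	//
+	//   network 0: fixed_ip_v4 = "10.0.0.5"
+	//   network 1: fixed_ip_v4 = "192.168.1.5", access_network = true
+	//
+	// hostv4 is first set to "10.0.0.5", then overwritten with
+	// "192.168.1.5" because network 1 is the access network. A global
+	// floating_ip, if set, seeds hostv4 before this loop runs.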
+ for _, n := range networks { + var accessNetwork bool + + if an, ok := n["access_network"].(bool); ok && an { + accessNetwork = true + } + + if fixedIPv4, ok := n["fixed_ip_v4"].(string); ok && fixedIPv4 != "" { + if hostv4 == "" || accessNetwork { + hostv4 = fixedIPv4 + } + } + + if floatingIP, ok := n["floating_ip"].(string); ok && floatingIP != "" { + if hostv4 == "" || accessNetwork { + hostv4 = floatingIP + } + } + + if fixedIPv6, ok := n["fixed_ip_v6"].(string); ok && fixedIPv6 != "" { + if hostv6 == "" || accessNetwork { + hostv6 = fixedIPv6 + } + } + } + + log.Printf("[DEBUG] OpenStack Instance Network Access Addresses: %s, %s", hostv4, hostv6) + + return hostv4, hostv6 +} + +func checkInstanceFloatingIPs(d *schema.ResourceData) error { + rawNetworks := d.Get("network").([]interface{}) + floatingIP := d.Get("floating_ip").(string) + + for _, raw := range rawNetworks { + if raw == nil { + continue + } + + rawMap := raw.(map[string]interface{}) + + // Error if a floating IP was specified both globally and in the network block. + if floatingIP != "" && rawMap["floating_ip"] != "" { + return fmt.Errorf("Cannot specify a floating IP both globally and in a network block.") + } + } + return nil +} + +func associateFloatingIPsToInstance(computeClient *gophercloud.ServiceClient, d *schema.ResourceData) error { + floatingIP := d.Get("floating_ip").(string) + rawNetworks := d.Get("network").([]interface{}) + instanceID := d.Id() + + if floatingIP != "" { + if err := associateFloatingIPToInstance(computeClient, floatingIP, instanceID, ""); err != nil { + return err + } + } else { + for _, raw := range rawNetworks { + if raw == nil { + continue + } + + rawMap := raw.(map[string]interface{}) + if rawMap["floating_ip"].(string) != "" { + floatingIP := rawMap["floating_ip"].(string) + fixedIP := rawMap["fixed_ip_v4"].(string) + if err := associateFloatingIPToInstance(computeClient, floatingIP, instanceID, fixedIP); err != nil { + return err + } + } + } + } + return nil +} + +func associateFloatingIPToInstance(computeClient *gophercloud.ServiceClient, floatingIP string, instanceID string, fixedIP string) error { + associateOpts := floatingips.AssociateOpts{ + FloatingIP: floatingIP, + FixedIP: fixedIP, + } + + if err := floatingips.AssociateInstance(computeClient, instanceID, associateOpts).ExtractErr(); err != nil { + return fmt.Errorf("Error associating floating IP: %s", err) + } + + return nil +} + +func disassociateFloatingIPFromInstance(computeClient *gophercloud.ServiceClient, floatingIP string, instanceID string, fixedIP string) error { + disassociateOpts := floatingips.DisassociateOpts{ + FloatingIP: floatingIP, + } + + if err := floatingips.DisassociateInstance(computeClient, instanceID, disassociateOpts).ExtractErr(); err != nil { + return fmt.Errorf("Error disassociating floating IP: %s", err) + } + + return nil +} + +func resourceInstanceMetadataV2(d *schema.ResourceData) map[string]string { + m := make(map[string]string) + for key, val := range d.Get("metadata").(map[string]interface{}) { + m[key] = val.(string) + } + return m +} + +func resourceInstanceBlockDevicesV2(d *schema.ResourceData, bds []interface{}) ([]bootfromvolume.BlockDevice, error) { + blockDeviceOpts := make([]bootfromvolume.BlockDevice, len(bds)) + for i, bd := range bds { + bdM := bd.(map[string]interface{}) + blockDeviceOpts[i] = bootfromvolume.BlockDevice{ + UUID: bdM["uuid"].(string), + VolumeSize: bdM["volume_size"].(int), + BootIndex: bdM["boot_index"].(int), + DeleteOnTermination: 
bdM["delete_on_termination"].(bool), + GuestFormat: bdM["guest_format"].(string), + } + + sourceType := bdM["source_type"].(string) + switch sourceType { + case "blank": + blockDeviceOpts[i].SourceType = bootfromvolume.SourceBlank + case "image": + blockDeviceOpts[i].SourceType = bootfromvolume.SourceImage + case "snapshot": + blockDeviceOpts[i].SourceType = bootfromvolume.SourceSnapshot + case "volume": + blockDeviceOpts[i].SourceType = bootfromvolume.SourceVolume + default: + return blockDeviceOpts, fmt.Errorf("unknown block device source type %s", sourceType) + } + + destinationType := bdM["destination_type"].(string) + switch destinationType { + case "local": + blockDeviceOpts[i].DestinationType = bootfromvolume.DestinationLocal + case "volume": + blockDeviceOpts[i].DestinationType = bootfromvolume.DestinationVolume + default: + return blockDeviceOpts, fmt.Errorf("unknown block device destination type %s", destinationType) + } + } + + log.Printf("[DEBUG] Block Device Options: %+v", blockDeviceOpts) + return blockDeviceOpts, nil +} + +func resourceInstanceSchedulerHintsV2(d *schema.ResourceData, schedulerHintsRaw map[string]interface{}) schedulerhints.SchedulerHints { + differentHost := []string{} + if len(schedulerHintsRaw["different_host"].([]interface{})) > 0 { + for _, dh := range schedulerHintsRaw["different_host"].([]interface{}) { + differentHost = append(differentHost, dh.(string)) + } + } + + sameHost := []string{} + if len(schedulerHintsRaw["same_host"].([]interface{})) > 0 { + for _, sh := range schedulerHintsRaw["same_host"].([]interface{}) { + sameHost = append(sameHost, sh.(string)) + } + } + + query := make([]interface{}, len(schedulerHintsRaw["query"].([]interface{}))) + if len(schedulerHintsRaw["query"].([]interface{})) > 0 { + for _, q := range schedulerHintsRaw["query"].([]interface{}) { + query = append(query, q.(string)) + } + } + + schedulerHints := schedulerhints.SchedulerHints{ + Group: schedulerHintsRaw["group"].(string), + DifferentHost: differentHost, + SameHost: sameHost, + Query: query, + TargetCell: schedulerHintsRaw["target_cell"].(string), + BuildNearHostIP: schedulerHintsRaw["build_near_host_ip"].(string), + } + + return schedulerHints +} + +func getImageIDFromConfig(computeClient *gophercloud.ServiceClient, d *schema.ResourceData) (string, error) { + // If block_device was used, an Image does not need to be specified, unless an image/local + // combination was used. This emulates normal boot behavior. Otherwise, ignore the image altogether. 
+ if vL, ok := d.GetOk("block_device"); ok { + needImage := false + for _, v := range vL.([]interface{}) { + vM := v.(map[string]interface{}) + if vM["source_type"] == "image" && vM["destination_type"] == "local" { + needImage = true + } + } + if !needImage { + return "", nil + } + } + + if imageId := d.Get("image_id").(string); imageId != "" { + return imageId, nil + } else { + // try the OS_IMAGE_ID environment variable + if v := os.Getenv("OS_IMAGE_ID"); v != "" { + return v, nil + } + } + + imageName := d.Get("image_name").(string) + if imageName == "" { + // try the OS_IMAGE_NAME environment variable + if v := os.Getenv("OS_IMAGE_NAME"); v != "" { + imageName = v + } + } + + if imageName != "" { + imageId, err := images.IDFromName(computeClient, imageName) + if err != nil { + return "", err + } + return imageId, nil + } + + return "", fmt.Errorf("Neither a boot device, image ID, or image name were able to be determined.") +} + +func setImageInformation(computeClient *gophercloud.ServiceClient, server *servers.Server, d *schema.ResourceData) error { + // If block_device was used, an Image does not need to be specified, unless an image/local + // combination was used. This emulates normal boot behavior. Otherwise, ignore the image altogether. + if vL, ok := d.GetOk("block_device"); ok { + needImage := false + for _, v := range vL.([]interface{}) { + vM := v.(map[string]interface{}) + if vM["source_type"] == "image" && vM["destination_type"] == "local" { + needImage = true + } + } + if !needImage { + d.Set("image_id", "Attempt to boot from volume - no image supplied") + return nil + } + } + + imageId := server.Image["id"].(string) + if imageId != "" { + d.Set("image_id", imageId) + if image, err := images.Get(computeClient, imageId).Extract(); err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + // If the image name can't be found, set the value to "Image not found". + // The most likely scenario is that the image no longer exists in the Image Service + // but the instance still has a record from when it existed. 
+ d.Set("image_name", "Image not found") + return nil + } + return err + } else { + d.Set("image_name", image.Name) + } + } + + return nil +} + +func getFlavorID(client *gophercloud.ServiceClient, d *schema.ResourceData) (string, error) { + flavorId := d.Get("flavor_id").(string) + + if flavorId != "" { + return flavorId, nil + } + + flavorName := d.Get("flavor_name").(string) + return flavors.IDFromName(client, flavorName) +} + +func resourceComputeVolumeAttachmentHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["volume_id"].(string))) + + return hashcode.String(buf.String()) +} + +func resourceComputeSchedulerHintsHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + + if m["group"] != nil { + buf.WriteString(fmt.Sprintf("%s-", m["group"].(string))) + } + + if m["target_cell"] != nil { + buf.WriteString(fmt.Sprintf("%s-", m["target_cell"].(string))) + } + + if m["build_host_near_ip"] != nil { + buf.WriteString(fmt.Sprintf("%s-", m["build_host_near_ip"].(string))) + } + + buf.WriteString(fmt.Sprintf("%s-", m["different_host"].([]interface{}))) + buf.WriteString(fmt.Sprintf("%s-", m["same_host"].([]interface{}))) + buf.WriteString(fmt.Sprintf("%s-", m["query"].([]interface{}))) + + return hashcode.String(buf.String()) +} + +func attachVolumesToInstance(computeClient *gophercloud.ServiceClient, blockClient *gophercloud.ServiceClient, serverId string, vols []interface{}) error { + for _, v := range vols { + va := v.(map[string]interface{}) + volumeId := va["volume_id"].(string) + device := va["device"].(string) + + s := "" + if serverId != "" { + s = serverId + } else if va["server_id"] != "" { + s = va["server_id"].(string) + } else { + return fmt.Errorf("Unable to determine server ID to attach volume.") + } + + vaOpts := &volumeattach.CreateOpts{ + Device: device, + VolumeID: volumeId, + } + + if _, err := volumeattach.Create(computeClient, s, vaOpts).Extract(); err != nil { + return err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"attaching", "available"}, + Target: []string{"in-use"}, + Refresh: VolumeV1StateRefreshFunc(blockClient, va["volume_id"].(string)), + Timeout: 30 * time.Minute, + Delay: 5 * time.Second, + MinTimeout: 2 * time.Second, + } + + if _, err := stateConf.WaitForState(); err != nil { + return err + } + + log.Printf("[INFO] Attached volume %s to instance %s", volumeId, serverId) + } + return nil +} + +func detachVolumesFromInstance(computeClient *gophercloud.ServiceClient, blockClient *gophercloud.ServiceClient, serverId string, vols []interface{}) error { + for _, v := range vols { + va := v.(map[string]interface{}) + aId := va["id"].(string) + + log.Printf("[INFO] Attempting to detach volume %s", va["volume_id"]) + if err := volumeattach.Delete(computeClient, serverId, aId).ExtractErr(); err != nil { + return err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"detaching", "in-use"}, + Target: []string{"available"}, + Refresh: VolumeV1StateRefreshFunc(blockClient, va["volume_id"].(string)), + Timeout: 30 * time.Minute, + Delay: 5 * time.Second, + MinTimeout: 2 * time.Second, + } + + if _, err := stateConf.WaitForState(); err != nil { + return err + } + log.Printf("[INFO] Detached volume %s from instance %s", va["volume_id"], serverId) + } + + return nil +} + +func getVolumeAttachments(computeClient *gophercloud.ServiceClient, d *schema.ResourceData) error { + var vols []map[string]interface{} + + allPages, err := 
volumeattach.List(computeClient, d.Id()).AllPages() + if err != nil { + if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { + if errCode.Actual == 403 { + log.Printf("[DEBUG] os-volume_attachments disabled.") + return nil + } else { + return err + } + } + } + + allVolumeAttachments, err := volumeattach.ExtractVolumeAttachments(allPages) + if err != nil { + return err + } + + if v, ok := d.GetOk("volume"); ok { + volumes := v.(*schema.Set).List() + for _, volume := range volumes { + if volumeMap, ok := volume.(map[string]interface{}); ok { + if v, ok := volumeMap["volume_id"].(string); ok { + for _, volumeAttachment := range allVolumeAttachments { + if v == volumeAttachment.ID { + vol := make(map[string]interface{}) + vol["id"] = volumeAttachment.ID + vol["volume_id"] = volumeAttachment.VolumeID + vol["device"] = volumeAttachment.Device + vols = append(vols, vol) + } + } + } + } + } + } + + log.Printf("[INFO] Volume attachments: %v", vols) + d.Set("volume", vols) + + return nil +} + +func checkBlockDeviceConfig(d *schema.ResourceData) error { + if vL, ok := d.GetOk("block_device"); ok { + for _, v := range vL.([]interface{}) { + vM := v.(map[string]interface{}) + + if vM["source_type"] != "blank" && vM["uuid"] == "" { + return fmt.Errorf("You must specify a uuid for %s block device types", vM["source_type"]) + } + + if vM["source_type"] == "image" && vM["destination_type"] == "volume" { + if vM["volume_size"] == 0 { + return fmt.Errorf("You must specify a volume_size when creating a volume from an image") + } + } + + if vM["source_type"] == "blank" && vM["destination_type"] == "local" { + if vM["volume_size"] == 0 { + return fmt.Errorf("You must specify a volume_size when creating a blank block device") + } + } + } + } + + return nil +} + +func resourceComputeInstancePersonalityHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["file"].(string))) + + return hashcode.String(buf.String()) +} + +func resourceInstancePersonalityV2(d *schema.ResourceData) servers.Personality { + var personalities servers.Personality + + if v := d.Get("personality"); v != nil { + personalityList := v.(*schema.Set).List() + if len(personalityList) > 0 { + for _, p := range personalityList { + rawPersonality := p.(map[string]interface{}) + file := servers.File{ + Path: rawPersonality["file"].(string), + Contents: []byte(rawPersonality["content"].(string)), + } + + log.Printf("[DEBUG] OpenStack Compute Instance Personality: %+v", file) + + personalities = append(personalities, &file) + } + } + } + + return personalities +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_keypair_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_keypair_v2.go new file mode 100644 index 000000000..7537d3bda --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_keypair_v2.go @@ -0,0 +1,104 @@ +package openstack + +import ( + "fmt" + "log" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeKeypairV2() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeKeypairV2Create, + Read: resourceComputeKeypairV2Read, + Delete: resourceComputeKeypairV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + 
Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "public_key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "value_specs": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceComputeKeypairV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + createOpts := KeyPairCreateOpts{ + keypairs.CreateOpts{ + Name: d.Get("name").(string), + PublicKey: d.Get("public_key").(string), + }, + MapValueSpecs(d), + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + kp, err := keypairs.Create(computeClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack keypair: %s", err) + } + + d.SetId(kp.Name) + + return resourceComputeKeypairV2Read(d, meta) +} + +func resourceComputeKeypairV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + kp, err := keypairs.Get(computeClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "keypair") + } + + d.Set("name", kp.Name) + d.Set("public_key", kp.PublicKey) + d.Set("region", GetRegion(d)) + + return nil +} + +func resourceComputeKeypairV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + err = keypairs.Delete(computeClient, d.Id()).ExtractErr() + if err != nil { + return fmt.Errorf("Error deleting OpenStack keypair: %s", err) + } + d.SetId("") + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_secgroup_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_secgroup_v2.go new file mode 100644 index 000000000..99887a2da --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_secgroup_v2.go @@ -0,0 +1,397 @@ +package openstack + +import ( + "bytes" + "fmt" + "log" + "strings" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeSecGroupV2() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeSecGroupV2Create, + Read: resourceComputeSecGroupV2Read, + Update: resourceComputeSecGroupV2Update, + Delete: resourceComputeSecGroupV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: 
schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + "rule": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "from_port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: false, + }, + "to_port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: false, + }, + "ip_protocol": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + "cidr": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + StateFunc: func(v interface{}) string { + return strings.ToLower(v.(string)) + }, + }, + "from_group_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "self": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: false, + }, + }, + }, + Set: secgroupRuleV2Hash, + }, + }, + } +} + +func resourceComputeSecGroupV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + // Before creating the security group, make sure all rules are valid. + if err := checkSecGroupV2RulesForErrors(d); err != nil { + return err + } + + // If all rules are valid, proceed with creating the security gruop. + createOpts := secgroups.CreateOpts{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + sg, err := secgroups.Create(computeClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack security group: %s", err) + } + + d.SetId(sg.ID) + + // Now that the security group has been created, iterate through each rule and create it + createRuleOptsList := resourceSecGroupRulesV2(d) + for _, createRuleOpts := range createRuleOptsList { + _, err := secgroups.CreateRule(computeClient, createRuleOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack security group rule: %s", err) + } + } + + return resourceComputeSecGroupV2Read(d, meta) +} + +func resourceComputeSecGroupV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + sg, err := secgroups.Get(computeClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "security group") + } + + d.Set("name", sg.Name) + d.Set("description", sg.Description) + + rtm, err := rulesToMap(computeClient, d, sg.Rules) + if err != nil { + return err + } + log.Printf("[DEBUG] rulesToMap(sg.Rules): %+v", rtm) + d.Set("rule", rtm) + + d.Set("region", GetRegion(d)) + + return nil +} + +func resourceComputeSecGroupV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + updateOpts := secgroups.UpdateOpts{ + Name: d.Get("name").(string), + Description: 
d.Get("description").(string), + } + + log.Printf("[DEBUG] Updating Security Group (%s) with options: %+v", d.Id(), updateOpts) + + _, err = secgroups.Update(computeClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack security group (%s): %s", d.Id(), err) + } + + if d.HasChange("rule") { + oldSGRaw, newSGRaw := d.GetChange("rule") + oldSGRSet, newSGRSet := oldSGRaw.(*schema.Set), newSGRaw.(*schema.Set) + secgrouprulesToAdd := newSGRSet.Difference(oldSGRSet) + secgrouprulesToRemove := oldSGRSet.Difference(newSGRSet) + + log.Printf("[DEBUG] Security group rules to add: %v", secgrouprulesToAdd) + log.Printf("[DEBUG] Security groups rules to remove: %v", secgrouprulesToRemove) + + for _, rawRule := range secgrouprulesToAdd.List() { + createRuleOpts := resourceSecGroupRuleCreateOptsV2(d, rawRule) + rule, err := secgroups.CreateRule(computeClient, createRuleOpts).Extract() + if err != nil { + return fmt.Errorf("Error adding rule to OpenStack security group (%s): %s", d.Id(), err) + } + log.Printf("[DEBUG] Added rule (%s) to OpenStack security group (%s) ", rule.ID, d.Id()) + } + + for _, r := range secgrouprulesToRemove.List() { + rule := resourceSecGroupRuleV2(d, r) + err := secgroups.DeleteRule(computeClient, rule.ID).ExtractErr() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + continue + } + + return fmt.Errorf("Error removing rule (%s) from OpenStack security group (%s)", rule.ID, d.Id()) + } else { + log.Printf("[DEBUG] Removed rule (%s) from OpenStack security group (%s): %s", rule.ID, d.Id(), err) + } + } + } + + return resourceComputeSecGroupV2Read(d, meta) +} + +func resourceComputeSecGroupV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE"}, + Target: []string{"DELETED"}, + Refresh: SecGroupV2StateRefreshFunc(computeClient, d), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error deleting OpenStack security group: %s", err) + } + + d.SetId("") + return nil +} + +func resourceSecGroupRulesV2(d *schema.ResourceData) []secgroups.CreateRuleOpts { + rawRules := d.Get("rule").(*schema.Set).List() + createRuleOptsList := make([]secgroups.CreateRuleOpts, len(rawRules)) + for i, rawRule := range rawRules { + createRuleOptsList[i] = resourceSecGroupRuleCreateOptsV2(d, rawRule) + } + return createRuleOptsList +} + +func resourceSecGroupRuleCreateOptsV2(d *schema.ResourceData, rawRule interface{}) secgroups.CreateRuleOpts { + rawRuleMap := rawRule.(map[string]interface{}) + groupId := rawRuleMap["from_group_id"].(string) + if rawRuleMap["self"].(bool) { + groupId = d.Id() + } + return secgroups.CreateRuleOpts{ + ParentGroupID: d.Id(), + FromPort: rawRuleMap["from_port"].(int), + ToPort: rawRuleMap["to_port"].(int), + IPProtocol: rawRuleMap["ip_protocol"].(string), + CIDR: rawRuleMap["cidr"].(string), + FromGroupID: groupId, + } +} + +func checkSecGroupV2RulesForErrors(d *schema.ResourceData) error { + rawRules := d.Get("rule").(*schema.Set).List() + for _, rawRule := range rawRules { + rawRuleMap := rawRule.(map[string]interface{}) + + // only one of cidr, from_group_id, or self can be set + cidr := rawRuleMap["cidr"].(string) + 
groupId := rawRuleMap["from_group_id"].(string) + self := rawRuleMap["self"].(bool) + errorMessage := fmt.Errorf("Only one of cidr, from_group_id, or self can be set.") + + // if cidr is set, from_group_id and self cannot be set + if cidr != "" { + if groupId != "" || self { + return errorMessage + } + } + + // if from_group_id is set, cidr and self cannot be set + if groupId != "" { + if cidr != "" || self { + return errorMessage + } + } + + // if self is set, cidr and from_group_id cannot be set + if self { + if cidr != "" || groupId != "" { + return errorMessage + } + } + } + + return nil +} + +func resourceSecGroupRuleV2(d *schema.ResourceData, rawRule interface{}) secgroups.Rule { + rawRuleMap := rawRule.(map[string]interface{}) + return secgroups.Rule{ + ID: rawRuleMap["id"].(string), + ParentGroupID: d.Id(), + FromPort: rawRuleMap["from_port"].(int), + ToPort: rawRuleMap["to_port"].(int), + IPProtocol: rawRuleMap["ip_protocol"].(string), + IPRange: secgroups.IPRange{CIDR: rawRuleMap["cidr"].(string)}, + } +} + +func rulesToMap(computeClient *gophercloud.ServiceClient, d *schema.ResourceData, sgrs []secgroups.Rule) ([]map[string]interface{}, error) { + sgrMap := make([]map[string]interface{}, len(sgrs)) + for i, sgr := range sgrs { + groupId := "" + self := false + if sgr.Group.Name != "" { + if sgr.Group.Name == d.Get("name").(string) { + self = true + } else { + // Since Nova only returns the secgroup Name (and not the ID) for the group attribute, + // we need to look up all security groups and match the name. + // Nevermind that Nova wants the ID when setting the Group *and* that multiple groups + // with the same name can exist... + allPages, err := secgroups.List(computeClient).AllPages() + if err != nil { + return nil, err + } + securityGroups, err := secgroups.ExtractSecurityGroups(allPages) + if err != nil { + return nil, err + } + + for _, sg := range securityGroups { + if sg.Name == sgr.Group.Name { + groupId = sg.ID + } + } + } + } + + sgrMap[i] = map[string]interface{}{ + "id": sgr.ID, + "from_port": sgr.FromPort, + "to_port": sgr.ToPort, + "ip_protocol": sgr.IPProtocol, + "cidr": sgr.IPRange.CIDR, + "self": self, + "from_group_id": groupId, + } + } + return sgrMap, nil +} + +func secgroupRuleV2Hash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%d-", m["from_port"].(int))) + buf.WriteString(fmt.Sprintf("%d-", m["to_port"].(int))) + buf.WriteString(fmt.Sprintf("%s-", m["ip_protocol"].(string))) + buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["cidr"].(string)))) + buf.WriteString(fmt.Sprintf("%s-", m["from_group_id"].(string))) + buf.WriteString(fmt.Sprintf("%t-", m["self"].(bool))) + + return hashcode.String(buf.String()) +} + +func SecGroupV2StateRefreshFunc(computeClient *gophercloud.ServiceClient, d *schema.ResourceData) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete Security Group %s.\n", d.Id()) + + err := secgroups.Delete(computeClient, d.Id()).ExtractErr() + if err != nil { + return nil, "", err + } + + s, err := secgroups.Get(computeClient, d.Id()).Extract() + if err != nil { + err = CheckDeleted(d, err, "Security Group") + if err != nil { + return s, "", err + } else { + log.Printf("[DEBUG] Successfully deleted Security Group %s", d.Id()) + return s, "DELETED", nil + } + } + + log.Printf("[DEBUG] Security Group %s still active.\n", d.Id()) + return s, "ACTIVE", nil + } +} diff --git 
a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_servergroup_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_servergroup_v2.go new file mode 100644 index 000000000..70b31eec6 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_servergroup_v2.go @@ -0,0 +1,137 @@ +package openstack + +import ( + "fmt" + "log" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeServerGroupV2() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeServerGroupV2Create, + Read: resourceComputeServerGroupV2Read, + Update: nil, + Delete: resourceComputeServerGroupV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + "name": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "policies": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "members": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "value_specs": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceComputeServerGroupV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + createOpts := ServerGroupCreateOpts{ + servergroups.CreateOpts{ + Name: d.Get("name").(string), + Policies: resourceServerGroupPoliciesV2(d), + }, + MapValueSpecs(d), + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + newSG, err := servergroups.Create(computeClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating ServerGroup: %s", err) + } + + d.SetId(newSG.ID) + + return resourceComputeServerGroupV2Read(d, meta) +} + +func resourceComputeServerGroupV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + sg, err := servergroups.Get(computeClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "server group") + } + + log.Printf("[DEBUG] Retrieved ServerGroup %s: %+v", d.Id(), sg) + + // Set the name + d.Set("name", sg.Name) + + // Set the policies + policies := []string{} + for _, p := range sg.Policies { + policies = append(policies, p) + } + d.Set("policies", policies) + + // Set the members + members := []string{} + for _, m := range sg.Members { + members = append(members, m) + } + d.Set("members", members) + + d.Set("region", GetRegion(d)) + + return nil +} + +func resourceComputeServerGroupV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + log.Printf("[DEBUG] Deleting ServerGroup 
%s", d.Id()) + if err := servergroups.Delete(computeClient, d.Id()).ExtractErr(); err != nil { + return fmt.Errorf("Error deleting ServerGroup: %s", err) + } + + return nil +} + +func resourceServerGroupPoliciesV2(d *schema.ResourceData) []string { + rawPolicies := d.Get("policies").([]interface{}) + policies := make([]string, len(rawPolicies)) + for i, raw := range rawPolicies { + policies[i] = raw.(string) + } + return policies +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_volume_attach_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_volume_attach_v2.go new file mode 100644 index 000000000..4fa6cb812 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_volume_attach_v2.go @@ -0,0 +1,222 @@ +package openstack + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeVolumeAttachV2() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeVolumeAttachV2Create, + Read: resourceComputeVolumeAttachV2Read, + Delete: resourceComputeVolumeAttachV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + + "instance_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "volume_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "device": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + }, + } +} + +func resourceComputeVolumeAttachV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + instanceId := d.Get("instance_id").(string) + volumeId := d.Get("volume_id").(string) + + var device string + if v, ok := d.GetOk("device"); ok { + device = v.(string) + } + + attachOpts := volumeattach.CreateOpts{ + Device: device, + VolumeID: volumeId, + } + + log.Printf("[DEBUG] Creating volume attachment: %#v", attachOpts) + + attachment, err := volumeattach.Create(computeClient, instanceId, attachOpts).Extract() + if err != nil { + return err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ATTACHING"}, + Target: []string{"ATTACHED"}, + Refresh: resourceComputeVolumeAttachV2AttachFunc(computeClient, instanceId, attachment.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 30 * time.Second, + MinTimeout: 15 * time.Second, + } + + if _, err = stateConf.WaitForState(); err != nil { + return fmt.Errorf("Error attaching OpenStack volume: %s", err) + } + + log.Printf("[DEBUG] Created volume attachment: %#v", attachment) + + // Use the instance ID and attachment ID as the resource ID. + // This is because an attachment cannot be retrieved just by its ID alone. 
+ id := fmt.Sprintf("%s/%s", instanceId, attachment.ID) + + d.SetId(id) + + return resourceComputeVolumeAttachV2Read(d, meta) +} + +func resourceComputeVolumeAttachV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + instanceId, attachmentId, err := parseComputeVolumeAttachmentId(d.Id()) + if err != nil { + return err + } + + attachment, err := volumeattach.Get(computeClient, instanceId, attachmentId).Extract() + if err != nil { + return CheckDeleted(d, err, "compute_volume_attach") + } + + log.Printf("[DEBUG] Retrieved volume attachment: %#v", attachment) + + d.Set("instance_id", attachment.ServerID) + d.Set("volume_id", attachment.VolumeID) + d.Set("device", attachment.Device) + d.Set("region", GetRegion(d)) + + return nil +} + +func resourceComputeVolumeAttachV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + instanceId, attachmentId, err := parseComputeVolumeAttachmentId(d.Id()) + if err != nil { + return err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{""}, + Target: []string{"DETACHED"}, + Refresh: resourceComputeVolumeAttachV2DetachFunc(computeClient, instanceId, attachmentId), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 15 * time.Second, + MinTimeout: 15 * time.Second, + } + + if _, err = stateConf.WaitForState(); err != nil { + return fmt.Errorf("Error detaching OpenStack volume: %s", err) + } + + return nil +} + +func resourceComputeVolumeAttachV2AttachFunc( + computeClient *gophercloud.ServiceClient, instanceId, attachmentId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + va, err := volumeattach.Get(computeClient, instanceId, attachmentId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + return va, "ATTACHING", nil + } + return va, "", err + } + + return va, "ATTACHED", nil + } +} + +func resourceComputeVolumeAttachV2DetachFunc( + computeClient *gophercloud.ServiceClient, instanceId, attachmentId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to detach OpenStack volume %s from instance %s", + attachmentId, instanceId) + + va, err := volumeattach.Get(computeClient, instanceId, attachmentId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + return va, "DETACHED", nil + } + return va, "", err + } + + err = volumeattach.Delete(computeClient, instanceId, attachmentId).ExtractErr() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + return va, "DETACHED", nil + } + + if _, ok := err.(gophercloud.ErrDefault400); ok { + return nil, "", nil + } + + return nil, "", err + } + + log.Printf("[DEBUG] OpenStack Volume Attachment (%s) is still active.", attachmentId) + return nil, "", nil + } +} + +func parseComputeVolumeAttachmentId(id string) (string, string, error) { + idParts := strings.Split(id, "/") + if len(idParts) < 2 { + return "", "", fmt.Errorf("Unable to determine volume attachment ID") + } + + instanceId := idParts[0] + attachmentId := idParts[1] + + return instanceId, attachmentId, nil +} diff --git 
a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_dns_recordset_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_dns_recordset_v2.go new file mode 100644 index 000000000..cf911cd4a --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_dns_recordset_v2.go @@ -0,0 +1,276 @@ +package openstack + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceDNSRecordSetV2() *schema.Resource { + return &schema.Resource{ + Create: resourceDNSRecordSetV2Create, + Read: resourceDNSRecordSetV2Read, + Update: resourceDNSRecordSetV2Update, + Delete: resourceDNSRecordSetV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + "zone_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "records": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "ttl": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: false, + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "value_specs": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceDNSRecordSetV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + dnsClient, err := config.dnsV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack DNS client: %s", err) + } + + recordsraw := d.Get("records").([]interface{}) + records := make([]string, len(recordsraw)) + for i, recordraw := range recordsraw { + records[i] = recordraw.(string) + } + + createOpts := RecordSetCreateOpts{ + recordsets.CreateOpts{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + Records: records, + TTL: d.Get("ttl").(int), + Type: d.Get("type").(string), + }, + MapValueSpecs(d), + } + + zoneID := d.Get("zone_id").(string) + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + n, err := recordsets.Create(dnsClient, zoneID, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack DNS record set: %s", err) + } + + log.Printf("[DEBUG] Waiting for DNS record set (%s) to become available", n.ID) + stateConf := &resource.StateChangeConf{ + Target: []string{"ACTIVE"}, + Pending: []string{"PENDING"}, + Refresh: waitForDNSRecordSet(dnsClient, zoneID, n.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + + id := 
fmt.Sprintf("%s/%s", zoneID, n.ID) + d.SetId(id) + + log.Printf("[DEBUG] Created OpenStack DNS record set %s: %#v", n.ID, n) + return resourceDNSRecordSetV2Read(d, meta) +} + +func resourceDNSRecordSetV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + dnsClient, err := config.dnsV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack DNS client: %s", err) + } + + // Obtain relevant info from parsing the ID + zoneID, recordsetID, err := parseDNSV2RecordSetID(d.Id()) + if err != nil { + return err + } + + n, err := recordsets.Get(dnsClient, zoneID, recordsetID).Extract() + if err != nil { + return CheckDeleted(d, err, "record_set") + } + + log.Printf("[DEBUG] Retrieved record set %s: %#v", recordsetID, n) + + d.Set("name", n.Name) + d.Set("description", n.Description) + d.Set("ttl", n.TTL) + d.Set("type", n.Type) + d.Set("records", n.Records) + d.Set("region", GetRegion(d)) + d.Set("zone_id", zoneID) + + return nil +} + +func resourceDNSRecordSetV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + dnsClient, err := config.dnsV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack DNS client: %s", err) + } + + var updateOpts recordsets.UpdateOpts + if d.HasChange("ttl") { + updateOpts.TTL = d.Get("ttl").(int) + } + + if d.HasChange("records") { + recordsraw := d.Get("records").([]interface{}) + records := make([]string, len(recordsraw)) + for i, recordraw := range recordsraw { + records[i] = recordraw.(string) + } + updateOpts.Records = records + } + + if d.HasChange("description") { + updateOpts.Description = d.Get("description").(string) + } + + // Obtain relevant info from parsing the ID + zoneID, recordsetID, err := parseDNSV2RecordSetID(d.Id()) + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating record set %s with options: %#v", recordsetID, updateOpts) + + _, err = recordsets.Update(dnsClient, zoneID, recordsetID, updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack DNS record set: %s", err) + } + + log.Printf("[DEBUG] Waiting for DNS record set (%s) to update", recordsetID) + stateConf := &resource.StateChangeConf{ + Target: []string{"ACTIVE"}, + Pending: []string{"PENDING"}, + Refresh: waitForDNSRecordSet(dnsClient, zoneID, recordsetID), + Timeout: d.Timeout(schema.TimeoutUpdate), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + + return resourceDNSRecordSetV2Read(d, meta) +} + +func resourceDNSRecordSetV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + dnsClient, err := config.dnsV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack DNS client: %s", err) + } + + // Obtain relevant info from parsing the ID + zoneID, recordsetID, err := parseDNSV2RecordSetID(d.Id()) + if err != nil { + return err + } + + err = recordsets.Delete(dnsClient, zoneID, recordsetID).ExtractErr() + if err != nil { + return fmt.Errorf("Error deleting OpenStack DNS record set: %s", err) + } + + log.Printf("[DEBUG] Waiting for DNS record set (%s) to be deleted", recordsetID) + stateConf := &resource.StateChangeConf{ + Target: []string{"DELETED"}, + Pending: []string{"ACTIVE", "PENDING"}, + Refresh: waitForDNSRecordSet(dnsClient, zoneID, recordsetID), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + + d.SetId("") + return 
nil +} + +func waitForDNSRecordSet(dnsClient *gophercloud.ServiceClient, zoneID, recordsetId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + recordset, err := recordsets.Get(dnsClient, zoneID, recordsetId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + return recordset, "DELETED", nil + } + + return nil, "", err + } + + log.Printf("[DEBUG] OpenStack DNS record set (%s) current status: %s", recordset.ID, recordset.Status) + return recordset, recordset.Status, nil + } +} + +func parseDNSV2RecordSetID(id string) (string, string, error) { + idParts := strings.Split(id, "/") + if len(idParts) != 2 { + return "", "", fmt.Errorf("Unable to determine DNS record set ID from raw ID: %s", id) + } + + zoneID := idParts[0] + recordsetID := idParts[1] + + return zoneID, recordsetID, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_dns_zone_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_dns_zone_v2.go new file mode 100644 index 000000000..2b4b7995b --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_dns_zone_v2.go @@ -0,0 +1,276 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/dns/v2/zones" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceDNSZoneV2() *schema.Resource { + return &schema.Resource{ + Create: resourceDNSZoneV2Create, + Read: resourceDNSZoneV2Read, + Update: resourceDNSZoneV2Update, + Delete: resourceDNSZoneV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "email": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: resourceDNSZoneV2ValidType, + }, + "attributes": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + "ttl": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: false, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "masters": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "value_specs": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceDNSZoneV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + dnsClient, err := config.dnsV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack DNS client: %s", err) + } + + mastersraw := d.Get("masters").(*schema.Set).List() + masters := make([]string, len(mastersraw)) + for i, masterraw := range mastersraw { + 
masters[i] = masterraw.(string) + } + + attrsraw := d.Get("attributes").(map[string]interface{}) + attrs := make(map[string]string, len(attrsraw)) + for k, v := range attrsraw { + attrs[k] = v.(string) + } + + createOpts := ZoneCreateOpts{ + zones.CreateOpts{ + Name: d.Get("name").(string), + Type: d.Get("type").(string), + Attributes: attrs, + TTL: d.Get("ttl").(int), + Email: d.Get("email").(string), + Description: d.Get("description").(string), + Masters: masters, + }, + MapValueSpecs(d), + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + n, err := zones.Create(dnsClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack DNS zone: %s", err) + } + + log.Printf("[DEBUG] Waiting for DNS Zone (%s) to become available", n.ID) + stateConf := &resource.StateChangeConf{ + Target: []string{"ACTIVE"}, + Pending: []string{"PENDING"}, + Refresh: waitForDNSZone(dnsClient, n.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + + d.SetId(n.ID) + + log.Printf("[DEBUG] Created OpenStack DNS Zone %s: %#v", n.ID, n) + return resourceDNSZoneV2Read(d, meta) +} + +func resourceDNSZoneV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + dnsClient, err := config.dnsV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack DNS client: %s", err) + } + + n, err := zones.Get(dnsClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "zone") + } + + log.Printf("[DEBUG] Retrieved Zone %s: %#v", d.Id(), n) + + d.Set("name", n.Name) + d.Set("email", n.Email) + d.Set("description", n.Description) + d.Set("ttl", n.TTL) + d.Set("type", n.Type) + d.Set("attributes", n.Attributes) + d.Set("masters", n.Masters) + d.Set("region", GetRegion(d)) + + return nil +} + +func resourceDNSZoneV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + dnsClient, err := config.dnsV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack DNS client: %s", err) + } + + var updateOpts zones.UpdateOpts + if d.HasChange("email") { + updateOpts.Email = d.Get("email").(string) + } + if d.HasChange("ttl") { + updateOpts.TTL = d.Get("ttl").(int) + } + if d.HasChange("masters") { + mastersraw := d.Get("masters").(*schema.Set).List() + masters := make([]string, len(mastersraw)) + for i, masterraw := range mastersraw { + masters[i] = masterraw.(string) + } + updateOpts.Masters = masters + } + if d.HasChange("description") { + updateOpts.Description = d.Get("description").(string) + } + + log.Printf("[DEBUG] Updating Zone %s with options: %#v", d.Id(), updateOpts) + + _, err = zones.Update(dnsClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack DNS Zone: %s", err) + } + + log.Printf("[DEBUG] Waiting for DNS Zone (%s) to update", d.Id()) + stateConf := &resource.StateChangeConf{ + Target: []string{"ACTIVE"}, + Pending: []string{"PENDING"}, + Refresh: waitForDNSZone(dnsClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutUpdate), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + + return resourceDNSZoneV2Read(d, meta) +} + +func resourceDNSZoneV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + dnsClient, err := config.dnsV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack DNS client: %s", err) + } + + _, 
err = zones.Delete(dnsClient, d.Id()).Extract()
+ if err != nil {
+ return fmt.Errorf("Error deleting OpenStack DNS Zone: %s", err)
+ }
+
+ log.Printf("[DEBUG] Waiting for DNS Zone (%s) to be deleted", d.Id())
+ stateConf := &resource.StateChangeConf{
+ Target: []string{"DELETED"},
+ Pending: []string{"ACTIVE", "PENDING"},
+ Refresh: waitForDNSZone(dnsClient, d.Id()),
+ Timeout: d.Timeout(schema.TimeoutDelete),
+ Delay: 5 * time.Second,
+ MinTimeout: 3 * time.Second,
+ }
+
+ _, err = stateConf.WaitForState()
+
+ d.SetId("")
+ return nil
+}
+
+func resourceDNSZoneV2ValidType(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ validTypes := []string{
+ "PRIMARY",
+ "SECONDARY",
+ }
+
+ for _, v := range validTypes {
+ if value == v {
+ return
+ }
+ }
+
+ err := fmt.Errorf("%s must be one of %s", k, validTypes)
+ errors = append(errors, err)
+ return
+}
+
+func waitForDNSZone(dnsClient *gophercloud.ServiceClient, zoneId string) resource.StateRefreshFunc {
+ return func() (interface{}, string, error) {
+ zone, err := zones.Get(dnsClient, zoneId).Extract()
+ if err != nil {
+ if _, ok := err.(gophercloud.ErrDefault404); ok {
+ return zone, "DELETED", nil
+ }
+
+ return nil, "", err
+ }
+
+ log.Printf("[DEBUG] OpenStack DNS Zone (%s) current status: %s", zone.ID, zone.Status)
+ return zone, zone.Status, nil
+ }
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_firewall_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_firewall_v1.go
new file mode 100644
index 000000000..66601b998
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_firewall_v1.go
@@ -0,0 +1,324 @@
+package openstack
+
+import (
+ "fmt"
+ "log"
+ "time"
+
+ "github.com/gophercloud/gophercloud"
+ "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls"
+ "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceFWFirewallV1() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceFWFirewallV1Create,
+ Read: resourceFWFirewallV1Read,
+ Update: resourceFWFirewallV1Update,
+ Delete: resourceFWFirewallV1Delete,
+ Importer: &schema.ResourceImporter{
+ State: schema.ImportStatePassthrough,
+ },
+
+ Timeouts: &schema.ResourceTimeout{
+ Create: schema.DefaultTimeout(10 * time.Minute),
+ Update: schema.DefaultTimeout(10 * time.Minute),
+ Delete: schema.DefaultTimeout(10 * time.Minute),
+ },
+
+ Schema: map[string]*schema.Schema{
+ "region": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""),
+ },
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "description": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "policy_id": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "admin_state_up": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ Default: true,
+ },
+ "tenant_id": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ Computed: true,
+ },
+ "associated_routers": &schema.Schema{
+ Type: schema.TypeSet,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Set: schema.HashString,
+ ConflictsWith: []string{"no_routers"},
+ },
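+ // no_routers requests a firewall with an empty router list, which is why it
+ // conflicts with associated_routers above.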
+ "no_routers": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ConflictsWith: []string{"associated_routers"}, + }, + "value_specs": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceFWFirewallV1Create(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var createOpts firewalls.CreateOptsBuilder + + adminStateUp := d.Get("admin_state_up").(bool) + createOpts = &firewalls.CreateOpts{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + PolicyID: d.Get("policy_id").(string), + AdminStateUp: &adminStateUp, + TenantID: d.Get("tenant_id").(string), + } + + associatedRoutersRaw := d.Get("associated_routers").(*schema.Set).List() + if len(associatedRoutersRaw) > 0 { + log.Printf("[DEBUG] Will attempt to associate Firewall with router(s): %+v", associatedRoutersRaw) + + var routerIds []string + for _, v := range associatedRoutersRaw { + routerIds = append(routerIds, v.(string)) + } + + createOpts = &routerinsertion.CreateOptsExt{ + CreateOptsBuilder: createOpts, + RouterIDs: routerIds, + } + } + + if d.Get("no_routers").(bool) { + routerIds := make([]string, 0) + log.Println("[DEBUG] No routers specified. Setting to empty slice") + createOpts = &routerinsertion.CreateOptsExt{ + CreateOptsBuilder: createOpts, + RouterIDs: routerIds, + } + } + + createOpts = &FirewallCreateOpts{ + createOpts, + MapValueSpecs(d), + } + + log.Printf("[DEBUG] Create firewall: %#v", createOpts) + + firewall, err := firewalls.Create(networkingClient, createOpts).Extract() + if err != nil { + return err + } + + log.Printf("[DEBUG] Firewall created: %#v", firewall) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING_CREATE"}, + Target: []string{"ACTIVE"}, + Refresh: waitForFirewallActive(networkingClient, firewall.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 0, + MinTimeout: 2 * time.Second, + } + + _, err = stateConf.WaitForState() + log.Printf("[DEBUG] Firewall (%s) is active.", firewall.ID) + + d.SetId(firewall.ID) + + return resourceFWFirewallV1Read(d, meta) +} + +func resourceFWFirewallV1Read(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Retrieve information about firewall: %s", d.Id()) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var firewall Firewall + err = firewalls.Get(networkingClient, d.Id()).ExtractInto(&firewall) + if err != nil { + return CheckDeleted(d, err, "firewall") + } + + log.Printf("[DEBUG] Read OpenStack Firewall %s: %#v", d.Id(), firewall) + + d.Set("name", firewall.Name) + d.Set("description", firewall.Description) + d.Set("policy_id", firewall.PolicyID) + d.Set("admin_state_up", firewall.AdminStateUp) + d.Set("tenant_id", firewall.TenantID) + d.Set("region", GetRegion(d)) + d.Set("associated_routers", firewall.RouterIDs) + + return nil +} + +func resourceFWFirewallV1Update(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + // PolicyID is required + opts := firewalls.UpdateOpts{ + PolicyID: 
d.Get("policy_id").(string), + } + + if d.HasChange("name") { + opts.Name = d.Get("name").(string) + } + + if d.HasChange("description") { + opts.Description = d.Get("description").(string) + } + + if d.HasChange("admin_state_up") { + adminStateUp := d.Get("admin_state_up").(bool) + opts.AdminStateUp = &adminStateUp + } + + var updateOpts firewalls.UpdateOptsBuilder + var routerIds []string + if d.HasChange("associated_routers") || d.HasChange("no_routers") { + // 'no_routers' = true means 'associated_routers' will be empty... + if d.Get("no_routers").(bool) { + log.Printf("[DEBUG] 'no_routers' is true.") + routerIds = make([]string, 0) + } else { + associatedRoutersRaw := d.Get("associated_routers").(*schema.Set).List() + for _, v := range associatedRoutersRaw { + routerIds = append(routerIds, v.(string)) + } + } + + updateOpts = routerinsertion.UpdateOptsExt{ + UpdateOptsBuilder: opts, + RouterIDs: routerIds, + } + } else { + updateOpts = opts + } + + log.Printf("[DEBUG] Updating firewall with id %s: %#v", d.Id(), updateOpts) + + err = firewalls.Update(networkingClient, d.Id(), updateOpts).Err + if err != nil { + return err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING_CREATE", "PENDING_UPDATE"}, + Target: []string{"ACTIVE"}, + Refresh: waitForFirewallActive(networkingClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutUpdate), + Delay: 0, + MinTimeout: 2 * time.Second, + } + + _, err = stateConf.WaitForState() + + return resourceFWFirewallV1Read(d, meta) +} + +func resourceFWFirewallV1Delete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Destroy firewall: %s", d.Id()) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + // Ensure the firewall was fully created/updated before being deleted. 
+ stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING_CREATE", "PENDING_UPDATE"}, + Target: []string{"ACTIVE"}, + Refresh: waitForFirewallActive(networkingClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutUpdate), + Delay: 0, + MinTimeout: 2 * time.Second, + } + + _, err = stateConf.WaitForState() + + err = firewalls.Delete(networkingClient, d.Id()).Err + + if err != nil { + return err + } + + stateConf = &resource.StateChangeConf{ + Pending: []string{"DELETING"}, + Target: []string{"DELETED"}, + Refresh: waitForFirewallDeletion(networkingClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 0, + MinTimeout: 2 * time.Second, + } + + _, err = stateConf.WaitForState() + + return err +} + +func waitForFirewallActive(networkingClient *gophercloud.ServiceClient, id string) resource.StateRefreshFunc { + + return func() (interface{}, string, error) { + var fw Firewall + + err := firewalls.Get(networkingClient, id).ExtractInto(&fw) + if err != nil { + return nil, "", err + } + return fw, fw.Status, nil + } +} + +func waitForFirewallDeletion(networkingClient *gophercloud.ServiceClient, id string) resource.StateRefreshFunc { + + return func() (interface{}, string, error) { + fw, err := firewalls.Get(networkingClient, id).Extract() + log.Printf("[DEBUG] Got firewall %s => %#v", id, fw) + + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Firewall %s is actually deleted", id) + return "", "DELETED", nil + } + return nil, "", fmt.Errorf("Unexpected error: %s", err) + } + + log.Printf("[DEBUG] Firewall %s deletion is pending", id) + return fw, "DELETING", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_policy_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_policy_v1.go new file mode 100644 index 000000000..a810e360e --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_policy_v1.go @@ -0,0 +1,231 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceFWPolicyV1() *schema.Resource { + return &schema.Resource{ + Create: resourceFWPolicyV1Create, + Read: resourceFWPolicyV1Read, + Update: resourceFWPolicyV1Update, + Delete: resourceFWPolicyV1Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "audited": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "shared": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "rules": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + 
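+ // Arbitrary key/value pairs forwarded verbatim to the API request body
+ // via MapValueSpecs.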
"value_specs": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceFWPolicyV1Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + v := d.Get("rules").([]interface{}) + + log.Printf("[DEBUG] Rules found : %#v", v) + log.Printf("[DEBUG] Rules count : %d", len(v)) + + rules := make([]string, len(v)) + for i, v := range v { + rules[i] = v.(string) + } + + audited := d.Get("audited").(bool) + + opts := PolicyCreateOpts{ + policies.CreateOpts{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + Audited: &audited, + TenantID: d.Get("tenant_id").(string), + Rules: rules, + }, + MapValueSpecs(d), + } + + if r, ok := d.GetOk("shared"); ok { + shared := r.(bool) + opts.Shared = &shared + } + + log.Printf("[DEBUG] Create firewall policy: %#v", opts) + + policy, err := policies.Create(networkingClient, opts).Extract() + if err != nil { + return err + } + + log.Printf("[DEBUG] Firewall policy created: %#v", policy) + + d.SetId(policy.ID) + + return resourceFWPolicyV1Read(d, meta) +} + +func resourceFWPolicyV1Read(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Retrieve information about firewall policy: %s", d.Id()) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + policy, err := policies.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "FW policy") + } + + log.Printf("[DEBUG] Read OpenStack Firewall Policy %s: %#v", d.Id(), policy) + + d.Set("name", policy.Name) + d.Set("description", policy.Description) + d.Set("shared", policy.Shared) + d.Set("audited", policy.Audited) + d.Set("tenant_id", policy.TenantID) + d.Set("rules", policy.Rules) + d.Set("region", GetRegion(d)) + + return nil +} + +func resourceFWPolicyV1Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + opts := policies.UpdateOpts{} + + if d.HasChange("name") { + opts.Name = d.Get("name").(string) + } + + if d.HasChange("description") { + opts.Description = d.Get("description").(string) + } + + if d.HasChange("rules") { + v := d.Get("rules").([]interface{}) + + log.Printf("[DEBUG] Rules found : %#v", v) + log.Printf("[DEBUG] Rules count : %d", len(v)) + + rules := make([]string, len(v)) + for i, v := range v { + rules[i] = v.(string) + } + opts.Rules = rules + } + + log.Printf("[DEBUG] Updating firewall policy with id %s: %#v", d.Id(), opts) + + err = policies.Update(networkingClient, d.Id(), opts).Err + if err != nil { + return err + } + + return resourceFWPolicyV1Read(d, meta) +} + +func resourceFWPolicyV1Delete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Destroy firewall policy: %s", d.Id()) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE"}, + Target: []string{"DELETED"}, + Refresh: 
waitForFirewallPolicyDeletion(networkingClient, d.Id()),
+ Timeout: d.Timeout(schema.TimeoutCreate),
+ Delay: 0,
+ MinTimeout: 2 * time.Second,
+ }
+
+ if _, err = stateConf.WaitForState(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func waitForFirewallPolicyDeletion(networkingClient *gophercloud.ServiceClient, id string) resource.StateRefreshFunc {
+ return func() (interface{}, string, error) {
+ err := policies.Delete(networkingClient, id).Err
+ if err == nil {
+ return "", "DELETED", nil
+ }
+
+ if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok {
+ if errCode.Actual == 409 {
+ // This error usually means that the policy is attached
+ // to a firewall. At this point, the firewall is probably
+ // being deleted, so we retry a few times.
+ return nil, "ACTIVE", nil
+ }
+ }
+
+ return nil, "ACTIVE", err
+ }
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_rule_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_rule_v1.go
new file mode 100644
index 000000000..afde64f93
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_rule_v1.go
@@ -0,0 +1,288 @@
+package openstack
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/gophercloud/gophercloud"
+ "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies"
+ "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceFWRuleV1() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceFWRuleV1Create,
+ Read: resourceFWRuleV1Read,
+ Update: resourceFWRuleV1Update,
+ Delete: resourceFWRuleV1Delete,
+ Importer: &schema.ResourceImporter{
+ State: schema.ImportStatePassthrough,
+ },
+
+ Schema: map[string]*schema.Schema{
+ "region": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""),
+ },
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "description": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "protocol": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "action": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "ip_version": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ Default: 4,
+ },
+ "source_ip_address": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "destination_ip_address": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "source_port": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "destination_port": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ Default: true,
+ },
+ "tenant_id": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ "value_specs": &schema.Schema{
+ Type: schema.TypeMap,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ }
+}
+
+func resourceFWRuleV1Create(d *schema.ResourceData, meta interface{}) error {
+
+ config := meta.(*Config)
+ networkingClient, err := config.networkingV2Client(GetRegion(d))
+ if err != nil {
+ return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+ }
+
+ enabled := d.Get("enabled").(bool)
+ ipVersion := resourceFWRuleV1DetermineIPVersion(d.Get("ip_version").(int))
+ protocol := 
resourceFWRuleV1DetermineProtocol(d.Get("protocol").(string)) + + ruleConfiguration := RuleCreateOpts{ + rules.CreateOpts{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + Protocol: protocol, + Action: d.Get("action").(string), + IPVersion: ipVersion, + SourceIPAddress: d.Get("source_ip_address").(string), + DestinationIPAddress: d.Get("destination_ip_address").(string), + SourcePort: d.Get("source_port").(string), + DestinationPort: d.Get("destination_port").(string), + Enabled: &enabled, + TenantID: d.Get("tenant_id").(string), + }, + MapValueSpecs(d), + } + + log.Printf("[DEBUG] Create firewall rule: %#v", ruleConfiguration) + + rule, err := rules.Create(networkingClient, ruleConfiguration).Extract() + + if err != nil { + return err + } + + log.Printf("[DEBUG] Firewall rule with id %s : %#v", rule.ID, rule) + + d.SetId(rule.ID) + + return resourceFWRuleV1Read(d, meta) +} + +func resourceFWRuleV1Read(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Retrieve information about firewall rule: %s", d.Id()) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + rule, err := rules.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "FW rule") + } + + log.Printf("[DEBUG] Read OpenStack Firewall Rule %s: %#v", d.Id(), rule) + + d.Set("action", rule.Action) + d.Set("name", rule.Name) + d.Set("description", rule.Description) + d.Set("ip_version", rule.IPVersion) + d.Set("source_ip_address", rule.SourceIPAddress) + d.Set("destination_ip_address", rule.DestinationIPAddress) + d.Set("source_port", rule.SourcePort) + d.Set("destination_port", rule.DestinationPort) + d.Set("enabled", rule.Enabled) + + if rule.Protocol == "" { + d.Set("protocol", "any") + } else { + d.Set("protocol", rule.Protocol) + } + + d.Set("region", GetRegion(d)) + + return nil +} + +func resourceFWRuleV1Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + opts := rules.UpdateOpts{} + + if d.HasChange("name") { + v := d.Get("name").(string) + opts.Name = &v + } + + if d.HasChange("description") { + v := d.Get("description").(string) + opts.Description = &v + } + + if d.HasChange("protocol") { + v := d.Get("protocol").(string) + opts.Protocol = &v + } + + if d.HasChange("action") { + v := d.Get("action").(string) + opts.Action = &v + } + + if d.HasChange("ip_version") { + v := d.Get("ip_version").(int) + ipVersion := resourceFWRuleV1DetermineIPVersion(v) + opts.IPVersion = &ipVersion + } + + if d.HasChange("source_ip_address") { + v := d.Get("source_ip_address").(string) + opts.SourceIPAddress = &v + } + + if d.HasChange("destination_ip_address") { + v := d.Get("destination_ip_address").(string) + opts.DestinationIPAddress = &v + } + + if d.HasChange("source_port") { + v := d.Get("source_port").(string) + opts.SourcePort = &v + } + + if d.HasChange("destination_port") { + v := d.Get("destination_port").(string) + opts.DestinationPort = &v + } + + if d.HasChange("enabled") { + v := d.Get("enabled").(bool) + opts.Enabled = &v + } + + log.Printf("[DEBUG] Updating firewall rules: %#v", opts) + + err = rules.Update(networkingClient, d.Id(), opts).Err + if err != nil { + return err + } + + return 
resourceFWRuleV1Read(d, meta) +} + +func resourceFWRuleV1Delete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Destroy firewall rule: %s", d.Id()) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + rule, err := rules.Get(networkingClient, d.Id()).Extract() + if err != nil { + return err + } + + if rule.PolicyID != "" { + _, err := policies.RemoveRule(networkingClient, rule.PolicyID, rule.ID).Extract() + if err != nil { + return err + } + } + + return rules.Delete(networkingClient, d.Id()).Err +} + +func resourceFWRuleV1DetermineIPVersion(ipv int) gophercloud.IPVersion { + // Determine the IP Version + var ipVersion gophercloud.IPVersion + switch ipv { + case 4: + ipVersion = gophercloud.IPv4 + case 6: + ipVersion = gophercloud.IPv6 + } + + return ipVersion +} + +func resourceFWRuleV1DetermineProtocol(p string) rules.Protocol { + var protocol rules.Protocol + switch p { + case "any": + protocol = rules.ProtocolAny + case "icmp": + protocol = rules.ProtocolICMP + case "tcp": + protocol = rules.ProtocolTCP + case "udp": + protocol = rules.ProtocolUDP + } + + return protocol +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_images_image_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_images_image_v2.go new file mode 100644 index 000000000..483494334 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_images_image_v2.go @@ -0,0 +1,501 @@ +package openstack + +import ( + "crypto/md5" + "encoding/hex" + "fmt" + "io" + "log" + "net/http" + "os" + "path/filepath" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata" + "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceImagesImageV2() *schema.Resource { + return &schema.Resource{ + Create: resourceImagesImageV2Create, + Read: resourceImagesImageV2Read, + Update: resourceImagesImageV2Update, + Delete: resourceImagesImageV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "checksum": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "container_format": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: resourceImagesImageV2ValidateContainerFormat, + }, + + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "disk_format": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: resourceImagesImageV2ValidateDiskFormat, + }, + + "file": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "image_cache_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: fmt.Sprintf("%s/.terraform/image_cache", os.Getenv("HOME")), + }, + + "image_source_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"local_file_path"}, + }, + + "local_file_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + 
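+ // At most one of local_file_path and image_source_url may be set; the
+ // create step fails if neither is provided.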
ForceNew: true, + ConflictsWith: []string{"image_source_url"}, + }, + + "metadata": &schema.Schema{ + Type: schema.TypeMap, + Computed: true, + }, + + "min_disk_gb": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validatePositiveInt, + Default: 0, + }, + + "min_ram_mb": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validatePositiveInt, + Default: 0, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + + "owner": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "protected": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + + "schema": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "size_bytes": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + + "status": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "tags": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "update_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "visibility": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + ValidateFunc: resourceImagesImageV2ValidateVisibility, + Default: "private", + }, + }, + } +} + +func resourceImagesImageV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + imageClient, err := config.imageV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack image client: %s", err) + } + + protected := d.Get("protected").(bool) + visibility := resourceImagesImageV2VisibilityFromString(d.Get("visibility").(string)) + createOpts := &images.CreateOpts{ + Name: d.Get("name").(string), + ContainerFormat: d.Get("container_format").(string), + DiskFormat: d.Get("disk_format").(string), + MinDisk: d.Get("min_disk_gb").(int), + MinRAM: d.Get("min_ram_mb").(int), + Protected: &protected, + Visibility: &visibility, + } + + if v, ok := d.GetOk("tags"); ok { + tags := v.(*schema.Set).List() + createOpts.Tags = resourceImagesImageV2BuildTags(tags) + } + + d.Partial(true) + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + newImg, err := images.Create(imageClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating Image: %s", err) + } + + d.SetId(newImg.ID) + + // downloading/getting image file props + imgFilePath, err := resourceImagesImageV2File(d) + if err != nil { + return fmt.Errorf("Error opening file for Image: %s", err) + + } + fileSize, fileChecksum, err := resourceImagesImageV2FileProps(imgFilePath) + if err != nil { + return fmt.Errorf("Error getting file props: %s", err) + } + + // upload + imgFile, err := os.Open(imgFilePath) + if err != nil { + return fmt.Errorf("Error opening file %q: %s", imgFilePath, err) + } + defer imgFile.Close() + log.Printf("[WARN] Uploading image %s (%d bytes). 
This may take a while.", d.Id(), fileSize)
+
+ res := imagedata.Upload(imageClient, d.Id(), imgFile)
+ if res.Err != nil {
+ return fmt.Errorf("Error while uploading file %q: %s", imgFilePath, res.Err)
+ }
+
+ // Wait for the image to become active.
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{string(images.ImageStatusQueued), string(images.ImageStatusSaving)},
+ Target: []string{string(images.ImageStatusActive)},
+ Refresh: resourceImagesImageV2RefreshFunc(imageClient, d.Id(), fileSize, fileChecksum),
+ Timeout: d.Timeout(schema.TimeoutCreate),
+ Delay: 10 * time.Second,
+ MinTimeout: 3 * time.Second,
+ }
+
+ if _, err = stateConf.WaitForState(); err != nil {
+ return fmt.Errorf("Error waiting for Image: %s", err)
+ }
+
+ d.Partial(false)
+
+ return resourceImagesImageV2Read(d, meta)
+}
+
+func resourceImagesImageV2Read(d *schema.ResourceData, meta interface{}) error {
+ config := meta.(*Config)
+ imageClient, err := config.imageV2Client(GetRegion(d))
+ if err != nil {
+ return fmt.Errorf("Error creating OpenStack image client: %s", err)
+ }
+
+ img, err := images.Get(imageClient, d.Id()).Extract()
+ if err != nil {
+ return CheckDeleted(d, err, "image")
+ }
+
+ log.Printf("[DEBUG] Retrieved Image %s: %#v", d.Id(), img)
+
+ d.Set("owner", img.Owner)
+ d.Set("status", img.Status)
+ d.Set("file", img.File)
+ d.Set("schema", img.Schema)
+ d.Set("checksum", img.Checksum)
+ d.Set("size_bytes", img.SizeBytes)
+ d.Set("metadata", img.Metadata)
+ d.Set("created_at", img.CreatedAt)
+ d.Set("update_at", img.UpdatedAt)
+ d.Set("container_format", img.ContainerFormat)
+ d.Set("disk_format", img.DiskFormat)
+ d.Set("min_disk_gb", img.MinDiskGigabytes)
+ d.Set("min_ram_mb", img.MinRAMMegabytes)
+ d.Set("name", img.Name)
+ d.Set("protected", img.Protected)
+ d.Set("tags", img.Tags)
+ d.Set("visibility", img.Visibility)
+ return nil
+}
+
+func resourceImagesImageV2Update(d *schema.ResourceData, meta interface{}) error {
+ config := meta.(*Config)
+ imageClient, err := config.imageV2Client(GetRegion(d))
+ if err != nil {
+ return fmt.Errorf("Error creating OpenStack image client: %s", err)
+ }
+
+ updateOpts := make(images.UpdateOpts, 0)
+
+ if d.HasChange("visibility") {
+ visibility := resourceImagesImageV2VisibilityFromString(d.Get("visibility").(string))
+ v := images.UpdateVisibility{Visibility: visibility}
+ updateOpts = append(updateOpts, v)
+ }
+
+ if d.HasChange("name") {
+ v := images.ReplaceImageName{NewName: d.Get("name").(string)}
+ updateOpts = append(updateOpts, v)
+ }
+
+ if d.HasChange("tags") {
+ tags := d.Get("tags").(*schema.Set).List()
+ v := images.ReplaceImageTags{
+ NewTags: resourceImagesImageV2BuildTags(tags),
+ }
+ updateOpts = append(updateOpts, v)
+ }
+
+ log.Printf("[DEBUG] Update Options: %#v", updateOpts)
+
+ _, err = images.Update(imageClient, d.Id(), updateOpts).Extract()
+ if err != nil {
+ return fmt.Errorf("Error updating image: %s", err)
+ }
+
+ return resourceImagesImageV2Read(d, meta)
+}
+
+func resourceImagesImageV2Delete(d *schema.ResourceData, meta interface{}) error {
+ config := meta.(*Config)
+ imageClient, err := config.imageV2Client(GetRegion(d))
+ if err != nil {
+ return fmt.Errorf("Error creating OpenStack image client: %s", err)
+ }
+
+ log.Printf("[DEBUG] Deleting Image %s", d.Id())
+ if err := images.Delete(imageClient, d.Id()).Err; err != nil {
+ return fmt.Errorf("Error deleting Image: %s", err)
+ }
+
+ d.SetId("")
+ return nil
+}
+
+func resourceImagesImageV2ValidateVisibility(v 
interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ validVisibilities := []string{
+ "public",
+ "private",
+ "shared",
+ "community",
+ }
+
+ for _, v := range validVisibilities {
+ if value == v {
+ return
+ }
+ }
+
+ err := fmt.Errorf("%s must be one of %s", k, validVisibilities)
+ errors = append(errors, err)
+ return
+}
+
+func validatePositiveInt(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(int)
+ if value > 0 {
+ return
+ }
+ errors = append(errors, fmt.Errorf("%q must be a positive integer", k))
+ return
+}
+
+var DiskFormats = [9]string{"ami", "ari", "aki", "vhd", "vmdk", "raw", "qcow2", "vdi", "iso"}
+
+func resourceImagesImageV2ValidateDiskFormat(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ for i := range DiskFormats {
+ if value == DiskFormats[i] {
+ return
+ }
+ }
+ errors = append(errors, fmt.Errorf("%q must be one of %v", k, DiskFormats))
+ return
+}
+
+var ContainerFormats = [5]string{"ami", "ari", "aki", "bare", "ovf"}
+
+func resourceImagesImageV2ValidateContainerFormat(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ for i := range ContainerFormats {
+ if value == ContainerFormats[i] {
+ return
+ }
+ }
+ errors = append(errors, fmt.Errorf("%q must be one of %v", k, ContainerFormats))
+ return
+}
+
+func resourceImagesImageV2VisibilityFromString(v string) images.ImageVisibility {
+ switch v {
+ case "public":
+ return images.ImageVisibilityPublic
+ case "private":
+ return images.ImageVisibilityPrivate
+ case "shared":
+ return images.ImageVisibilityShared
+ case "community":
+ return images.ImageVisibilityCommunity
+ }
+
+ return ""
+}
+
+func fileMD5Checksum(f *os.File) (string, error) {
+ hash := md5.New()
+ if _, err := io.Copy(hash, f); err != nil {
+ return "", err
+ }
+ return hex.EncodeToString(hash.Sum(nil)), nil
+}
+
+func resourceImagesImageV2FileProps(filename string) (int64, string, error) {
+ var filesize int64
+ var filechecksum string
+
+ file, err := os.Open(filename)
+ if err != nil {
+ return -1, "", fmt.Errorf("Error opening file for Image: %s", err)
+ }
+ defer file.Close()
+
+ fstat, err := file.Stat()
+ if err != nil {
+ return -1, "", fmt.Errorf("Error reading image file %q: %s", file.Name(), err)
+ }
+
+ filesize = fstat.Size()
+ filechecksum, err = fileMD5Checksum(file)
+
+ if err != nil {
+ return -1, "", fmt.Errorf("Error computing image file %q checksum: %s", file.Name(), err)
+ }
+
+ return filesize, filechecksum, nil
+}
+
+func resourceImagesImageV2File(d *schema.ResourceData) (string, error) {
+ if filename := d.Get("local_file_path").(string); filename != "" {
+ return filename, nil
+ } else if furl := d.Get("image_source_url").(string); furl != "" {
+ dir := d.Get("image_cache_path").(string)
+ if err := os.MkdirAll(dir, 0700); err != nil {
+ return "", fmt.Errorf("Error creating image cache directory %q: %s", dir, err)
+ }
+ filename := filepath.Join(dir, fmt.Sprintf("%x.img", md5.Sum([]byte(furl))))
+
+ if _, err := os.Stat(filename); err != nil {
+ if !os.IsNotExist(err) {
+ return "", fmt.Errorf("Error while trying to access file %q: %s", filename, err)
+ }
+ log.Printf("[DEBUG] File doesn't exist: %s; 
will download from %s", filename, furl) + file, err := os.Create(filename) + if err != nil { + return "", fmt.Errorf("Error creating file %q: %s", filename, err) + } + defer file.Close() + resp, err := http.Get(furl) + if err != nil { + return "", fmt.Errorf("Error downloading image from %q", furl) + } + defer resp.Body.Close() + + if _, err = io.Copy(file, resp.Body); err != nil { + return "", fmt.Errorf("Error downloading image %q to file %q: %s", furl, filename, err) + } + return filename, nil + } else { + log.Printf("[DEBUG] File exists %s", filename) + return filename, nil + } + } else { + return "", fmt.Errorf("Error in config. no file specified") + } +} + +func resourceImagesImageV2RefreshFunc(client *gophercloud.ServiceClient, id string, fileSize int64, checksum string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + img, err := images.Get(client, id).Extract() + if err != nil { + return nil, "", err + } + log.Printf("[DEBUG] OpenStack image status is: %s", img.Status) + + if img.Checksum != checksum || int64(img.SizeBytes) != fileSize { + return img, fmt.Sprintf("%s", img.Status), fmt.Errorf("Error wrong size %v or checksum %q", img.SizeBytes, img.Checksum) + } + + return img, fmt.Sprintf("%s", img.Status), nil + } +} + +func resourceImagesImageV2BuildTags(v []interface{}) []string { + var tags []string + for _, tag := range v { + tags = append(tags, tag.(string)) + } + + return tags +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_listener_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_listener_v2.go new file mode 100644 index 000000000..c426f2be6 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_listener_v2.go @@ -0,0 +1,316 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners" +) + +func resourceListenerV2() *schema.Resource { + return &schema.Resource{ + Create: resourceListenerV2Create, + Read: resourceListenerV2Read, + Update: resourceListenerV2Update, + Delete: resourceListenerV2Delete, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + + "protocol": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "TCP" && value != "HTTP" && value != "HTTPS" { + errors = append(errors, fmt.Errorf( + "Only 'TCP', 'HTTP', and 'HTTPS' are supported values for 'protocol'")) + } + return + }, + }, + + "protocol_port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "loadbalancer_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "default_pool_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "description": 
&schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "connection_limit": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + + "default_tls_container_ref": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "sni_container_refs": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "admin_state_up": &schema.Schema{ + Type: schema.TypeBool, + Default: true, + Optional: true, + }, + + "id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + } +} + +func resourceListenerV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + adminStateUp := d.Get("admin_state_up").(bool) + connLimit := d.Get("connection_limit").(int) + var sniContainerRefs []string + if raw, ok := d.GetOk("sni_container_refs"); ok { + for _, v := range raw.([]interface{}) { + sniContainerRefs = append(sniContainerRefs, v.(string)) + } + } + createOpts := listeners.CreateOpts{ + Protocol: listeners.Protocol(d.Get("protocol").(string)), + ProtocolPort: d.Get("protocol_port").(int), + TenantID: d.Get("tenant_id").(string), + LoadbalancerID: d.Get("loadbalancer_id").(string), + Name: d.Get("name").(string), + DefaultPoolID: d.Get("default_pool_id").(string), + Description: d.Get("description").(string), + ConnLimit: &connLimit, + DefaultTlsContainerRef: d.Get("default_tls_container_ref").(string), + SniContainerRefs: sniContainerRefs, + AdminStateUp: &adminStateUp, + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + listener, err := listeners.Create(networkingClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack LBaaSV2 listener: %s", err) + } + log.Printf("[INFO] Listener ID: %s", listener.ID) + + log.Printf("[DEBUG] Waiting for Openstack LBaaSV2 listener (%s) to become available.", listener.ID) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING_CREATE"}, + Target: []string{"ACTIVE"}, + Refresh: waitForListenerActive(networkingClient, listener.ID), + Timeout: 2 * time.Minute, + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + d.SetId(listener.ID) + + return resourceListenerV2Read(d, meta) +} + +func resourceListenerV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + listener, err := listeners.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "LBV2 listener") + } + + log.Printf("[DEBUG] Retrieved OpenStack LBaaSV2 listener %s: %+v", d.Id(), listener) + + d.Set("id", listener.ID) + d.Set("name", listener.Name) + d.Set("protocol", listener.Protocol) + d.Set("tenant_id", listener.TenantID) + d.Set("description", listener.Description) + d.Set("protocol_port", listener.ProtocolPort) + d.Set("admin_state_up", listener.AdminStateUp) + d.Set("default_pool_id", listener.DefaultPoolID) + d.Set("connection_limit", listener.ConnLimit) + d.Set("sni_container_refs", listener.SniContainerRefs) + d.Set("default_tls_container_ref", listener.DefaultTlsContainerRef) + + return nil +} + +func 
resourceListenerV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var updateOpts listeners.UpdateOpts + if d.HasChange("name") { + updateOpts.Name = d.Get("name").(string) + } + if d.HasChange("description") { + updateOpts.Description = d.Get("description").(string) + } + if d.HasChange("connection_limit") { + connLimit := d.Get("connection_limit").(int) + updateOpts.ConnLimit = &connLimit + } + if d.HasChange("default_tls_container_ref") { + updateOpts.DefaultTlsContainerRef = d.Get("default_tls_container_ref").(string) + } + if d.HasChange("sni_container_refs") { + var sniContainerRefs []string + if raw, ok := d.GetOk("sni_container_refs"); ok { + for _, v := range raw.([]interface{}) { + sniContainerRefs = append(sniContainerRefs, v.(string)) + } + } + updateOpts.SniContainerRefs = sniContainerRefs + } + if d.HasChange("admin_state_up") { + asu := d.Get("admin_state_up").(bool) + updateOpts.AdminStateUp = &asu + } + + log.Printf("[DEBUG] Updating OpenStack LBaaSV2 Listener %s with options: %+v", d.Id(), updateOpts) + + _, err = listeners.Update(networkingClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack LBaaSV2 Listener: %s", err) + } + + return resourceListenerV2Read(d, meta) + +} + +func resourceListenerV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE", "PENDING_DELETE"}, + Target: []string{"DELETED"}, + Refresh: waitForListenerDelete(networkingClient, d.Id()), + Timeout: 2 * time.Minute, + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error deleting OpenStack LBaaSV2 listener: %s", err) + } + + d.SetId("") + return nil +} + +func waitForListenerActive(networkingClient *gophercloud.ServiceClient, listenerID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + listener, err := listeners.Get(networkingClient, listenerID).Extract() + if err != nil { + return nil, "", err + } + + // The listener resource has no Status attribute, so a successful Get is the best we can do + log.Printf("[DEBUG] OpenStack LBaaSV2 listener: %+v", listener) + return listener, "ACTIVE", nil + } +} + +func waitForListenerDelete(networkingClient *gophercloud.ServiceClient, listenerID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete OpenStack LBaaSV2 listener %s", listenerID) + + listener, err := listeners.Get(networkingClient, listenerID).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack LBaaSV2 listener %s", listenerID) + return listener, "DELETED", nil + } + return listener, "ACTIVE", err + } + + log.Printf("[DEBUG] Openstack LBaaSV2 listener: %+v", listener) + err = listeners.Delete(networkingClient, listenerID).ExtractErr() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack LBaaSV2 listener %s", listenerID) + return listener, "DELETED", nil + } 
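+ // A 409 response means the listener is still attached to a load balancer;
+ // report ACTIVE so the state change loop keeps retrying.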
+ + if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { + if errCode.Actual == 409 { + log.Printf("[DEBUG] OpenStack LBaaSV2 listener (%s) is still in use.", listenerID) + return listener, "ACTIVE", nil + } + } + + return listener, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack LBaaSV2 listener %s still active.", listenerID) + return listener, "ACTIVE", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_loadbalancer_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_loadbalancer_v2.go new file mode 100644 index 000000000..c4e17995f --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_loadbalancer_v2.go @@ -0,0 +1,337 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" +) + +func resourceLoadBalancerV2() *schema.Resource { + return &schema.Resource{ + Create: resourceLoadBalancerV2Create, + Read: resourceLoadBalancerV2Read, + Update: resourceLoadBalancerV2Update, + Delete: resourceLoadBalancerV2Delete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "vip_subnet_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "vip_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "vip_port_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "admin_state_up": &schema.Schema{ + Type: schema.TypeBool, + Default: true, + Optional: true, + }, + + "flavor": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "provider": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Deprecated: "Please use loadbalancer_provider", + }, + + "loadbalancer_provider": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "security_group_ids": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } +} + +func resourceLoadBalancerV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var lbProvider string + if v, ok := d.GetOk("loadbalancer_provider"); ok { + lbProvider = v.(string) + } + + adminStateUp := d.Get("admin_state_up").(bool) + createOpts := loadbalancers.CreateOpts{ 
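+ // The options map one-to-one onto the resource arguments; the
+ // loadbalancer_provider argument is sent as Provider.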
+ Name: d.Get("name").(string), + Description: d.Get("description").(string), + VipSubnetID: d.Get("vip_subnet_id").(string), + TenantID: d.Get("tenant_id").(string), + VipAddress: d.Get("vip_address").(string), + AdminStateUp: &adminStateUp, + Flavor: d.Get("flavor").(string), + Provider: lbProvider, + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + lb, err := loadbalancers.Create(networkingClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack LoadBalancer: %s", err) + } + log.Printf("[INFO] LoadBalancer ID: %s", lb.ID) + + log.Printf("[DEBUG] Waiting for Openstack LoadBalancer (%s) to become available.", lb.ID) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING_CREATE"}, + Target: []string{"ACTIVE"}, + Refresh: waitForLoadBalancerActive(networkingClient, lb.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + // Once the loadbalancer has been created, apply any requested security groups + // to the port that was created behind the scenes. + if err := resourceLoadBalancerV2SecurityGroups(networkingClient, lb.VipPortID, d); err != nil { + return err + } + + // If all has been successful, set the ID on the resource + d.SetId(lb.ID) + + return resourceLoadBalancerV2Read(d, meta) +} + +func resourceLoadBalancerV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + lb, err := loadbalancers.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "LoadBalancerV2") + } + + log.Printf("[DEBUG] Retrieved OpenStack LBaaSV2 LoadBalancer %s: %+v", d.Id(), lb) + + d.Set("name", lb.Name) + d.Set("description", lb.Description) + d.Set("vip_subnet_id", lb.VipSubnetID) + d.Set("tenant_id", lb.TenantID) + d.Set("vip_address", lb.VipAddress) + d.Set("vip_port_id", lb.VipPortID) + d.Set("admin_state_up", lb.AdminStateUp) + d.Set("flavor", lb.Flavor) + d.Set("loadbalancer_provider", lb.Provider) + + // Get any security groups on the VIP Port + if lb.VipPortID != "" { + port, err := ports.Get(networkingClient, lb.VipPortID).Extract() + if err != nil { + return err + } + + d.Set("security_group_ids", port.SecurityGroups) + } + + return nil +} + +func resourceLoadBalancerV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var updateOpts loadbalancers.UpdateOpts + if d.HasChange("name") { + updateOpts.Name = d.Get("name").(string) + } + if d.HasChange("description") { + updateOpts.Description = d.Get("description").(string) + } + if d.HasChange("admin_state_up") { + asu := d.Get("admin_state_up").(bool) + updateOpts.AdminStateUp = &asu + } + + log.Printf("[DEBUG] Updating OpenStack LBaaSV2 LoadBalancer %s with options: %+v", d.Id(), updateOpts) + + _, err = loadbalancers.Update(networkingClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack LBaaSV2 LoadBalancer: %s", err) + } + + // Security Groups get updated separately + if d.HasChange("security_group_ids") { + vipPortID := d.Get("vip_port_id").(string) + if err := 
resourceLoadBalancerV2SecurityGroups(networkingClient, vipPortID, d); err != nil { + return err + } + } + + return resourceLoadBalancerV2Read(d, meta) +} + +func resourceLoadBalancerV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE", "PENDING_DELETE"}, + Target: []string{"DELETED"}, + Refresh: waitForLoadBalancerDelete(networkingClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error deleting OpenStack LBaaSV2 LoadBalancer: %s", err) + } + + d.SetId("") + return nil +} + +func resourceLoadBalancerV2SecurityGroups(networkingClient *gophercloud.ServiceClient, vipPortID string, d *schema.ResourceData) error { + if vipPortID != "" { + if _, ok := d.GetOk("security_group_ids"); ok { + updateOpts := ports.UpdateOpts{ + SecurityGroups: resourcePortSecurityGroupsV2(d), + } + + log.Printf("[DEBUG] Adding security groups to OpenStack LoadBalancer "+ + "VIP Port (%s): %#v", vipPortID, updateOpts) + + _, err := ports.Update(networkingClient, vipPortID, updateOpts).Extract() + if err != nil { + return err + } + } + } + + return nil +} + +func waitForLoadBalancerActive(networkingClient *gophercloud.ServiceClient, lbID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + lb, err := loadbalancers.Get(networkingClient, lbID).Extract() + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] OpenStack LBaaSV2 LoadBalancer: %+v", lb) + if lb.ProvisioningStatus == "ACTIVE" { + return lb, "ACTIVE", nil + } + + return lb, lb.ProvisioningStatus, nil + } +} + +func waitForLoadBalancerDelete(networkingClient *gophercloud.ServiceClient, lbID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete OpenStack LBaaSV2 LoadBalancer %s", lbID) + + lb, err := loadbalancers.Get(networkingClient, lbID).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack LBaaSV2 LoadBalancer %s", lbID) + return lb, "DELETED", nil + } + return lb, "ACTIVE", err + } + + log.Printf("[DEBUG] Openstack LoadBalancerV2: %+v", lb) + err = loadbalancers.Delete(networkingClient, lbID).ExtractErr() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack LBaaSV2 LoadBalancer %s", lbID) + return lb, "DELETED", nil + } + + if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { + if errCode.Actual == 409 { + log.Printf("[DEBUG] OpenStack LBaaSV2 LoadBalancer (%s) is still in use.", lbID) + return lb, "ACTIVE", nil + } + } + + return lb, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack LBaaSV2 LoadBalancer (%s) still active.", lbID) + return lb, "ACTIVE", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_member_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_member_v1.go new file mode 100644 index 000000000..e6dc3da9f --- /dev/null +++ 
b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_member_v1.go
@@ -0,0 +1,236 @@
+package openstack
+
+import (
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members"
+)
+
+func resourceLBMemberV1() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceLBMemberV1Create,
+		Read:   resourceLBMemberV1Read,
+		Update: resourceLBMemberV1Update,
+		Delete: resourceLBMemberV1Delete,
+		Importer: &schema.ResourceImporter{
+			State: schema.ImportStatePassthrough,
+		},
+
+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(10 * time.Minute),
+			Delete: schema.DefaultTimeout(10 * time.Minute),
+		},
+
+		Schema: map[string]*schema.Schema{
+			"region": &schema.Schema{
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""),
+			},
+			"tenant_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+			"pool_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"address": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"port": &schema.Schema{
+				Type:     schema.TypeInt,
+				Required: true,
+				ForceNew: true,
+			},
+			"weight": &schema.Schema{
+				Type:     schema.TypeInt,
+				Optional: true,
+				Computed: true,
+			},
+			"admin_state_up": &schema.Schema{
+				Type:     schema.TypeBool,
+				Optional: true,
+				ForceNew: false,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceLBMemberV1Create(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	createOpts := members.CreateOpts{
+		TenantID:     d.Get("tenant_id").(string),
+		PoolID:       d.Get("pool_id").(string),
+		Address:      d.Get("address").(string),
+		ProtocolPort: d.Get("port").(int),
+	}
+
+	log.Printf("[DEBUG] OpenStack LB Member Create Options: %#v", createOpts)
+	m, err := members.Create(networkingClient, createOpts).Extract()
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack LB member: %s", err)
+	}
+	log.Printf("[INFO] LB member ID: %s", m.ID)
+
+	log.Printf("[DEBUG] Waiting for OpenStack LB member (%s) to become available.", m.ID)
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"PENDING_CREATE"},
+		Target:     []string{"ACTIVE", "INACTIVE", "CREATED", "DOWN"},
+		Refresh:    waitForLBMemberActive(networkingClient, m.ID),
+		Timeout:    d.Timeout(schema.TimeoutCreate),
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return err
+	}
+
+	d.SetId(m.ID)
+
+	// Due to the way Gophercloud is currently set up, AdminStateUp must be set post-create
+	asu := d.Get("admin_state_up").(bool)
+	updateOpts := members.UpdateOpts{
+		AdminStateUp: &asu,
+	}
+
+	log.Printf("[DEBUG] OpenStack LB Member Update Options: %#v", updateOpts)
+	m, err = members.Update(networkingClient, m.ID, updateOpts).Extract()
+	if err != nil {
+		return fmt.Errorf("Error updating OpenStack LB member: %s", err)
+	}
+
+	return resourceLBMemberV1Read(d, meta)
+}
+
+func resourceLBMemberV1Read(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := 
config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	m, err := members.Get(networkingClient, d.Id()).Extract()
+	if err != nil {
+		return CheckDeleted(d, err, "LB member")
+	}
+
+	log.Printf("[DEBUG] Retrieved OpenStack LB member %s: %+v", d.Id(), m)
+
+	d.Set("address", m.Address)
+	d.Set("pool_id", m.PoolID)
+	d.Set("port", m.ProtocolPort)
+	d.Set("weight", m.Weight)
+	d.Set("admin_state_up", m.AdminStateUp)
+	d.Set("region", GetRegion(d))
+
+	return nil
+}
+
+func resourceLBMemberV1Update(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	var updateOpts members.UpdateOpts
+	if d.HasChange("admin_state_up") {
+		asu := d.Get("admin_state_up").(bool)
+		updateOpts.AdminStateUp = &asu
+	}
+
+	log.Printf("[DEBUG] Updating LB member %s with options: %+v", d.Id(), updateOpts)
+
+	_, err = members.Update(networkingClient, d.Id(), updateOpts).Extract()
+	if err != nil {
+		return fmt.Errorf("Error updating OpenStack LB member: %s", err)
+	}
+
+	return resourceLBMemberV1Read(d, meta)
+}
+
+func resourceLBMemberV1Delete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	err = members.Delete(networkingClient, d.Id()).ExtractErr()
+	if err != nil {
+		return CheckDeleted(d, err, "LB member")
+	}
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"ACTIVE", "PENDING_DELETE"},
+		Target:     []string{"DELETED"},
+		Refresh:    waitForLBMemberDelete(networkingClient, d.Id()),
+		Timeout:    d.Timeout(schema.TimeoutDelete),
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf("Error deleting OpenStack LB member: %s", err)
+	}
+
+	d.SetId("")
+	return nil
+}
+
+func waitForLBMemberActive(networkingClient *gophercloud.ServiceClient, memberId string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		m, err := members.Get(networkingClient, memberId).Extract()
+		if err != nil {
+			return nil, "", err
+		}
+
+		log.Printf("[DEBUG] OpenStack LB member: %+v", m)
+		if m.Status == "ACTIVE" {
+			return m, "ACTIVE", nil
+		}
+
+		return m, m.Status, nil
+	}
+}
+
+func waitForLBMemberDelete(networkingClient *gophercloud.ServiceClient, memberId string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		log.Printf("[DEBUG] Attempting to delete OpenStack LB member %s", memberId)
+
+		m, err := members.Get(networkingClient, memberId).Extract()
+		if err != nil {
+			if _, ok := err.(gophercloud.ErrDefault404); ok {
+				log.Printf("[DEBUG] Successfully deleted OpenStack LB member %s", memberId)
+				return m, "DELETED", nil
+			}
+			return m, "ACTIVE", err
+		}
+
+		log.Printf("[DEBUG] OpenStack LB member %s still active.", memberId)
+		return m, "ACTIVE", nil
+	}
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_member_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_member_v2.go
new file mode 100644
index 000000000..61326bac3
--- /dev/null
+++ 
b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_member_v2.go @@ -0,0 +1,305 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools" +) + +func resourceMemberV2() *schema.Resource { + return &schema.Resource{ + Create: resourceMemberV2Create, + Read: resourceMemberV2Read, + Update: resourceMemberV2Update, + Delete: resourceMemberV2Delete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "protocol_port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "weight": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + if value < 1 { + errors = append(errors, fmt.Errorf( + "Only numbers greater than 0 are supported values for 'weight'")) + } + return + }, + }, + + "subnet_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "admin_state_up": &schema.Schema{ + Type: schema.TypeBool, + Default: true, + Optional: true, + }, + + "pool_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + } +} + +func resourceMemberV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + adminStateUp := d.Get("admin_state_up").(bool) + createOpts := pools.CreateMemberOpts{ + Name: d.Get("name").(string), + TenantID: d.Get("tenant_id").(string), + Address: d.Get("address").(string), + ProtocolPort: d.Get("protocol_port").(int), + Weight: d.Get("weight").(int), + AdminStateUp: &adminStateUp, + } + + // Must omit if not set + if v, ok := d.GetOk("subnet_id"); ok { + createOpts.SubnetID = v.(string) + } + + poolID := d.Get("pool_id").(string) + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + + var member *pools.Member + err = resource.Retry(10*time.Minute, func() *resource.RetryError { + var err error + log.Printf("[DEBUG] Attempting to create LBaaSV2 member") + member, err = pools.CreateMember(networkingClient, poolID, createOpts).Extract() + if err != nil { + switch errCode := err.(type) { + case gophercloud.ErrDefault500: + log.Printf("[DEBUG] OpenStack LBaaSV2 member is still creating.") + return resource.RetryableError(err) + case gophercloud.ErrUnexpectedResponseCode: + if errCode.Actual == 409 { + log.Printf("[DEBUG] OpenStack LBaaSV2 member is still creating.") + return resource.RetryableError(err) + } + + default: + 
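// Anything else is treated as a permanent failure and aborts the retry loop.
+				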
return resource.NonRetryableError(err) + } + } + return nil + }) + + if err != nil { + return fmt.Errorf("Error creating OpenStack LBaaSV2 member: %s", err) + } + log.Printf("[INFO] member ID: %s", member.ID) + + log.Printf("[DEBUG] Waiting for Openstack LBaaSV2 member (%s) to become available.", member.ID) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING_CREATE"}, + Target: []string{"ACTIVE"}, + Refresh: waitForMemberActive(networkingClient, poolID, member.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + d.SetId(member.ID) + + return resourceMemberV2Read(d, meta) +} + +func resourceMemberV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + member, err := pools.GetMember(networkingClient, d.Get("pool_id").(string), d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "LBV2 Member") + } + + log.Printf("[DEBUG] Retrieved OpenStack LBaaSV2 Member %s: %+v", d.Id(), member) + + d.Set("name", member.Name) + d.Set("weight", member.Weight) + d.Set("admin_state_up", member.AdminStateUp) + d.Set("tenant_id", member.TenantID) + d.Set("subnet_id", member.SubnetID) + d.Set("address", member.Address) + d.Set("protocol_port", member.ProtocolPort) + d.Set("id", member.ID) + + return nil +} + +func resourceMemberV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var updateOpts pools.UpdateMemberOpts + if d.HasChange("name") { + updateOpts.Name = d.Get("name").(string) + } + if d.HasChange("weight") { + updateOpts.Weight = d.Get("weight").(int) + } + if d.HasChange("admin_state_up") { + asu := d.Get("admin_state_up").(bool) + updateOpts.AdminStateUp = &asu + } + + log.Printf("[DEBUG] Updating OpenStack LBaaSV2 Member %s with options: %+v", d.Id(), updateOpts) + + _, err = pools.UpdateMember(networkingClient, d.Get("pool_id").(string), d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack LBaaSV2 Member: %s", err) + } + + return resourceMemberV2Read(d, meta) +} + +func resourceMemberV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE", "PENDING_DELETE"}, + Target: []string{"DELETED"}, + Refresh: waitForMemberDelete(networkingClient, d.Get("pool_id").(string), d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error deleting OpenStack LBaaSV2 Member: %s", err) + } + + d.SetId("") + return nil +} + +func waitForMemberActive(networkingClient *gophercloud.ServiceClient, poolID string, memberID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + member, err := pools.GetMember(networkingClient, poolID, memberID).Extract() + if err != nil { + return nil, "", err + } + + // 
The member resource has no Status attribute, so a successful Get is the best we can do + log.Printf("[DEBUG] OpenStack LBaaSV2 Member: %+v", member) + return member, "ACTIVE", nil + } +} + +func waitForMemberDelete(networkingClient *gophercloud.ServiceClient, poolID string, memberID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete OpenStack LBaaSV2 Member %s", memberID) + + member, err := pools.GetMember(networkingClient, poolID, memberID).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack LBaaSV2 Member %s", memberID) + return member, "DELETED", nil + } + return member, "ACTIVE", err + } + + log.Printf("[DEBUG] Openstack LBaaSV2 Member: %+v", member) + err = pools.DeleteMember(networkingClient, poolID, memberID).ExtractErr() + if err != nil { + switch errCode := err.(type) { + case gophercloud.ErrDefault404: + log.Printf("[DEBUG] Successfully deleted OpenStack LBaaSV2 Member %s", memberID) + return member, "DELETED", nil + case gophercloud.ErrDefault500: + log.Printf("[DEBUG] OpenStack LBaaSV2 Member (%s) is still in use.", memberID) + return member, "PENDING_DELETE", nil + case gophercloud.ErrUnexpectedResponseCode: + if errCode.Actual == 409 { + log.Printf("[DEBUG] OpenStack LBaaSV2 Member (%s) is still in use.", memberID) + return member, "PENDING_DELETE", nil + } + + default: + return member, "ACTIVE", err + } + } + + log.Printf("[DEBUG] OpenStack LBaaSV2 Member %s still active.", memberID) + return member, "ACTIVE", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_monitor_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_monitor_v1.go new file mode 100644 index 000000000..26066cbea --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_monitor_v1.go @@ -0,0 +1,310 @@ +package openstack + +import ( + "fmt" + "log" + "strconv" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors" +) + +func resourceLBMonitorV1() *schema.Resource { + return &schema.Resource{ + Create: resourceLBMonitorV1Create, + Read: resourceLBMonitorV1Read, + Update: resourceLBMonitorV1Update, + Delete: resourceLBMonitorV1Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "delay": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: false, + }, + "timeout": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: false, + }, + "max_retries": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: false, + }, + "url_path": &schema.Schema{ + Type: 
schema.TypeString, + Optional: true, + ForceNew: false, + }, + "http_method": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "expected_codes": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "admin_state_up": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + Computed: true, + }, + }, + } +} + +func resourceLBMonitorV1Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + createOpts := monitors.CreateOpts{ + TenantID: d.Get("tenant_id").(string), + Delay: d.Get("delay").(int), + Timeout: d.Get("timeout").(int), + MaxRetries: d.Get("max_retries").(int), + URLPath: d.Get("url_path").(string), + ExpectedCodes: d.Get("expected_codes").(string), + HTTPMethod: d.Get("http_method").(string), + } + + if v, ok := d.GetOk("type"); ok { + monitorType := resourceLBMonitorV1DetermineType(v.(string)) + createOpts.Type = monitorType + } + + asuRaw := d.Get("admin_state_up").(string) + if asuRaw != "" { + asu, err := strconv.ParseBool(asuRaw) + if err != nil { + return fmt.Errorf("admin_state_up, if provided, must be either 'true' or 'false'") + } + createOpts.AdminStateUp = &asu + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + m, err := monitors.Create(networkingClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack LB Monitor: %s", err) + } + log.Printf("[INFO] LB Monitor ID: %s", m.ID) + + log.Printf("[DEBUG] Waiting for OpenStack LB Monitor (%s) to become available.", m.ID) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING_CREATE"}, + Target: []string{"ACTIVE"}, + Refresh: waitForLBMonitorActive(networkingClient, m.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + d.SetId(m.ID) + + return resourceLBMonitorV1Read(d, meta) +} + +func resourceLBMonitorV1Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + m, err := monitors.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "LB monitor") + } + + log.Printf("[DEBUG] Retrieved OpenStack LB Monitor %s: %+v", d.Id(), m) + + d.Set("type", m.Type) + d.Set("delay", m.Delay) + d.Set("timeout", m.Timeout) + d.Set("max_retries", m.MaxRetries) + d.Set("tenant_id", m.TenantID) + d.Set("url_path", m.URLPath) + d.Set("http_method", m.HTTPMethod) + d.Set("expected_codes", m.ExpectedCodes) + d.Set("admin_state_up", strconv.FormatBool(m.AdminStateUp)) + d.Set("region", GetRegion(d)) + + return nil +} + +func resourceLBMonitorV1Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + updateOpts := monitors.UpdateOpts{ + Delay: d.Get("delay").(int), + Timeout: d.Get("timeout").(int), + MaxRetries: d.Get("max_retries").(int), + URLPath: d.Get("url_path").(string), + HTTPMethod: d.Get("http_method").(string), + ExpectedCodes: 
d.Get("expected_codes").(string), + } + + if d.HasChange("admin_state_up") { + asuRaw := d.Get("admin_state_up").(string) + if asuRaw != "" { + asu, err := strconv.ParseBool(asuRaw) + if err != nil { + return fmt.Errorf("admin_state_up, if provided, must be either 'true' or 'false'") + } + updateOpts.AdminStateUp = &asu + } + } + + log.Printf("[DEBUG] Updating OpenStack LB Monitor %s with options: %+v", d.Id(), updateOpts) + + _, err = monitors.Update(networkingClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack LB Monitor: %s", err) + } + + return resourceLBMonitorV1Read(d, meta) +} + +func resourceLBMonitorV1Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE", "PENDING_DELETE"}, + Target: []string{"DELETED"}, + Refresh: waitForLBMonitorDelete(networkingClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error deleting OpenStack LB Monitor: %s", err) + } + + d.SetId("") + return nil +} + +func resourceLBMonitorV1DetermineType(t string) monitors.MonitorType { + var monitorType monitors.MonitorType + switch t { + case "PING": + monitorType = monitors.TypePING + case "TCP": + monitorType = monitors.TypeTCP + case "HTTP": + monitorType = monitors.TypeHTTP + case "HTTPS": + monitorType = monitors.TypeHTTPS + } + + return monitorType +} + +func waitForLBMonitorActive(networkingClient *gophercloud.ServiceClient, monitorId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + m, err := monitors.Get(networkingClient, monitorId).Extract() + if err != nil { + return nil, "", err + } + + // The monitor resource has no Status attribute, so a successful Get is the best we can do + log.Printf("[DEBUG] OpenStack LB Monitor: %+v", m) + return m, "ACTIVE", nil + } +} + +func waitForLBMonitorDelete(networkingClient *gophercloud.ServiceClient, monitorId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete OpenStack LB Monitor %s", monitorId) + + m, err := monitors.Get(networkingClient, monitorId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack LB Monitor %s", monitorId) + return m, "DELETED", nil + } + + if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { + if errCode.Actual == 409 { + log.Printf("[DEBUG] OpenStack LB Monitor (%s) is waiting for Pool to delete.", monitorId) + return m, "PENDING", nil + } + } + + return m, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack LB Monitor: %+v", m) + err = monitors.Delete(networkingClient, monitorId).ExtractErr() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack LB Monitor %s", monitorId) + return m, "DELETED", nil + } + + if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { + if errCode.Actual == 409 { + log.Printf("[DEBUG] OpenStack LB Monitor (%s) is waiting for Pool to delete.", monitorId) + return m, "PENDING", nil + } + } + + return m, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack LB Monitor %s still 
active.", monitorId) + return m, "ACTIVE", nil + } + +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_monitor_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_monitor_v2.go new file mode 100644 index 000000000..061c270e5 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_monitor_v2.go @@ -0,0 +1,294 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors" +) + +func resourceMonitorV2() *schema.Resource { + return &schema.Resource{ + Create: resourceMonitorV2Create, + Read: resourceMonitorV2Read, + Update: resourceMonitorV2Update, + Delete: resourceMonitorV2Delete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + + "pool_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "delay": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + "timeout": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + "max_retries": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + "url_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "http_method": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "expected_codes": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "admin_state_up": &schema.Schema{ + Type: schema.TypeBool, + Default: true, + Optional: true, + }, + + "id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + } +} + +func resourceMonitorV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + adminStateUp := d.Get("admin_state_up").(bool) + createOpts := monitors.CreateOpts{ + PoolID: d.Get("pool_id").(string), + TenantID: d.Get("tenant_id").(string), + Type: d.Get("type").(string), + Delay: d.Get("delay").(int), + Timeout: d.Get("timeout").(int), + MaxRetries: d.Get("max_retries").(int), + URLPath: d.Get("url_path").(string), + HTTPMethod: d.Get("http_method").(string), + ExpectedCodes: d.Get("expected_codes").(string), + Name: d.Get("name").(string), + AdminStateUp: &adminStateUp, + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + monitor, err := monitors.Create(networkingClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack LBaaSV2 monitor: %s", err) + } + log.Printf("[INFO] monitor ID: %s", monitor.ID) + + log.Printf("[DEBUG] 
Waiting for Openstack LBaaSV2 monitor (%s) to become available.", monitor.ID) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING_CREATE"}, + Target: []string{"ACTIVE"}, + Refresh: waitForMonitorActive(networkingClient, monitor.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + d.SetId(monitor.ID) + + return resourceMonitorV2Read(d, meta) +} + +func resourceMonitorV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + monitor, err := monitors.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "LBV2 Monitor") + } + + log.Printf("[DEBUG] Retrieved OpenStack LBaaSV2 Monitor %s: %+v", d.Id(), monitor) + + d.Set("id", monitor.ID) + d.Set("tenant_id", monitor.TenantID) + d.Set("type", monitor.Type) + d.Set("delay", monitor.Delay) + d.Set("timeout", monitor.Timeout) + d.Set("max_retries", monitor.MaxRetries) + d.Set("url_path", monitor.URLPath) + d.Set("http_method", monitor.HTTPMethod) + d.Set("expected_codes", monitor.ExpectedCodes) + d.Set("admin_state_up", monitor.AdminStateUp) + d.Set("name", monitor.Name) + + return nil +} + +func resourceMonitorV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var updateOpts monitors.UpdateOpts + if d.HasChange("url_path") { + updateOpts.URLPath = d.Get("url_path").(string) + } + if d.HasChange("expected_codes") { + updateOpts.ExpectedCodes = d.Get("expected_codes").(string) + } + if d.HasChange("delay") { + updateOpts.Delay = d.Get("delay").(int) + } + if d.HasChange("timeout") { + updateOpts.Timeout = d.Get("timeout").(int) + } + if d.HasChange("max_retries") { + updateOpts.MaxRetries = d.Get("max_retries").(int) + } + if d.HasChange("admin_state_up") { + asu := d.Get("admin_state_up").(bool) + updateOpts.AdminStateUp = &asu + } + if d.HasChange("name") { + updateOpts.Name = d.Get("name").(string) + } + if d.HasChange("http_method") { + updateOpts.HTTPMethod = d.Get("http_method").(string) + } + + log.Printf("[DEBUG] Updating OpenStack LBaaSV2 Monitor %s with options: %+v", d.Id(), updateOpts) + + _, err = monitors.Update(networkingClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack LBaaSV2 Monitor: %s", err) + } + + return resourceMonitorV2Read(d, meta) +} + +func resourceMonitorV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE", "PENDING_DELETE"}, + Target: []string{"DELETED"}, + Refresh: waitForMonitorDelete(networkingClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error deleting OpenStack LBaaSV2 Monitor: %s", err) + } + + d.SetId("") + return nil +} + +func waitForMonitorActive(networkingClient 
*gophercloud.ServiceClient, monitorID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + monitor, err := monitors.Get(networkingClient, monitorID).Extract() + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] OpenStack LBaaSV2 Monitor: %+v", monitor) + return monitor, "ACTIVE", nil + } +} + +func waitForMonitorDelete(networkingClient *gophercloud.ServiceClient, monitorID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete OpenStack LBaaSV2 Monitor %s", monitorID) + + monitor, err := monitors.Get(networkingClient, monitorID).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack LBaaSV2 Monitor %s", monitorID) + return monitor, "DELETED", nil + } + return monitor, "ACTIVE", err + } + + log.Printf("[DEBUG] Openstack LBaaSV2 Monitor: %+v", monitor) + err = monitors.Delete(networkingClient, monitorID).ExtractErr() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack LBaaSV2 Monitor %s", monitorID) + return monitor, "DELETED", nil + } + + if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { + if errCode.Actual == 409 { + log.Printf("[DEBUG] OpenStack LBaaSV2 Monitor (%s) is still in use.", monitorID) + return monitor, "ACTIVE", nil + } + } + + return monitor, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack LBaaSV2 Monitor %s still active.", monitorID) + return monitor, "ACTIVE", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_pool_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_pool_v1.go new file mode 100644 index 000000000..7cf796c6f --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_pool_v1.go @@ -0,0 +1,467 @@ +package openstack + +import ( + "bytes" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools" + "github.com/gophercloud/gophercloud/pagination" +) + +func resourceLBPoolV1() *schema.Resource { + return &schema.Resource{ + Create: resourceLBPoolV1Create, + Read: resourceLBPoolV1Read, + Update: resourceLBPoolV1Update, + Delete: resourceLBPoolV1Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + "protocol": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "subnet_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "lb_method": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + "lb_provider": 
&schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "member": &schema.Schema{ + Type: schema.TypeSet, + Deprecated: "Use openstack_lb_member_v1 instead. This attribute will be removed in a future version.", + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "address": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "admin_state_up": &schema.Schema{ + Type: schema.TypeBool, + Required: true, + ForceNew: false, + }, + }, + }, + Set: resourceLBMemberV1Hash, + }, + "monitor_ids": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } +} + +func resourceLBPoolV1Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + createOpts := pools.CreateOpts{ + Name: d.Get("name").(string), + SubnetID: d.Get("subnet_id").(string), + TenantID: d.Get("tenant_id").(string), + Provider: d.Get("lb_provider").(string), + } + + if v, ok := d.GetOk("protocol"); ok { + protocol := resourceLBPoolV1DetermineProtocol(v.(string)) + createOpts.Protocol = protocol + } + + if v, ok := d.GetOk("lb_method"); ok { + lbMethod := resourceLBPoolV1DetermineLBMethod(v.(string)) + createOpts.LBMethod = lbMethod + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + p, err := pools.Create(networkingClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack LB pool: %s", err) + } + log.Printf("[INFO] LB Pool ID: %s", p.ID) + + log.Printf("[DEBUG] Waiting for OpenStack LB pool (%s) to become available.", p.ID) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING_CREATE"}, + Target: []string{"ACTIVE"}, + Refresh: waitForLBPoolActive(networkingClient, p.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + d.SetId(p.ID) + + if mIDs := resourcePoolMonitorIDsV1(d); mIDs != nil { + for _, mID := range mIDs { + _, err := pools.AssociateMonitor(networkingClient, p.ID, mID).Extract() + if err != nil { + return fmt.Errorf("Error associating monitor (%s) with OpenStack LB pool (%s): %s", mID, p.ID, err) + } + } + } + + if memberOpts := resourcePoolMembersV1(d); memberOpts != nil { + for _, memberOpt := range memberOpts { + _, err := members.Create(networkingClient, memberOpt).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack LB member: %s", err) + } + } + } + + return resourceLBPoolV1Read(d, meta) +} + +func resourceLBPoolV1Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: 
%s", err)
+	}
+
+	p, err := pools.Get(networkingClient, d.Id()).Extract()
+	if err != nil {
+		return CheckDeleted(d, err, "LB pool")
+	}
+
+	log.Printf("[DEBUG] Retrieved OpenStack LB Pool %s: %+v", d.Id(), p)
+
+	d.Set("name", p.Name)
+	d.Set("protocol", p.Protocol)
+	d.Set("subnet_id", p.SubnetID)
+	d.Set("lb_method", p.LBMethod)
+	d.Set("lb_provider", p.Provider)
+	d.Set("tenant_id", p.TenantID)
+	d.Set("monitor_ids", p.MonitorIDs)
+	d.Set("member_ids", p.MemberIDs)
+	d.Set("region", GetRegion(d))
+
+	return nil
+}
+
+func resourceLBPoolV1Update(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	var updateOpts pools.UpdateOpts
+	// If either option changed, update both.
+	// Gophercloud complains if one is empty.
+	if d.HasChange("name") || d.HasChange("lb_method") {
+		updateOpts.Name = d.Get("name").(string)
+
+		lbMethod := resourceLBPoolV1DetermineLBMethod(d.Get("lb_method").(string))
+		updateOpts.LBMethod = lbMethod
+	}
+
+	log.Printf("[DEBUG] Updating OpenStack LB Pool %s with options: %+v", d.Id(), updateOpts)
+
+	_, err = pools.Update(networkingClient, d.Id(), updateOpts).Extract()
+	if err != nil {
+		return fmt.Errorf("Error updating OpenStack LB Pool: %s", err)
+	}
+
+	if d.HasChange("monitor_ids") {
+		oldMIDsRaw, newMIDsRaw := d.GetChange("monitor_ids")
+		oldMIDsSet, newMIDsSet := oldMIDsRaw.(*schema.Set), newMIDsRaw.(*schema.Set)
+		monitorsToAdd := newMIDsSet.Difference(oldMIDsSet)
+		monitorsToRemove := oldMIDsSet.Difference(newMIDsSet)
+
+		log.Printf("[DEBUG] Monitors to add: %v", monitorsToAdd)
+
+		log.Printf("[DEBUG] Monitors to remove: %v", monitorsToRemove)
+
+		for _, m := range monitorsToAdd.List() {
+			_, err := pools.AssociateMonitor(networkingClient, d.Id(), m.(string)).Extract()
+			if err != nil {
+				return fmt.Errorf("Error associating monitor (%s) with OpenStack LB pool (%s): %s", m.(string), d.Id(), err)
+			}
+			log.Printf("[DEBUG] Associated monitor (%s) with pool (%s)", m.(string), d.Id())
+		}
+
+		for _, m := range monitorsToRemove.List() {
+			_, err := pools.DisassociateMonitor(networkingClient, d.Id(), m.(string)).Extract()
+			if err != nil {
+				return fmt.Errorf("Error disassociating monitor (%s) from OpenStack LB pool (%s): %s", m.(string), d.Id(), err)
+			}
+			log.Printf("[DEBUG] Disassociated monitor (%s) from pool (%s)", m.(string), d.Id())
+		}
+	}
+
+	if d.HasChange("member") {
+		oldMembersRaw, newMembersRaw := d.GetChange("member")
+		oldMembersSet, newMembersSet := oldMembersRaw.(*schema.Set), newMembersRaw.(*schema.Set)
+		membersToAdd := newMembersSet.Difference(oldMembersSet)
+		membersToRemove := oldMembersSet.Difference(newMembersSet)
+
+		log.Printf("[DEBUG] Members to add: %v", membersToAdd)
+
+		log.Printf("[DEBUG] Members to remove: %v", membersToRemove)
+
+		for _, m := range membersToRemove.List() {
+			oldMember := resourcePoolMemberV1(d, m)
+			listOpts := members.ListOpts{
+				PoolID:       d.Id(),
+				Address:      oldMember.Address,
+				ProtocolPort: oldMember.ProtocolPort,
+			}
+			err = members.List(networkingClient, listOpts).EachPage(func(page pagination.Page) (bool, error) {
+				extractedMembers, err := members.ExtractMembers(page)
+				if err != nil {
+					return false, err
+				}
+				for _, member := range extractedMembers {
+					err := members.Delete(networkingClient, member.ID).ExtractErr()
+					if err != nil {
+						return false, fmt.Errorf("Error deleting member (%s) from OpenStack LB pool (%s): 
%s", member.ID, d.Id(), err)
+					}
+					log.Printf("[DEBUG] Deleted member (%s) from pool (%s)", member.ID, d.Id())
+				}
+				return true, nil
+			})
+			if err != nil {
+				return fmt.Errorf("Error removing members from OpenStack LB pool (%s): %s", d.Id(), err)
+			}
+		}
+
+		for _, m := range membersToAdd.List() {
+			createOpts := resourcePoolMemberV1(d, m)
+			newMember, err := members.Create(networkingClient, createOpts).Extract()
+			if err != nil {
+				return fmt.Errorf("Error creating LB member: %s", err)
+			}
+			log.Printf("[DEBUG] Created member (%s) in OpenStack LB pool (%s)", newMember.ID, d.Id())
+		}
+	}
+
+	return resourceLBPoolV1Read(d, meta)
+}
+
+func resourceLBPoolV1Delete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	// Make sure all monitors are disassociated first
+	if v, ok := d.GetOk("monitor_ids"); ok {
+		if monitorIDSet, ok := v.(*schema.Set); ok {
+			for _, monitorID := range monitorIDSet.List() {
+				mID := monitorID.(string)
+				log.Printf("[DEBUG] Attempting to disassociate monitor %s from pool %s", mID, d.Id())
+				if res := pools.DisassociateMonitor(networkingClient, d.Id(), mID); res.Err != nil {
+					return fmt.Errorf("Error disassociating monitor %s from pool %s: %s", mID, d.Id(), res.Err)
+				}
+			}
+		}
+	}
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"ACTIVE", "PENDING_DELETE"},
+		Target:     []string{"DELETED"},
+		Refresh:    waitForLBPoolDelete(networkingClient, d.Id()),
+		Timeout:    d.Timeout(schema.TimeoutDelete),
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf("Error deleting OpenStack LB Pool: %s", err)
+	}
+
+	d.SetId("")
+	return nil
+}
+
+func resourcePoolMonitorIDsV1(d *schema.ResourceData) []string {
+	mIDsRaw := d.Get("monitor_ids").(*schema.Set)
+	mIDs := make([]string, mIDsRaw.Len())
+	for i, raw := range mIDsRaw.List() {
+		mIDs[i] = raw.(string)
+	}
+	return mIDs
+}
+
+func resourcePoolMembersV1(d *schema.ResourceData) []members.CreateOpts {
+	memberOptsRaw := d.Get("member").(*schema.Set)
+	memberOpts := make([]members.CreateOpts, memberOptsRaw.Len())
+	for i, raw := range memberOptsRaw.List() {
+		rawMap := raw.(map[string]interface{})
+		memberOpts[i] = members.CreateOpts{
+			TenantID:     rawMap["tenant_id"].(string),
+			Address:      rawMap["address"].(string),
+			ProtocolPort: rawMap["port"].(int),
+			PoolID:       d.Id(),
+		}
+	}
+	return memberOpts
+}
+
+func resourcePoolMemberV1(d *schema.ResourceData, raw interface{}) members.CreateOpts {
+	rawMap := raw.(map[string]interface{})
+	return members.CreateOpts{
+		TenantID:     rawMap["tenant_id"].(string),
+		Address:      rawMap["address"].(string),
+		ProtocolPort: rawMap["port"].(int),
+		PoolID:       d.Id(),
+	}
+}
+
+func resourceLBMemberV1Hash(v interface{}) int {
+	var buf bytes.Buffer
+	m := v.(map[string]interface{})
+	buf.WriteString(fmt.Sprintf("%s-", m["region"].(string)))
+	buf.WriteString(fmt.Sprintf("%s-", m["tenant_id"].(string)))
+	buf.WriteString(fmt.Sprintf("%s-", m["address"].(string)))
+	buf.WriteString(fmt.Sprintf("%d-", m["port"].(int)))
+
+	return hashcode.String(buf.String())
+}
+
+func resourceLBPoolV1DetermineProtocol(v string) pools.LBProtocol {
+	var protocol pools.LBProtocol
+	switch v {
+	case "TCP":
+		protocol = pools.ProtocolTCP
+	case "HTTP":
+		protocol = pools.ProtocolHTTP
+	case "HTTPS":
+		protocol = pools.ProtocolHTTPS
+	}
+
+	return protocol
+}
+
+func resourceLBPoolV1DetermineLBMethod(v string) pools.LBMethod {
+	var lbMethod pools.LBMethod
+	
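// Any other value falls through and returns the zero-value LBMethod,
+	// leaving final validation of lb_method to the Neutron API.
+	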
switch v { + case "ROUND_ROBIN": + lbMethod = pools.LBMethodRoundRobin + case "LEAST_CONNECTIONS": + lbMethod = pools.LBMethodLeastConnections + } + + return lbMethod +} + +func waitForLBPoolActive(networkingClient *gophercloud.ServiceClient, poolId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + p, err := pools.Get(networkingClient, poolId).Extract() + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] OpenStack LB Pool: %+v", p) + if p.Status == "ACTIVE" { + return p, "ACTIVE", nil + } + + return p, p.Status, nil + } +} + +func waitForLBPoolDelete(networkingClient *gophercloud.ServiceClient, poolId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete OpenStack LB Pool %s", poolId) + + p, err := pools.Get(networkingClient, poolId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack LB Pool %s", poolId) + return p, "DELETED", nil + } + return p, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack LB Pool: %+v", p) + err = pools.Delete(networkingClient, poolId).ExtractErr() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack LB Pool %s", poolId) + return p, "DELETED", nil + } + return p, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack LB Pool %s still active.", poolId) + return p, "ACTIVE", nil + } + +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_pool_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_pool_v2.go new file mode 100644 index 000000000..d1a602f53 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_pool_v2.go @@ -0,0 +1,350 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools" +) + +func resourcePoolV2() *schema.Resource { + return &schema.Resource{ + Create: resourcePoolV2Create, + Read: resourcePoolV2Read, + Update: resourcePoolV2Update, + Delete: resourcePoolV2Delete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "protocol": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "TCP" && value != "HTTP" && value != "HTTPS" { + errors = append(errors, fmt.Errorf( + "Only 'TCP', 'HTTP', and 'HTTPS' are supported values for 'protocol'")) + } + return + }, + }, + + // One of loadbalancer_id or listener_id must be provided + "loadbalancer_id": &schema.Schema{ + Type: 
schema.TypeString, + Optional: true, + ForceNew: true, + }, + + // One of loadbalancer_id or listener_id must be provided + "listener_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "lb_method": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "ROUND_ROBIN" && value != "LEAST_CONNECTIONS" && value != "SOURCE_IP" { + errors = append(errors, fmt.Errorf( + "Only 'ROUND_ROBIN', 'LEAST_CONNECTIONS', and 'SOURCE_IP' are supported values for 'lb_method'")) + } + return + }, + }, + + "persistence": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "SOURCE_IP" && value != "HTTP_COOKIE" && value != "APP_COOKIE" { + errors = append(errors, fmt.Errorf( + "Only 'SOURCE_IP', 'HTTP_COOKIE', and 'APP_COOKIE' are supported values for 'persistence'")) + } + return + }, + }, + + "cookie_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + }, + }, + + "admin_state_up": &schema.Schema{ + Type: schema.TypeBool, + Default: true, + Optional: true, + }, + + "id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + } +} + +func resourcePoolV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + adminStateUp := d.Get("admin_state_up").(bool) + var persistence pools.SessionPersistence + if p, ok := d.GetOk("persistence"); ok { + pV := (p.([]interface{}))[0].(map[string]interface{}) + + persistence = pools.SessionPersistence{ + Type: pV["type"].(string), + CookieName: pV["cookie_name"].(string), + } + } + createOpts := pools.CreateOpts{ + TenantID: d.Get("tenant_id").(string), + Name: d.Get("name").(string), + Description: d.Get("description").(string), + Protocol: pools.Protocol(d.Get("protocol").(string)), + LoadbalancerID: d.Get("loadbalancer_id").(string), + ListenerID: d.Get("listener_id").(string), + LBMethod: pools.LBMethod(d.Get("lb_method").(string)), + AdminStateUp: &adminStateUp, + } + // Must omit if not set + if persistence != (pools.SessionPersistence{}) { + createOpts.Persistence = &persistence + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + + var pool *pools.Pool + err = resource.Retry(d.Timeout(schema.TimeoutCreate), func() *resource.RetryError { + var err error + log.Printf("[DEBUG] Attempting to create LBaaSV2 pool") + pool, err = pools.Create(networkingClient, createOpts).Extract() + if err != nil { + switch errCode := err.(type) { + case gophercloud.ErrDefault500: + log.Printf("[DEBUG] OpenStack LBaaSV2 pool is still creating.") + return resource.RetryableError(err) + case gophercloud.ErrUnexpectedResponseCode: + if errCode.Actual == 409 { + log.Printf("[DEBUG] OpenStack LBaaSV2 pool is still creating.") + return resource.RetryableError(err) + } + default: + return resource.NonRetryableError(err) + } + } + return nil + }) + + if err != nil { + return fmt.Errorf("Error creating OpenStack LBaaSV2 pool: %s", err) + } + + log.Printf("[INFO] pool ID: %s", 
pool.ID)
+
+	log.Printf("[DEBUG] Waiting for OpenStack LBaaSV2 pool (%s) to become available.", pool.ID)
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"PENDING_CREATE"},
+		Target:     []string{"ACTIVE"},
+		Refresh:    waitForPoolActive(networkingClient, pool.ID),
+		Timeout:    d.Timeout(schema.TimeoutCreate),
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return err
+	}
+
+	d.SetId(pool.ID)
+
+	return resourcePoolV2Read(d, meta)
+}
+
+func resourcePoolV2Read(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	pool, err := pools.Get(networkingClient, d.Id()).Extract()
+	if err != nil {
+		return CheckDeleted(d, err, "LBV2 Pool")
+	}
+
+	log.Printf("[DEBUG] Retrieved OpenStack LBaaSV2 Pool %s: %+v", d.Id(), pool)
+
+	d.Set("lb_method", pool.LBMethod)
+	d.Set("protocol", pool.Protocol)
+	d.Set("description", pool.Description)
+	d.Set("tenant_id", pool.TenantID)
+	d.Set("admin_state_up", pool.AdminStateUp)
+	d.Set("name", pool.Name)
+	d.Set("id", pool.ID)
+	d.Set("persistence", pool.Persistence)
+
+	return nil
+}
+
+func resourcePoolV2Update(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	var updateOpts pools.UpdateOpts
+	if d.HasChange("lb_method") {
+		updateOpts.LBMethod = pools.LBMethod(d.Get("lb_method").(string))
+	}
+	if d.HasChange("name") {
+		updateOpts.Name = d.Get("name").(string)
+	}
+	if d.HasChange("description") {
+		updateOpts.Description = d.Get("description").(string)
+	}
+	if d.HasChange("admin_state_up") {
+		asu := d.Get("admin_state_up").(bool)
+		updateOpts.AdminStateUp = &asu
+	}
+
+	log.Printf("[DEBUG] Updating OpenStack LBaaSV2 Pool %s with options: %+v", d.Id(), updateOpts)
+
+	_, err = pools.Update(networkingClient, d.Id(), updateOpts).Extract()
+	if err != nil {
+		return fmt.Errorf("Error updating OpenStack LBaaSV2 Pool: %s", err)
+	}
+
+	return resourcePoolV2Read(d, meta)
+}
+
+func resourcePoolV2Delete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"ACTIVE", "PENDING_DELETE"},
+		Target:     []string{"DELETED"},
+		Refresh:    waitForPoolDelete(networkingClient, d.Id()),
+		Timeout:    d.Timeout(schema.TimeoutDelete),
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf("Error deleting OpenStack LBaaSV2 Pool: %s", err)
+	}
+
+	d.SetId("")
+	return nil
+}
+
+func waitForPoolActive(networkingClient *gophercloud.ServiceClient, poolID string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		pool, err := pools.Get(networkingClient, poolID).Extract()
+		if err != nil {
+			return nil, "", err
+		}
+
+		// The pool resource has no Status attribute, so a successful Get is the best we can do
+		log.Printf("[DEBUG] OpenStack LBaaSV2 Pool: %+v", pool)
+		return pool, "ACTIVE", nil
+	}
+}
+
+func waitForPoolDelete(networkingClient *gophercloud.ServiceClient, poolID string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		log.Printf("[DEBUG] Attempting to delete OpenStack LBaaSV2 Pool %s", poolID)
+
+		pool, err := pools.Get(networkingClient, poolID).Extract()
+		if err != nil {
+			if _, ok := err.(gophercloud.ErrDefault404); ok {
+				log.Printf("[DEBUG] Successfully deleted OpenStack LBaaSV2 Pool %s", poolID)
+				return pool, "DELETED", nil
+			}
+			return pool, "ACTIVE", err
+		}
+
+		log.Printf("[DEBUG] OpenStack LBaaSV2 Pool: %+v", pool)
+		err = pools.Delete(networkingClient, poolID).ExtractErr()
+		if err != nil {
+			if _, ok := err.(gophercloud.ErrDefault404); ok {
+				log.Printf("[DEBUG] Successfully deleted OpenStack LBaaSV2 Pool %s", poolID)
+				return pool, "DELETED", nil
+			}
+
+			if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok {
+				if errCode.Actual == 409 {
+					log.Printf("[DEBUG] OpenStack LBaaSV2 Pool (%s) is still in use.", poolID)
+					return pool, "ACTIVE", nil
+				}
+			}
+
+			return pool, "ACTIVE", err
+		}
+
+		log.Printf("[DEBUG] OpenStack LBaaSV2 Pool %s still active.", poolID)
+		return pool, "ACTIVE", nil
+	}
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_vip_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_vip_v1.go
new file mode 100644
index 000000000..6e6d46d89
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_vip_v1.go
@@ -0,0 +1,401 @@
+package openstack
+
+import (
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceLBVipV1() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceLBVipV1Create,
+		Read:   resourceLBVipV1Read,
+		Update: resourceLBVipV1Update,
+		Delete: resourceLBVipV1Delete,
+		Importer: &schema.ResourceImporter{
+			State: schema.ImportStatePassthrough,
+		},
+
+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(10 * time.Minute),
+			Delete: schema.DefaultTimeout(10 * time.Minute),
+		},
+
+		Schema: map[string]*schema.Schema{
+			"region": &schema.Schema{
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""),
+			},
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: false,
+			},
+			"subnet_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"protocol": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"port": &schema.Schema{
+				Type:     schema.TypeInt,
+				Required: true,
+				ForceNew: true,
+			},
+			"pool_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: false,
+			},
+			"tenant_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+				ForceNew: true,
+			},
+			"address": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+				ForceNew: true,
+			},
+			"description": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+				ForceNew: false,
+			},
+			"persistence": &schema.Schema{
+				Type:     schema.TypeMap,
+				Optional: true,
+				ForceNew: false,
+			},
+			"conn_limit": &schema.Schema{
+				Type:     schema.TypeInt,
+				Optional: true,
+				Computed: true,
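+				// conn_limit is Computed, so an API-assigned default is preserved when the attribute is omitted.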
+				ForceNew: false,
+			},
+			"port_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+				ForceNew: false,
+			},
+			"floating_ip": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: false,
+			},
+			"admin_state_up": &schema.Schema{
+				Type:     schema.TypeBool,
+				Optional: true,
+				Computed: true,
+				ForceNew: false,
+			},
+		},
+	}
+}
+
+func resourceLBVipV1Create(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	createOpts := vips.CreateOpts{
+		Name:         d.Get("name").(string),
+		SubnetID:     d.Get("subnet_id").(string),
+		Protocol:     d.Get("protocol").(string),
+		ProtocolPort: d.Get("port").(int),
+		PoolID:       d.Get("pool_id").(string),
+		TenantID:     d.Get("tenant_id").(string),
+		Address:      d.Get("address").(string),
+		Description:  d.Get("description").(string),
+		Persistence:  resourceVipPersistenceV1(d),
+		ConnLimit:    gophercloud.MaybeInt(d.Get("conn_limit").(int)),
+	}
+
+	asu := d.Get("admin_state_up").(bool)
+	createOpts.AdminStateUp = &asu
+
+	log.Printf("[DEBUG] Create Options: %#v", createOpts)
+	p, err := vips.Create(networkingClient, createOpts).Extract()
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack LB VIP: %s", err)
+	}
+	log.Printf("[INFO] LB VIP ID: %s", p.ID)
+
+	log.Printf("[DEBUG] Waiting for OpenStack LB VIP (%s) to become available.", p.ID)
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"PENDING_CREATE"},
+		Target:     []string{"ACTIVE"},
+		Refresh:    waitForLBVIPActive(networkingClient, p.ID),
+		Timeout:    d.Timeout(schema.TimeoutCreate),
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return err
+	}
+
+	floatingIP := d.Get("floating_ip").(string)
+	if floatingIP != "" {
+		if err := lbVipV1AssignFloatingIP(floatingIP, p.PortID, networkingClient); err != nil {
+			return fmt.Errorf("Error assigning floating IP to OpenStack LB VIP: %s", err)
+		}
+	}
+
+	d.SetId(p.ID)
+
+	return resourceLBVipV1Read(d, meta)
+}
+
+func resourceLBVipV1Read(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	p, err := vips.Get(networkingClient, d.Id()).Extract()
+	if err != nil {
+		return CheckDeleted(d, err, "LB VIP")
+	}
+
+	log.Printf("[DEBUG] Retrieved OpenStack LB VIP %s: %+v", d.Id(), p)
+
+	d.Set("name", p.Name)
+	d.Set("subnet_id", p.SubnetID)
+	d.Set("protocol", p.Protocol)
+	d.Set("port", p.ProtocolPort)
+	d.Set("pool_id", p.PoolID)
+	d.Set("port_id", p.PortID)
+	d.Set("tenant_id", p.TenantID)
+	d.Set("address", p.Address)
+	d.Set("description", p.Description)
+	d.Set("conn_limit", p.ConnLimit)
+	d.Set("admin_state_up", p.AdminStateUp)
+
+	// Set the persistence method being used
+	persistence := make(map[string]interface{})
+	if p.Persistence.Type != "" {
+		persistence["type"] = p.Persistence.Type
+	}
+	if p.Persistence.CookieName != "" {
+		persistence["cookie_name"] = p.Persistence.CookieName
+	}
+	d.Set("persistence", persistence)
+
+	d.Set("region", GetRegion(d))
+
+	return nil
+}
+
+func resourceLBVipV1Update(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	var updateOpts vips.UpdateOpts
+	if d.HasChange("name") {
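+	// vips.UpdateOpts uses pointer fields, so only the attributes explicitly set below are serialized into the update request.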
+		v := d.Get("name").(string)
+		updateOpts.Name = &v
+	}
+
+	if d.HasChange("pool_id") {
+		v := d.Get("pool_id").(string)
+		updateOpts.PoolID = &v
+	}
+
+	if d.HasChange("description") {
+		v := d.Get("description").(string)
+		updateOpts.Description = &v
+	}
+
+	if d.HasChange("conn_limit") {
+		updateOpts.ConnLimit = gophercloud.MaybeInt(d.Get("conn_limit").(int))
+	}
+
+	if d.HasChange("floating_ip") {
+		portID := d.Get("port_id").(string)
+
+		// Searching for a floating IP assigned to the VIP
+		listOpts := floatingips.ListOpts{
+			PortID: portID,
+		}
+		page, err := floatingips.List(networkingClient, listOpts).AllPages()
+		if err != nil {
+			return err
+		}
+
+		fips, err := floatingips.ExtractFloatingIPs(page)
+		if err != nil {
+			return err
+		}
+
+		// If a floating IP is found we unassign it
+		if len(fips) == 1 {
+			portID := ""
+			updateOpts := floatingips.UpdateOpts{
+				PortID: &portID,
+			}
+			if err = floatingips.Update(networkingClient, fips[0].ID, updateOpts).Err; err != nil {
+				return err
+			}
+		}
+
+		// Assign the updated floating IP
+		floatingIP := d.Get("floating_ip").(string)
+		if floatingIP != "" {
+			if err := lbVipV1AssignFloatingIP(floatingIP, portID, networkingClient); err != nil {
+				return fmt.Errorf("Error assigning floating IP to OpenStack LB VIP: %s", err)
+			}
+		}
+	}
+
+	if d.HasChange("admin_state_up") {
+		asu := d.Get("admin_state_up").(bool)
+		updateOpts.AdminStateUp = &asu
+	}
+
+	// Persistence has to be included, even if it hasn't changed.
+	updateOpts.Persistence = resourceVipPersistenceV1(d)
+
+	log.Printf("[DEBUG] Updating OpenStack LB VIP %s with options: %+v", d.Id(), updateOpts)
+
+	_, err = vips.Update(networkingClient, d.Id(), updateOpts).Extract()
+	if err != nil {
+		return fmt.Errorf("Error updating OpenStack LB VIP: %s", err)
+	}
+
+	return resourceLBVipV1Read(d, meta)
+}
+
+func resourceLBVipV1Delete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"ACTIVE", "PENDING_DELETE"},
+		Target:     []string{"DELETED"},
+		Refresh:    waitForLBVIPDelete(networkingClient, d.Id()),
+		Timeout:    d.Timeout(schema.TimeoutDelete),
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf("Error deleting OpenStack LB VIP: %s", err)
+	}
+
+	d.SetId("")
+	return nil
+}
+
+func resourceVipPersistenceV1(d *schema.ResourceData) *vips.SessionPersistence {
+	rawP := d.Get("persistence").(interface{})
+	rawMap := rawP.(map[string]interface{})
+	if len(rawMap) != 0 {
+		p := vips.SessionPersistence{}
+		if t, ok := rawMap["type"]; ok {
+			p.Type = t.(string)
+		}
+		if c, ok := rawMap["cookie_name"]; ok {
+			p.CookieName = c.(string)
+		}
+		return &p
+	}
+	return nil
+}
+
+func lbVipV1AssignFloatingIP(floatingIP, portID string, networkingClient *gophercloud.ServiceClient) error {
+	log.Printf("[DEBUG] Assigning floating IP %s to VIP %s", floatingIP, portID)
+
+	listOpts := floatingips.ListOpts{
+		FloatingIP: floatingIP,
+	}
+	page, err := floatingips.List(networkingClient, listOpts).AllPages()
+	if err != nil {
+		return err
+	}
+
+	fips, err := floatingips.ExtractFloatingIPs(page)
+	if err != nil {
+		return err
+	}
+	if len(fips) != 1 {
+		return fmt.Errorf("Unable to retrieve floating IP '%s'", floatingIP)
+	}
+
+	updateOpts := floatingips.UpdateOpts{
+		PortID: &portID,
+	}
+	if err = floatingips.Update(networkingClient, fips[0].ID, updateOpts).Err; err != nil {
+		return err
+	}
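+	// Only the raw result's Err is inspected above; the updated floating IP body is not needed here.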
+ + return nil +} + +func waitForLBVIPActive(networkingClient *gophercloud.ServiceClient, vipId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + p, err := vips.Get(networkingClient, vipId).Extract() + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] OpenStack LB VIP: %+v", p) + if p.Status == "ACTIVE" { + return p, "ACTIVE", nil + } + + return p, p.Status, nil + } +} + +func waitForLBVIPDelete(networkingClient *gophercloud.ServiceClient, vipId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete OpenStack LB VIP %s", vipId) + + p, err := vips.Get(networkingClient, vipId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack LB VIP %s", vipId) + return p, "DELETED", nil + } + return p, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack LB VIP: %+v", p) + err = vips.Delete(networkingClient, vipId).ExtractErr() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack LB VIP %s", vipId) + return p, "DELETED", nil + } + return p, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack LB VIP %s still active.", vipId) + return p, "ACTIVE", nil + } + +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_floatingip_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_floatingip_v2.go new file mode 100644 index 000000000..9712dd156 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_floatingip_v2.go @@ -0,0 +1,298 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + "github.com/gophercloud/gophercloud/pagination" +) + +func resourceNetworkingFloatingIPV2() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkFloatingIPV2Create, + Read: resourceNetworkFloatingIPV2Read, + Update: resourceNetworkFloatingIPV2Update, + Delete: resourceNetworkFloatingIPV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + "address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "pool": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_POOL_NAME", nil), + }, + "port_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "fixed_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "value_specs": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, 
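+		// Note: only Create and Delete declare timeouts for this resource; Update is a single API call with no wait loop.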
+	}
+}
+
+func resourceNetworkFloatingIPV2Create(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack network client: %s", err)
+	}
+
+	poolID, err := getNetworkID(d, meta, d.Get("pool").(string))
+	if err != nil {
+		return fmt.Errorf("Error retrieving floating IP pool name: %s", err)
+	}
+	if len(poolID) == 0 {
+		return fmt.Errorf("No network found with name: %s", d.Get("pool").(string))
+	}
+	createOpts := FloatingIPCreateOpts{
+		floatingips.CreateOpts{
+			FloatingNetworkID: poolID,
+			PortID:            d.Get("port_id").(string),
+			TenantID:          d.Get("tenant_id").(string),
+			FixedIP:           d.Get("fixed_ip").(string),
+		},
+		MapValueSpecs(d),
+	}
+
+	log.Printf("[DEBUG] Create Options: %#v", createOpts)
+	floatingIP, err := floatingips.Create(networkingClient, createOpts).Extract()
+	if err != nil {
+		return fmt.Errorf("Error allocating floating IP: %s", err)
+	}
+
+	log.Printf("[DEBUG] Waiting for OpenStack Neutron Floating IP (%s) to become available.", floatingIP.ID)
+
+	stateConf := &resource.StateChangeConf{
+		Target:     []string{"ACTIVE"},
+		Refresh:    waitForFloatingIPActive(networkingClient, floatingIP.ID),
+		Timeout:    d.Timeout(schema.TimeoutCreate),
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf("Error waiting for OpenStack Neutron Floating IP (%s) to become available: %s", floatingIP.ID, err)
+	}
+
+	d.SetId(floatingIP.ID)
+
+	return resourceNetworkFloatingIPV2Read(d, meta)
+}
+
+func resourceNetworkFloatingIPV2Read(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack network client: %s", err)
+	}
+
+	floatingIP, err := floatingips.Get(networkingClient, d.Id()).Extract()
+	if err != nil {
+		return CheckDeleted(d, err, "floating IP")
+	}
+
+	d.Set("address", floatingIP.FloatingIP)
+	d.Set("port_id", floatingIP.PortID)
+	d.Set("fixed_ip", floatingIP.FixedIP)
+	poolName, err := getNetworkName(d, meta, floatingIP.FloatingNetworkID)
+	if err != nil {
+		return fmt.Errorf("Error retrieving floating IP pool name: %s", err)
+	}
+	d.Set("pool", poolName)
+	d.Set("tenant_id", floatingIP.TenantID)
+
+	d.Set("region", GetRegion(d))
+
+	return nil
+}
+
+func resourceNetworkFloatingIPV2Update(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack network client: %s", err)
+	}
+
+	var updateOpts floatingips.UpdateOpts
+
+	if d.HasChange("port_id") {
+		portID := d.Get("port_id").(string)
+		updateOpts.PortID = &portID
+	}
+
+	log.Printf("[DEBUG] Update Options: %#v", updateOpts)
+
+	_, err = floatingips.Update(networkingClient, d.Id(), updateOpts).Extract()
+	if err != nil {
+		return fmt.Errorf("Error updating floating IP: %s", err)
+	}
+
+	return resourceNetworkFloatingIPV2Read(d, meta)
+}
+
+func resourceNetworkFloatingIPV2Delete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack network client: %s", err)
+	}
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"ACTIVE"},
+		Target:     []string{"DELETED"},
+		Refresh:    waitForFloatingIPDelete(networkingClient, d.Id()),
+		Timeout:    d.Timeout(schema.TimeoutDelete),
+		Delay:      5 * time.Second,
+		MinTimeout: 3 *
time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error deleting OpenStack Neutron Floating IP: %s", err) + } + + d.SetId("") + return nil +} + +func getNetworkID(d *schema.ResourceData, meta interface{}, networkName string) (string, error) { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return "", fmt.Errorf("Error creating OpenStack network client: %s", err) + } + + opts := networks.ListOpts{Name: networkName} + pager := networks.List(networkingClient, opts) + networkID := "" + + err = pager.EachPage(func(page pagination.Page) (bool, error) { + networkList, err := networks.ExtractNetworks(page) + if err != nil { + return false, err + } + + for _, n := range networkList { + if n.Name == networkName { + networkID = n.ID + return false, nil + } + } + + return true, nil + }) + + return networkID, err +} + +func getNetworkName(d *schema.ResourceData, meta interface{}, networkID string) (string, error) { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return "", fmt.Errorf("Error creating OpenStack network client: %s", err) + } + + opts := networks.ListOpts{ID: networkID} + pager := networks.List(networkingClient, opts) + networkName := "" + + err = pager.EachPage(func(page pagination.Page) (bool, error) { + networkList, err := networks.ExtractNetworks(page) + if err != nil { + return false, err + } + + for _, n := range networkList { + if n.ID == networkID { + networkName = n.Name + return false, nil + } + } + + return true, nil + }) + + return networkName, err +} + +func waitForFloatingIPActive(networkingClient *gophercloud.ServiceClient, fId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + f, err := floatingips.Get(networkingClient, fId).Extract() + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] OpenStack Neutron Floating IP: %+v", f) + if f.Status == "DOWN" || f.Status == "ACTIVE" { + return f, "ACTIVE", nil + } + + return f, "", nil + } +} + +func waitForFloatingIPDelete(networkingClient *gophercloud.ServiceClient, fId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete OpenStack Floating IP %s.\n", fId) + + f, err := floatingips.Get(networkingClient, fId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Floating IP %s", fId) + return f, "DELETED", nil + } + return f, "ACTIVE", err + } + + err = floatingips.Delete(networkingClient, fId).ExtractErr() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Floating IP %s", fId) + return f, "DELETED", nil + } + return f, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack Floating IP %s still active.\n", fId) + return f, "ACTIVE", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_network_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_network_v2.go new file mode 100644 index 000000000..81e8b6379 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_network_v2.go @@ -0,0 +1,326 @@ +package openstack + +import ( + "fmt" + "log" + "strconv" + "time" + + 
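+	// strconv is needed below because admin_state_up and shared are string-typed booleans in this schema.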
"github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" +) + +func resourceNetworkingNetworkV2() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkingNetworkV2Create, + Read: resourceNetworkingNetworkV2Read, + Update: resourceNetworkingNetworkV2Update, + Delete: resourceNetworkingNetworkV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "admin_state_up": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + Computed: true, + }, + "shared": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + Computed: true, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "segments": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "physical_network": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "network_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "segmentation_id": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + "value_specs": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceNetworkingNetworkV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + createOpts := NetworkCreateOpts{ + networks.CreateOpts{ + Name: d.Get("name").(string), + TenantID: d.Get("tenant_id").(string), + }, + MapValueSpecs(d), + } + + asuRaw := d.Get("admin_state_up").(string) + if asuRaw != "" { + asu, err := strconv.ParseBool(asuRaw) + if err != nil { + return fmt.Errorf("admin_state_up, if provided, must be either 'true' or 'false'") + } + createOpts.AdminStateUp = &asu + } + + sharedRaw := d.Get("shared").(string) + if sharedRaw != "" { + shared, err := strconv.ParseBool(sharedRaw) + if err != nil { + return fmt.Errorf("shared, if provided, must be either 'true' or 'false': %v", err) + } + createOpts.Shared = &shared + } + + segments := resourceNetworkingNetworkV2Segments(d) + + n := &networks.Network{} + if len(segments) > 0 { + providerCreateOpts := provider.CreateOptsExt{ + CreateOptsBuilder: createOpts, + Segments: segments, + } + log.Printf("[DEBUG] Create Options: %#v", providerCreateOpts) + n, err = networks.Create(networkingClient, providerCreateOpts).Extract() + } else { + log.Printf("[DEBUG] Create Options: %#v", createOpts) + n, err = networks.Create(networkingClient, createOpts).Extract() + } + + if err != nil { + return fmt.Errorf("Error creating OpenStack Neutron network: %s", err) 
+	}
+
+	log.Printf("[INFO] Network ID: %s", n.ID)
+
+	log.Printf("[DEBUG] Waiting for Network (%s) to become available", n.ID)
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"BUILD"},
+		Target:     []string{"ACTIVE"},
+		Refresh:    waitForNetworkActive(networkingClient, n.ID),
+		Timeout:    d.Timeout(schema.TimeoutCreate),
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf("Error waiting for OpenStack Neutron Network (%s) to become available: %s", n.ID, err)
+	}
+
+	d.SetId(n.ID)
+
+	return resourceNetworkingNetworkV2Read(d, meta)
+}
+
+func resourceNetworkingNetworkV2Read(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	n, err := networks.Get(networkingClient, d.Id()).Extract()
+	if err != nil {
+		return CheckDeleted(d, err, "network")
+	}
+
+	log.Printf("[DEBUG] Retrieved Network %s: %+v", d.Id(), n)
+
+	d.Set("name", n.Name)
+	d.Set("admin_state_up", strconv.FormatBool(n.AdminStateUp))
+	d.Set("shared", strconv.FormatBool(n.Shared))
+	d.Set("tenant_id", n.TenantID)
+	d.Set("region", GetRegion(d))
+
+	return nil
+}
+
+func resourceNetworkingNetworkV2Update(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	var updateOpts networks.UpdateOpts
+	if d.HasChange("name") {
+		updateOpts.Name = d.Get("name").(string)
+	}
+	if d.HasChange("admin_state_up") {
+		asuRaw := d.Get("admin_state_up").(string)
+		if asuRaw != "" {
+			asu, err := strconv.ParseBool(asuRaw)
+			if err != nil {
+				return fmt.Errorf("admin_state_up, if provided, must be either 'true' or 'false'")
+			}
+			updateOpts.AdminStateUp = &asu
+		}
+	}
+	if d.HasChange("shared") {
+		sharedRaw := d.Get("shared").(string)
+		if sharedRaw != "" {
+			shared, err := strconv.ParseBool(sharedRaw)
+			if err != nil {
+				return fmt.Errorf("shared, if provided, must be either 'true' or 'false': %v", err)
+			}
+			updateOpts.Shared = &shared
+		}
+	}
+
+	log.Printf("[DEBUG] Updating Network %s with options: %+v", d.Id(), updateOpts)
+
+	_, err = networks.Update(networkingClient, d.Id(), updateOpts).Extract()
+	if err != nil {
+		return fmt.Errorf("Error updating OpenStack Neutron Network: %s", err)
+	}
+
+	return resourceNetworkingNetworkV2Read(d, meta)
+}
+
+func resourceNetworkingNetworkV2Delete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"ACTIVE"},
+		Target:     []string{"DELETED"},
+		Refresh:    waitForNetworkDelete(networkingClient, d.Id()),
+		Timeout:    d.Timeout(schema.TimeoutDelete),
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf("Error deleting OpenStack Neutron Network: %s", err)
+	}
+
+	d.SetId("")
+	return nil
+}
+
+func resourceNetworkingNetworkV2Segments(d *schema.ResourceData) (providerSegments []provider.Segment) {
+	segments := d.Get("segments").([]interface{})
+	for _, v := range segments {
+		var segment provider.Segment
+		segmentMap := v.(map[string]interface{})
+
+		if v, ok := segmentMap["physical_network"].(string); ok {
+			segment.PhysicalNetwork = v
+		}
+
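+		// The two-value type assertions here tolerate missing map keys, yielding zero values instead of panicking.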
if v, ok := segmentMap["network_type"].(string); ok { + segment.NetworkType = v + } + + if v, ok := segmentMap["segmentation_id"].(int); ok { + segment.SegmentationID = v + } + + providerSegments = append(providerSegments, segment) + } + return +} + +func waitForNetworkActive(networkingClient *gophercloud.ServiceClient, networkId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + n, err := networks.Get(networkingClient, networkId).Extract() + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] OpenStack Neutron Network: %+v", n) + if n.Status == "DOWN" || n.Status == "ACTIVE" { + return n, "ACTIVE", nil + } + + return n, n.Status, nil + } +} + +func waitForNetworkDelete(networkingClient *gophercloud.ServiceClient, networkId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete OpenStack Network %s.\n", networkId) + + n, err := networks.Get(networkingClient, networkId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Network %s", networkId) + return n, "DELETED", nil + } + return n, "ACTIVE", err + } + + err = networks.Delete(networkingClient, networkId).ExtractErr() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Network %s", networkId) + return n, "DELETED", nil + } + if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { + if errCode.Actual == 409 { + return n, "ACTIVE", nil + } + } + return n, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack Network %s still active.\n", networkId) + return n, "ACTIVE", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_port_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_port_v2.go new file mode 100644 index 000000000..4be432935 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_port_v2.go @@ -0,0 +1,404 @@ +package openstack + +import ( + "bytes" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" +) + +func resourceNetworkingPortV2() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkingPortV2Create, + Read: resourceNetworkingPortV2Read, + Update: resourceNetworkingPortV2Update, + Delete: resourceNetworkingPortV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "network_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "admin_state_up": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: false, + Computed: true, + }, + "mac_address": &schema.Schema{ + Type: 
schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+				Computed: true,
+			},
+			"tenant_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+				Computed: true,
+			},
+			"device_owner": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+				Computed: true,
+			},
+			"security_group_ids": &schema.Schema{
+				Type:     schema.TypeSet,
+				Optional: true,
+				ForceNew: false,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+				Set:      schema.HashString,
+			},
+			"device_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+				Computed: true,
+			},
+			"fixed_ip": &schema.Schema{
+				Type:     schema.TypeList,
+				Optional: true,
+				ForceNew: false,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"subnet_id": &schema.Schema{
+							Type:     schema.TypeString,
+							Required: true,
+						},
+						"ip_address": &schema.Schema{
+							Type:     schema.TypeString,
+							Optional: true,
+						},
+					},
+				},
+			},
+			"allowed_address_pairs": &schema.Schema{
+				Type:     schema.TypeSet,
+				Optional: true,
+				ForceNew: false,
+				Computed: true,
+				Set:      allowedAddressPairsHash,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"ip_address": &schema.Schema{
+							Type:     schema.TypeString,
+							Required: true,
+						},
+						"mac_address": &schema.Schema{
+							Type:     schema.TypeString,
+							Optional: true,
+							Computed: true,
+						},
+					},
+				},
+			},
+			"value_specs": &schema.Schema{
+				Type:     schema.TypeMap,
+				Optional: true,
+				ForceNew: true,
+			},
+			"all_fixed_ips": &schema.Schema{
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+		},
+	}
+}
+
+func resourceNetworkingPortV2Create(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	createOpts := PortCreateOpts{
+		ports.CreateOpts{
+			Name:                d.Get("name").(string),
+			AdminStateUp:        resourcePortAdminStateUpV2(d),
+			NetworkID:           d.Get("network_id").(string),
+			MACAddress:          d.Get("mac_address").(string),
+			TenantID:            d.Get("tenant_id").(string),
+			DeviceOwner:         d.Get("device_owner").(string),
+			SecurityGroups:      resourcePortSecurityGroupsV2(d),
+			DeviceID:            d.Get("device_id").(string),
+			FixedIPs:            resourcePortFixedIpsV2(d),
+			AllowedAddressPairs: resourceAllowedAddressPairsV2(d),
+		},
+		MapValueSpecs(d),
+	}
+
+	log.Printf("[DEBUG] Create Options: %#v", createOpts)
+	p, err := ports.Create(networkingClient, createOpts).Extract()
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack Neutron port: %s", err)
+	}
+	log.Printf("[INFO] Port ID: %s", p.ID)
+
+	log.Printf("[DEBUG] Waiting for OpenStack Neutron Port (%s) to become available.", p.ID)
+
+	stateConf := &resource.StateChangeConf{
+		Target:     []string{"ACTIVE"},
+		Refresh:    waitForNetworkPortActive(networkingClient, p.ID),
+		Timeout:    d.Timeout(schema.TimeoutCreate),
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf("Error waiting for OpenStack Neutron Port (%s) to become available: %s", p.ID, err)
+	}
+
+	d.SetId(p.ID)
+
+	return resourceNetworkingPortV2Read(d, meta)
+}
+
+func resourceNetworkingPortV2Read(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	p, err := ports.Get(networkingClient, d.Id()).Extract()
+	if err != nil {
+		return CheckDeleted(d, err, "port")
+	}
+
+	log.Printf("[DEBUG] Retrieved Port %s: %+v", d.Id(), p)
+
+	d.Set("name", p.Name)
+	d.Set("admin_state_up", p.AdminStateUp)
+	d.Set("network_id", p.NetworkID)
+	d.Set("mac_address", p.MACAddress)
+	d.Set("tenant_id", p.TenantID)
+	d.Set("device_owner", p.DeviceOwner)
+	d.Set("security_group_ids", p.SecurityGroups)
+	d.Set("device_id", p.DeviceID)
+
+	// Create a slice of all returned Fixed IPs.
+	// This will be in the order returned by the API,
+	// which is usually alpha-numeric.
+	var ips []string
+	for _, ipObject := range p.FixedIPs {
+		ips = append(ips, ipObject.IPAddress)
+	}
+	d.Set("all_fixed_ips", ips)
+
+	// Convert AllowedAddressPairs to list of map
+	var pairs []map[string]interface{}
+	for _, pairObject := range p.AllowedAddressPairs {
+		pair := make(map[string]interface{})
+		pair["ip_address"] = pairObject.IPAddress
+		pair["mac_address"] = pairObject.MACAddress
+		pairs = append(pairs, pair)
+	}
+	d.Set("allowed_address_pairs", pairs)
+
+	d.Set("region", GetRegion(d))
+
+	return nil
+}
+
+func resourceNetworkingPortV2Update(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	// security_group_ids and allowed_address_pairs are able to send empty arrays
+	// to denote the removal of each. But their default zero-value is translated
+	// to "null", which has been reported to cause problems in vendor-modified
+	// OpenStack clouds. Therefore, we must set them in each update request.
+	updateOpts := ports.UpdateOpts{
+		AllowedAddressPairs: resourceAllowedAddressPairsV2(d),
+		SecurityGroups:      resourcePortSecurityGroupsV2(d),
+	}
+
+	if d.HasChange("name") {
+		updateOpts.Name = d.Get("name").(string)
+	}
+
+	if d.HasChange("admin_state_up") {
+		updateOpts.AdminStateUp = resourcePortAdminStateUpV2(d)
+	}
+
+	if d.HasChange("device_owner") {
+		updateOpts.DeviceOwner = d.Get("device_owner").(string)
+	}
+
+	if d.HasChange("device_id") {
+		updateOpts.DeviceID = d.Get("device_id").(string)
+	}
+
+	if d.HasChange("fixed_ip") {
+		updateOpts.FixedIPs = resourcePortFixedIpsV2(d)
+	}
+
+	log.Printf("[DEBUG] Updating Port %s with options: %+v", d.Id(), updateOpts)
+
+	_, err = ports.Update(networkingClient, d.Id(), updateOpts).Extract()
+	if err != nil {
+		return fmt.Errorf("Error updating OpenStack Neutron Port: %s", err)
+	}
+
+	return resourceNetworkingPortV2Read(d, meta)
+}
+
+func resourceNetworkingPortV2Delete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"ACTIVE"},
+		Target:     []string{"DELETED"},
+		Refresh:    waitForNetworkPortDelete(networkingClient, d.Id()),
+		Timeout:    d.Timeout(schema.TimeoutDelete),
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf("Error deleting OpenStack Neutron Port: %s", err)
+	}
+
+	d.SetId("")
+	return nil
+}
+
+func resourcePortSecurityGroupsV2(d *schema.ResourceData) []string {
+	rawSecurityGroups := d.Get("security_group_ids").(*schema.Set)
+	groups := make([]string, rawSecurityGroups.Len())
+	for i, raw := range rawSecurityGroups.List() {
+		groups[i] = raw.(string)
+	}
+	return groups
+}
+
+func resourcePortFixedIpsV2(d *schema.ResourceData)
interface{} { + rawIP := d.Get("fixed_ip").([]interface{}) + + if len(rawIP) == 0 { + return nil + } + + ip := make([]ports.IP, len(rawIP)) + for i, raw := range rawIP { + rawMap := raw.(map[string]interface{}) + ip[i] = ports.IP{ + SubnetID: rawMap["subnet_id"].(string), + IPAddress: rawMap["ip_address"].(string), + } + } + return ip +} + +func resourceAllowedAddressPairsV2(d *schema.ResourceData) []ports.AddressPair { + // ports.AddressPair + rawPairs := d.Get("allowed_address_pairs").(*schema.Set).List() + + pairs := make([]ports.AddressPair, len(rawPairs)) + for i, raw := range rawPairs { + rawMap := raw.(map[string]interface{}) + pairs[i] = ports.AddressPair{ + IPAddress: rawMap["ip_address"].(string), + MACAddress: rawMap["mac_address"].(string), + } + } + return pairs +} + +func resourcePortAdminStateUpV2(d *schema.ResourceData) *bool { + value := false + + if raw, ok := d.GetOk("admin_state_up"); ok && raw == true { + value = true + } + + return &value +} + +func allowedAddressPairsHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s", m["ip_address"].(string))) + + return hashcode.String(buf.String()) +} + +func waitForNetworkPortActive(networkingClient *gophercloud.ServiceClient, portId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + p, err := ports.Get(networkingClient, portId).Extract() + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] OpenStack Neutron Port: %+v", p) + if p.Status == "DOWN" || p.Status == "ACTIVE" { + return p, "ACTIVE", nil + } + + return p, p.Status, nil + } +} + +func waitForNetworkPortDelete(networkingClient *gophercloud.ServiceClient, portId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete OpenStack Neutron Port %s", portId) + + p, err := ports.Get(networkingClient, portId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Port %s", portId) + return p, "DELETED", nil + } + return p, "ACTIVE", err + } + + err = ports.Delete(networkingClient, portId).ExtractErr() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Port %s", portId) + return p, "DELETED", nil + } + return p, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack Port %s still active.\n", portId) + return p, "ACTIVE", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_interface_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_interface_v2.go new file mode 100644 index 000000000..4a4ae8685 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_interface_v2.go @@ -0,0 +1,189 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" +) + +func resourceNetworkingRouterInterfaceV2() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkingRouterInterfaceV2Create, + Read: 
resourceNetworkingRouterInterfaceV2Read,
+		Delete: resourceNetworkingRouterInterfaceV2Delete,
+
+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(10 * time.Minute),
+			Delete: schema.DefaultTimeout(10 * time.Minute),
+		},
+
+		Schema: map[string]*schema.Schema{
+			"region": &schema.Schema{
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""),
+			},
+			"router_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"subnet_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+			"port_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+		},
+	}
+}
+
+func resourceNetworkingRouterInterfaceV2Create(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	createOpts := routers.AddInterfaceOpts{
+		SubnetID: d.Get("subnet_id").(string),
+		PortID:   d.Get("port_id").(string),
+	}
+
+	log.Printf("[DEBUG] Create Options: %#v", createOpts)
+	n, err := routers.AddInterface(networkingClient, d.Get("router_id").(string), createOpts).Extract()
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack Neutron router interface: %s", err)
+	}
+	log.Printf("[INFO] Router interface Port ID: %s", n.PortID)
+
+	log.Printf("[DEBUG] Waiting for Router Interface (%s) to become available", n.PortID)
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"BUILD", "PENDING_CREATE", "PENDING_UPDATE"},
+		Target:     []string{"ACTIVE"},
+		Refresh:    waitForRouterInterfaceActive(networkingClient, n.PortID),
+		Timeout:    d.Timeout(schema.TimeoutCreate),
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf("Error waiting for OpenStack Neutron Router Interface (%s) to become available: %s", n.PortID, err)
+	}
+
+	d.SetId(n.PortID)
+
+	return resourceNetworkingRouterInterfaceV2Read(d, meta)
+}
+
+func resourceNetworkingRouterInterfaceV2Read(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	n, err := ports.Get(networkingClient, d.Id()).Extract()
+	if err != nil {
+		if _, ok := err.(gophercloud.ErrDefault404); ok {
+			d.SetId("")
+			return nil
+		}
+
+		return fmt.Errorf("Error retrieving OpenStack Neutron Router Interface: %s", err)
+	}
+
+	log.Printf("[DEBUG] Retrieved Router Interface %s: %+v", d.Id(), n)
+
+	return nil
+}
+
+func resourceNetworkingRouterInterfaceV2Delete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"ACTIVE"},
+		Target:     []string{"DELETED"},
+		Refresh:    waitForRouterInterfaceDelete(networkingClient, d),
+		Timeout:    d.Timeout(schema.TimeoutDelete),
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf("Error deleting OpenStack Neutron Router Interface: %s", err)
+	}
+
+	d.SetId("")
+	return nil
+}
+
+func waitForRouterInterfaceActive(networkingClient *gophercloud.ServiceClient, rId string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		r,
err := ports.Get(networkingClient, rId).Extract() + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] OpenStack Neutron Router Interface: %+v", r) + return r, r.Status, nil + } +} + +func waitForRouterInterfaceDelete(networkingClient *gophercloud.ServiceClient, d *schema.ResourceData) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + routerId := d.Get("router_id").(string) + routerInterfaceId := d.Id() + + log.Printf("[DEBUG] Attempting to delete OpenStack Router Interface %s.", routerInterfaceId) + + removeOpts := routers.RemoveInterfaceOpts{ + SubnetID: d.Get("subnet_id").(string), + PortID: d.Get("port_id").(string), + } + + r, err := ports.Get(networkingClient, routerInterfaceId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Router Interface %s", routerInterfaceId) + return r, "DELETED", nil + } + return r, "ACTIVE", err + } + + _, err = routers.RemoveInterface(networkingClient, routerId, removeOpts).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Router Interface %s.", routerInterfaceId) + return r, "DELETED", nil + } + if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { + if errCode.Actual == 409 { + log.Printf("[DEBUG] Router Interface %s is still in use.", routerInterfaceId) + return r, "ACTIVE", nil + } + } + + return r, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack Router Interface %s is still active.", routerInterfaceId) + return r, "ACTIVE", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_route_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_route_v2.go new file mode 100644 index 000000000..332017ac6 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_route_v2.go @@ -0,0 +1,202 @@ +package openstack + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" +) + +func resourceNetworkingRouterRouteV2() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkingRouterRouteV2Create, + Read: resourceNetworkingRouterRouteV2Read, + Delete: resourceNetworkingRouterRouteV2Delete, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + "router_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "destination_cidr": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "next_hop": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceNetworkingRouterRouteV2Create(d *schema.ResourceData, meta interface{}) error { + + routerId := d.Get("router_id").(string) + osMutexKV.Lock(routerId) + defer osMutexKV.Unlock(routerId) + + var destCidr string = d.Get("destination_cidr").(string) + var nextHop string = d.Get("next_hop").(string) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack 
networking client: %s", err) + } + + n, err := routers.Get(networkingClient, routerId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + d.SetId("") + return nil + } + + return fmt.Errorf("Error retrieving OpenStack Neutron Router: %s", err) + } + + var updateOpts routers.UpdateOpts + var routeExists bool = false + + var rts []routers.Route = n.Routes + for _, r := range rts { + + if r.DestinationCIDR == destCidr && r.NextHop == nextHop { + routeExists = true + break + } + } + + if !routeExists { + + if destCidr != "" && nextHop != "" { + r := routers.Route{DestinationCIDR: destCidr, NextHop: nextHop} + log.Printf( + "[INFO] Adding route %s", r) + rts = append(rts, r) + } + + updateOpts.Routes = rts + + log.Printf("[DEBUG] Updating Router %s with options: %+v", routerId, updateOpts) + + _, err = routers.Update(networkingClient, routerId, updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack Neutron Router: %s", err) + } + d.SetId(fmt.Sprintf("%s-route-%s-%s", routerId, destCidr, nextHop)) + + } else { + log.Printf("[DEBUG] Router %s has route already", routerId) + } + + return resourceNetworkingRouterRouteV2Read(d, meta) +} + +func resourceNetworkingRouterRouteV2Read(d *schema.ResourceData, meta interface{}) error { + + routerId := d.Get("router_id").(string) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + n, err := routers.Get(networkingClient, routerId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + d.SetId("") + return nil + } + + return fmt.Errorf("Error retrieving OpenStack Neutron Router: %s", err) + } + + log.Printf("[DEBUG] Retrieved Router %s: %+v", routerId, n) + + var destCidr string = d.Get("destination_cidr").(string) + var nextHop string = d.Get("next_hop").(string) + + d.Set("next_hop", "") + d.Set("destination_cidr", "") + + for _, r := range n.Routes { + + if r.DestinationCIDR == destCidr && r.NextHop == nextHop { + d.Set("destination_cidr", destCidr) + d.Set("next_hop", nextHop) + break + } + } + + return nil +} + +func resourceNetworkingRouterRouteV2Delete(d *schema.ResourceData, meta interface{}) error { + + routerId := d.Get("router_id").(string) + osMutexKV.Lock(routerId) + defer osMutexKV.Unlock(routerId) + + config := meta.(*Config) + + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + n, err := routers.Get(networkingClient, routerId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + return nil + } + + return fmt.Errorf("Error retrieving OpenStack Neutron Router: %s", err) + } + + var updateOpts routers.UpdateOpts + + var destCidr string = d.Get("destination_cidr").(string) + var nextHop string = d.Get("next_hop").(string) + + var oldRts []routers.Route = n.Routes + var newRts []routers.Route + + for _, r := range oldRts { + + if r.DestinationCIDR != destCidr || r.NextHop != nextHop { + newRts = append(newRts, r) + } + } + + if len(oldRts) != len(newRts) { + r := routers.Route{DestinationCIDR: destCidr, NextHop: nextHop} + log.Printf( + "[INFO] Deleting route %s", r) + updateOpts.Routes = newRts + + log.Printf("[DEBUG] Updating Router %s with options: %+v", routerId, updateOpts) + + _, err = routers.Update(networkingClient, routerId, updateOpts).Extract() + if err != nil { + 
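+			// Neutron replaces the router's route list wholesale on update (hence the read-modify-write above), so a failed update leaves the existing routes in place.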
+			return fmt.Errorf("Error updating OpenStack Neutron Router: %s", err)
+		}
+	} else {
+		return fmt.Errorf("Route did not exist")
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_v2.go
new file mode 100644
index 000000000..d979a53e6
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_v2.go
@@ -0,0 +1,257 @@
+package openstack
+
+import (
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers"
+)
+
+func resourceNetworkingRouterV2() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceNetworkingRouterV2Create,
+		Read:   resourceNetworkingRouterV2Read,
+		Update: resourceNetworkingRouterV2Update,
+		Delete: resourceNetworkingRouterV2Delete,
+
+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(10 * time.Minute),
+			Delete: schema.DefaultTimeout(10 * time.Minute),
+		},
+
+		Schema: map[string]*schema.Schema{
+			"region": &schema.Schema{
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""),
+			},
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: false,
+			},
+			"admin_state_up": &schema.Schema{
+				Type:     schema.TypeBool,
+				Optional: true,
+				ForceNew: false,
+				Computed: true,
+			},
+			"distributed": &schema.Schema{
+				Type:     schema.TypeBool,
+				Optional: true,
+				ForceNew: true,
+				Computed: true,
+			},
+			"external_gateway": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: false,
+			},
+			"tenant_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+				Computed: true,
+			},
+			"value_specs": &schema.Schema{
+				Type:     schema.TypeMap,
+				Optional: true,
+				ForceNew: true,
+			},
+		},
+	}
+}
+
+func resourceNetworkingRouterV2Create(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	createOpts := RouterCreateOpts{
+		routers.CreateOpts{
+			Name:     d.Get("name").(string),
+			TenantID: d.Get("tenant_id").(string),
+		},
+		MapValueSpecs(d),
+	}
+
+	if asuRaw, ok := d.GetOk("admin_state_up"); ok {
+		asu := asuRaw.(bool)
+		createOpts.AdminStateUp = &asu
+	}
+
+	if dRaw, ok := d.GetOk("distributed"); ok {
+		dist := dRaw.(bool)
+		createOpts.Distributed = &dist
+	}
+
+	externalGateway := d.Get("external_gateway").(string)
+	if externalGateway != "" {
+		gatewayInfo := routers.GatewayInfo{
+			NetworkID: externalGateway,
+		}
+		createOpts.GatewayInfo = &gatewayInfo
+	}
+
+	log.Printf("[DEBUG] Create Options: %#v", createOpts)
+	n, err := routers.Create(networkingClient, createOpts).Extract()
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack Neutron router: %s", err)
+	}
+	log.Printf("[INFO] Router ID: %s", n.ID)
+
+	log.Printf("[DEBUG] Waiting for OpenStack Neutron Router (%s) to become available", n.ID)
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"BUILD", "PENDING_CREATE", "PENDING_UPDATE"},
+		Target:     []string{"ACTIVE"},
+		Refresh:    waitForRouterActive(networkingClient, n.ID),
+		Timeout:    d.Timeout(schema.TimeoutCreate),
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf("Error waiting for OpenStack Neutron Router (%s) to become available: %s", n.ID, err)
+	}
+
+	d.SetId(n.ID)
+
+	return resourceNetworkingRouterV2Read(d, meta)
+}
+
+func resourceNetworkingRouterV2Read(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	n, err := routers.Get(networkingClient, d.Id()).Extract()
+	if err != nil {
+		if _, ok := err.(gophercloud.ErrDefault404); ok {
+			d.SetId("")
+			return nil
+		}
+
+		return fmt.Errorf("Error retrieving OpenStack Neutron Router: %s", err)
+	}
+
+	log.Printf("[DEBUG] Retrieved Router %s: %+v", d.Id(), n)
+
+	d.Set("name", n.Name)
+	d.Set("admin_state_up", n.AdminStateUp)
+	d.Set("distributed", n.Distributed)
+	d.Set("tenant_id", n.TenantID)
+	d.Set("external_gateway", n.GatewayInfo.NetworkID)
+
+	return nil
+}
+
+func resourceNetworkingRouterV2Update(d *schema.ResourceData, meta interface{}) error {
+	routerId := d.Id()
+	osMutexKV.Lock(routerId)
+	defer osMutexKV.Unlock(routerId)
+
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	var updateOpts routers.UpdateOpts
+	if d.HasChange("name") {
+		updateOpts.Name = d.Get("name").(string)
+	}
+	if d.HasChange("admin_state_up") {
+		asu := d.Get("admin_state_up").(bool)
+		updateOpts.AdminStateUp = &asu
+	}
+	if d.HasChange("external_gateway") {
+		externalGateway := d.Get("external_gateway").(string)
+		if externalGateway != "" {
+			gatewayInfo := routers.GatewayInfo{
+				NetworkID: externalGateway,
+			}
+			updateOpts.GatewayInfo = &gatewayInfo
+		}
+	}
+
+	log.Printf("[DEBUG] Updating Router %s with options: %+v", d.Id(), updateOpts)
+
+	_, err = routers.Update(networkingClient, d.Id(), updateOpts).Extract()
+	if err != nil {
+		return fmt.Errorf("Error updating OpenStack Neutron Router: %s", err)
+	}
+
+	return resourceNetworkingRouterV2Read(d, meta)
+}
+
+func resourceNetworkingRouterV2Delete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"ACTIVE"},
+		Target:     []string{"DELETED"},
+		Refresh:    waitForRouterDelete(networkingClient, d.Id()),
+		Timeout:    d.Timeout(schema.TimeoutDelete),
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf("Error deleting OpenStack Neutron Router: %s", err)
+	}
+
+	d.SetId("")
+	return nil
+}
+
+func waitForRouterActive(networkingClient *gophercloud.ServiceClient, routerId string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		r, err := routers.Get(networkingClient, routerId).Extract()
+		if err != nil {
+			return nil, "", err
+		}
+
+		log.Printf("[DEBUG] OpenStack Neutron Router: %+v", r)
+		return r, r.Status, nil
+	}
+}
+
+func waitForRouterDelete(networkingClient *gophercloud.ServiceClient, routerId string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		log.Printf("[DEBUG] Attempting to delete OpenStack Router %s.\n", routerId)
+
+		r, err :=
routers.Get(networkingClient, routerId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Router %s", routerId) + return r, "DELETED", nil + } + return r, "ACTIVE", err + } + + err = routers.Delete(networkingClient, routerId).ExtractErr() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Router %s", routerId) + return r, "DELETED", nil + } + return r, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack Router %s still active.\n", routerId) + return r, "ACTIVE", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_secgroup_rule_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_secgroup_rule_v2.go new file mode 100644 index 000000000..6f5464fed --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_secgroup_rule_v2.go @@ -0,0 +1,316 @@ +package openstack + +import ( + "fmt" + "log" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules" +) + +func resourceNetworkingSecGroupRuleV2() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkingSecGroupRuleV2Create, + Read: resourceNetworkingSecGroupRuleV2Read, + Delete: resourceNetworkingSecGroupRuleV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + "direction": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "ethertype": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "port_range_min": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + }, + "port_range_max": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + }, + "protocol": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "remote_group_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "remote_ip_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + StateFunc: func(v interface{}) string { + return strings.ToLower(v.(string)) + }, + }, + "security_group_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + }, + } +} + +func resourceNetworkingSecGroupRuleV2Create(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + portRangeMin := d.Get("port_range_min").(int) + portRangeMax := d.Get("port_range_max").(int) + 
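+ // A port range is only meaningful alongside a protocol, so the
+ // combination is validated client-side before calling the API.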
protocol := d.Get("protocol").(string) + + if protocol == "" { + if portRangeMin != 0 || portRangeMax != 0 { + return fmt.Errorf("A protocol must be specified when using port_range_min and port_range_max") + } + } + + opts := rules.CreateOpts{ + SecGroupID: d.Get("security_group_id").(string), + PortRangeMin: d.Get("port_range_min").(int), + PortRangeMax: d.Get("port_range_max").(int), + RemoteGroupID: d.Get("remote_group_id").(string), + RemoteIPPrefix: d.Get("remote_ip_prefix").(string), + TenantID: d.Get("tenant_id").(string), + } + + if v, ok := d.GetOk("direction"); ok { + direction := resourceNetworkingSecGroupRuleV2DetermineDirection(v.(string)) + opts.Direction = direction + } + + if v, ok := d.GetOk("ethertype"); ok { + ethertype := resourceNetworkingSecGroupRuleV2DetermineEtherType(v.(string)) + opts.EtherType = ethertype + } + + if v, ok := d.GetOk("protocol"); ok { + protocol := resourceNetworkingSecGroupRuleV2DetermineProtocol(v.(string)) + opts.Protocol = protocol + } + + log.Printf("[DEBUG] Create OpenStack Neutron security group: %#v", opts) + + security_group_rule, err := rules.Create(networkingClient, opts).Extract() + if err != nil { + return err + } + + log.Printf("[DEBUG] OpenStack Neutron Security Group Rule created: %#v", security_group_rule) + + d.SetId(security_group_rule.ID) + + return resourceNetworkingSecGroupRuleV2Read(d, meta) +} + +func resourceNetworkingSecGroupRuleV2Read(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Retrieve information about security group rule: %s", d.Id()) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + security_group_rule, err := rules.Get(networkingClient, d.Id()).Extract() + + if err != nil { + return CheckDeleted(d, err, "OpenStack Security Group Rule") + } + + d.Set("direction", security_group_rule.Direction) + d.Set("ethertype", security_group_rule.EtherType) + d.Set("protocol", security_group_rule.Protocol) + d.Set("port_range_min", security_group_rule.PortRangeMin) + d.Set("port_range_max", security_group_rule.PortRangeMax) + d.Set("remote_group_id", security_group_rule.RemoteGroupID) + d.Set("remote_ip_prefix", security_group_rule.RemoteIPPrefix) + d.Set("security_group_id", security_group_rule.SecGroupID) + d.Set("tenant_id", security_group_rule.TenantID) + d.Set("region", GetRegion(d)) + + return nil +} + +func resourceNetworkingSecGroupRuleV2Delete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Destroy security group rule: %s", d.Id()) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE"}, + Target: []string{"DELETED"}, + Refresh: waitForSecGroupRuleDelete(networkingClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error deleting OpenStack Neutron Security Group Rule: %s", err) + } + + d.SetId("") + return err +} + +func resourceNetworkingSecGroupRuleV2DetermineDirection(v string) rules.RuleDirection { + var direction rules.RuleDirection + switch v { + case "ingress": + direction = rules.DirIngress + case "egress": + direction = rules.DirEgress + } + + return direction +} + 
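+// Illustrative sketch, not part of the vendored source: the determine-
+// helpers below normalize user-facing strings into gophercloud enum
+// values, e.g.
+//
+//   opts.Direction = resourceNetworkingSecGroupRuleV2DetermineDirection("ingress") // rules.DirIngress
+//   opts.EtherType = resourceNetworkingSecGroupRuleV2DetermineEtherType("IPv4")    // rules.EtherType4
+//
+// Unrecognized input falls through to the type's zero value ("").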
+func resourceNetworkingSecGroupRuleV2DetermineEtherType(v string) rules.RuleEtherType { + var etherType rules.RuleEtherType + switch v { + case "IPv4": + etherType = rules.EtherType4 + case "IPv6": + etherType = rules.EtherType6 + } + + return etherType +} + +func resourceNetworkingSecGroupRuleV2DetermineProtocol(v string) rules.RuleProtocol { + var protocol rules.RuleProtocol + + // Check and see if the requested protocol matched a list of known protocol names. + switch v { + case "tcp": + protocol = rules.ProtocolTCP + case "udp": + protocol = rules.ProtocolUDP + case "icmp": + protocol = rules.ProtocolICMP + case "ah": + protocol = rules.ProtocolAH + case "dccp": + protocol = rules.ProtocolDCCP + case "egp": + protocol = rules.ProtocolEGP + case "esp": + protocol = rules.ProtocolESP + case "gre": + protocol = rules.ProtocolGRE + case "igmp": + protocol = rules.ProtocolIGMP + case "ipv6-encap": + protocol = rules.ProtocolIPv6Encap + case "ipv6-frag": + protocol = rules.ProtocolIPv6Frag + case "ipv6-icmp": + protocol = rules.ProtocolIPv6ICMP + case "ipv6-nonxt": + protocol = rules.ProtocolIPv6NoNxt + case "ipv6-opts": + protocol = rules.ProtocolIPv6Opts + case "ipv6-route": + protocol = rules.ProtocolIPv6Route + case "ospf": + protocol = rules.ProtocolOSPF + case "pgm": + protocol = rules.ProtocolPGM + case "rsvp": + protocol = rules.ProtocolRSVP + case "sctp": + protocol = rules.ProtocolSCTP + case "udplite": + protocol = rules.ProtocolUDPLite + case "vrrp": + protocol = rules.ProtocolVRRP + } + + // If the protocol wasn't matched above, see if it's an integer. + if protocol == "" { + _, err := strconv.Atoi(v) + if err == nil { + protocol = rules.RuleProtocol(v) + } + } + + return protocol +} + +func waitForSecGroupRuleDelete(networkingClient *gophercloud.ServiceClient, secGroupRuleId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete OpenStack Security Group Rule %s.\n", secGroupRuleId) + + r, err := rules.Get(networkingClient, secGroupRuleId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Neutron Security Group Rule %s", secGroupRuleId) + return r, "DELETED", nil + } + return r, "ACTIVE", err + } + + err = rules.Delete(networkingClient, secGroupRuleId).ExtractErr() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Neutron Security Group Rule %s", secGroupRuleId) + return r, "DELETED", nil + } + return r, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack Neutron Security Group Rule %s still active.\n", secGroupRuleId) + return r, "ACTIVE", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_secgroup_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_secgroup_v2.go new file mode 100644 index 000000000..effe0e746 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_secgroup_v2.go @@ -0,0 +1,211 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups" + 
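+ // rules is used by Create to remove Neutron's default rules when
+ // delete_default_rules is set.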
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules" +) + +func resourceNetworkingSecGroupV2() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkingSecGroupV2Create, + Read: resourceNetworkingSecGroupV2Read, + Update: resourceNetworkingSecGroupV2Update, + Delete: resourceNetworkingSecGroupV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "delete_default_rules": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceNetworkingSecGroupV2Create(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + opts := groups.CreateOpts{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + TenantID: d.Get("tenant_id").(string), + } + + log.Printf("[DEBUG] Create OpenStack Neutron Security Group: %#v", opts) + + security_group, err := groups.Create(networkingClient, opts).Extract() + if err != nil { + return err + } + + // Delete the default security group rules if it has been requested. 
+ deleteDefaultRules := d.Get("delete_default_rules").(bool)
+ if deleteDefaultRules {
+ for _, rule := range security_group.Rules {
+ if err := rules.Delete(networkingClient, rule.ID).ExtractErr(); err != nil {
+ return fmt.Errorf(
+ "There was a problem deleting a default security group rule: %s", err)
+ }
+ }
+ }
+
+ log.Printf("[DEBUG] OpenStack Neutron Security Group created: %#v", security_group)
+
+ d.SetId(security_group.ID)
+
+ return resourceNetworkingSecGroupV2Read(d, meta)
+}
+
+func resourceNetworkingSecGroupV2Read(d *schema.ResourceData, meta interface{}) error {
+ log.Printf("[DEBUG] Retrieve information about security group: %s", d.Id())
+
+ config := meta.(*Config)
+ networkingClient, err := config.networkingV2Client(GetRegion(d))
+ if err != nil {
+ return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+ }
+
+ security_group, err := groups.Get(networkingClient, d.Id()).Extract()
+
+ if err != nil {
+ return CheckDeleted(d, err, "OpenStack Neutron Security group")
+ }
+
+ d.Set("description", security_group.Description)
+ d.Set("tenant_id", security_group.TenantID)
+ d.Set("name", security_group.Name)
+ d.Set("region", GetRegion(d))
+
+ return nil
+}
+
+func resourceNetworkingSecGroupV2Update(d *schema.ResourceData, meta interface{}) error {
+ config := meta.(*Config)
+ networkingClient, err := config.networkingV2Client(GetRegion(d))
+ if err != nil {
+ return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+ }
+
+ var update bool
+ var updateOpts groups.UpdateOpts
+
+ if d.HasChange("name") {
+ update = true
+ updateOpts.Name = d.Get("name").(string)
+ }
+
+ if d.HasChange("description") {
+ update = true
+ // The description change must be written to the Description field;
+ // assigning it to Name would silently rename the group.
+ updateOpts.Description = d.Get("description").(string)
+ }
+
+ if update {
+ log.Printf("[DEBUG] Updating SecGroup %s with options: %#v", d.Id(), updateOpts)
+ _, err = groups.Update(networkingClient, d.Id(), updateOpts).Extract()
+ if err != nil {
+ return fmt.Errorf("Error updating OpenStack SecGroup: %s", err)
+ }
+ }
+
+ return resourceNetworkingSecGroupV2Read(d, meta)
+}
+
+func resourceNetworkingSecGroupV2Delete(d *schema.ResourceData, meta interface{}) error {
+ log.Printf("[DEBUG] Destroy security group: %s", d.Id())
+
+ config := meta.(*Config)
+ networkingClient, err := config.networkingV2Client(GetRegion(d))
+ if err != nil {
+ return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+ }
+
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"ACTIVE"},
+ Target: []string{"DELETED"},
+ Refresh: waitForSecGroupDelete(networkingClient, d.Id()),
+ Timeout: d.Timeout(schema.TimeoutDelete),
+ Delay: 5 * time.Second,
+ MinTimeout: 3 * time.Second,
+ }
+
+ _, err = stateConf.WaitForState()
+ if err != nil {
+ return fmt.Errorf("Error deleting OpenStack Neutron Security Group: %s", err)
+ }
+
+ d.SetId("")
+ return nil
+}
+
+func waitForSecGroupDelete(networkingClient *gophercloud.ServiceClient, secGroupId string) resource.StateRefreshFunc {
+ return func() (interface{}, string, error) {
+ log.Printf("[DEBUG] Attempting to delete OpenStack Security Group %s.\n", secGroupId)
+
+ r, err := groups.Get(networkingClient, secGroupId).Extract()
+ if err != nil {
+ if _, ok := err.(gophercloud.ErrDefault404); ok {
+ log.Printf("[DEBUG] Successfully deleted OpenStack Neutron Security Group %s", secGroupId)
+ return r, "DELETED", nil
+ }
+ return r, "ACTIVE", err
+ }
+
+ err = groups.Delete(networkingClient, secGroupId).ExtractErr()
+ if err != nil {
+ if _, ok := err.(gophercloud.ErrDefault404); ok {
+ log.Printf("[DEBUG]
Successfully deleted OpenStack Neutron Security Group %s", secGroupId) + return r, "DELETED", nil + } + if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { + if errCode.Actual == 409 { + return r, "ACTIVE", nil + } + } + return r, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack Neutron Security Group %s still active.\n", secGroupId) + return r, "ACTIVE", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_subnet_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_subnet_v2.go new file mode 100644 index 000000000..be19338aa --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_subnet_v2.go @@ -0,0 +1,423 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" +) + +func resourceNetworkingSubnetV2() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkingSubnetV2Create, + Read: resourceNetworkingSubnetV2Read, + Update: resourceNetworkingSubnetV2Update, + Delete: resourceNetworkingSubnetV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + "network_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "cidr": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "allocation_pools": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "start": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "end": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "gateway_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + Computed: true, + }, + "no_gateway": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: false, + }, + "ip_version": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 4, + ForceNew: true, + }, + "enable_dhcp": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: false, + Default: true, + }, + "dns_nameservers": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "host_routes": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: false, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "destination_cidr": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "next_hop": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "value_specs": &schema.Schema{ + Type: 
schema.TypeMap,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ }
+}
+
+func resourceNetworkingSubnetV2Create(d *schema.ResourceData, meta interface{}) error {
+ config := meta.(*Config)
+ networkingClient, err := config.networkingV2Client(GetRegion(d))
+ if err != nil {
+ return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+ }
+
+ createOpts := SubnetCreateOpts{
+ subnets.CreateOpts{
+ NetworkID: d.Get("network_id").(string),
+ CIDR: d.Get("cidr").(string),
+ Name: d.Get("name").(string),
+ TenantID: d.Get("tenant_id").(string),
+ AllocationPools: resourceSubnetAllocationPoolsV2(d),
+ DNSNameservers: resourceSubnetDNSNameserversV2(d),
+ HostRoutes: resourceSubnetHostRoutesV2(d),
+ EnableDHCP: nil,
+ },
+ MapValueSpecs(d),
+ }
+
+ noGateway := d.Get("no_gateway").(bool)
+ gatewayIP := d.Get("gateway_ip").(string)
+
+ if gatewayIP != "" && noGateway {
+ return fmt.Errorf("Both gateway_ip and no_gateway cannot be set")
+ }
+
+ if gatewayIP != "" {
+ createOpts.GatewayIP = &gatewayIP
+ }
+
+ if noGateway {
+ disableGateway := ""
+ createOpts.GatewayIP = &disableGateway
+ }
+
+ enableDHCP := d.Get("enable_dhcp").(bool)
+ createOpts.EnableDHCP = &enableDHCP
+
+ if v, ok := d.GetOk("ip_version"); ok {
+ ipVersion := resourceNetworkingSubnetV2DetermineIPVersion(v.(int))
+ createOpts.IPVersion = ipVersion
+ }
+
+ s, err := subnets.Create(networkingClient, createOpts).Extract()
+ if err != nil {
+ return fmt.Errorf("Error creating OpenStack Neutron subnet: %s", err)
+ }
+
+ log.Printf("[DEBUG] Waiting for Subnet (%s) to become available", s.ID)
+ stateConf := &resource.StateChangeConf{
+ Target: []string{"ACTIVE"},
+ Refresh: waitForSubnetActive(networkingClient, s.ID),
+ Timeout: d.Timeout(schema.TimeoutCreate),
+ Delay: 5 * time.Second,
+ MinTimeout: 3 * time.Second,
+ }
+
+ _, err = stateConf.WaitForState()
+ if err != nil {
+ return fmt.Errorf("Error waiting for OpenStack Neutron Subnet (%s) to become available: %s", s.ID, err)
+ }
+
+ d.SetId(s.ID)
+
+ log.Printf("[DEBUG] Created Subnet %s: %#v", s.ID, s)
+ return resourceNetworkingSubnetV2Read(d, meta)
+}
+
+func resourceNetworkingSubnetV2Read(d *schema.ResourceData, meta interface{}) error {
+ config := meta.(*Config)
+ networkingClient, err := config.networkingV2Client(GetRegion(d))
+ if err != nil {
+ return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+ }
+
+ s, err := subnets.Get(networkingClient, d.Id()).Extract()
+ if err != nil {
+ return CheckDeleted(d, err, "subnet")
+ }
+
+ log.Printf("[DEBUG] Retrieved Subnet %s: %#v", d.Id(), s)
+
+ d.Set("network_id", s.NetworkID)
+ d.Set("cidr", s.CIDR)
+ d.Set("ip_version", s.IPVersion)
+ d.Set("name", s.Name)
+ d.Set("tenant_id", s.TenantID)
+ d.Set("gateway_ip", s.GatewayIP)
+ d.Set("dns_nameservers", s.DNSNameservers)
+ d.Set("host_routes", s.HostRoutes)
+ d.Set("enable_dhcp", s.EnableDHCP)
+
+ // Set the allocation_pools
+ var allocationPools []map[string]interface{}
+ for _, v := range s.AllocationPools {
+ pool := make(map[string]interface{})
+ pool["start"] = v.Start
+ pool["end"] = v.End
+
+ allocationPools = append(allocationPools, pool)
+ }
+ d.Set("allocation_pools", allocationPools)
+
+ d.Set("region", GetRegion(d))
+
+ return nil
+}
+
+func resourceNetworkingSubnetV2Update(d *schema.ResourceData, meta interface{}) error {
+ config := meta.(*Config)
+ networkingClient, err := config.networkingV2Client(GetRegion(d))
+ if err != nil {
+ return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+ }
+
+ // Check if both gateway_ip and no_gateway are set
+ if _, ok := d.GetOk("gateway_ip"); ok {
+ noGateway :=
d.Get("no_gateway").(bool) + if noGateway { + return fmt.Errorf("Both gateway_ip and no_gateway cannot be set.") + } + } + + var updateOpts subnets.UpdateOpts + + noGateway := d.Get("no_gateway").(bool) + gatewayIP := d.Get("gateway_ip").(string) + + if gatewayIP != "" && noGateway { + return fmt.Errorf("Both gateway_ip and no_gateway cannot be set") + } + + if d.HasChange("name") { + updateOpts.Name = d.Get("name").(string) + } + + if d.HasChange("gateway_ip") { + updateOpts.GatewayIP = nil + if v, ok := d.GetOk("gateway_ip"); ok { + gatewayIP := v.(string) + updateOpts.GatewayIP = &gatewayIP + } + } + + if d.HasChange("no_gateway") { + if d.Get("no_gateway").(bool) { + gatewayIP := "" + updateOpts.GatewayIP = &gatewayIP + } + } + + if d.HasChange("dns_nameservers") { + updateOpts.DNSNameservers = resourceSubnetDNSNameserversV2(d) + } + + if d.HasChange("host_routes") { + updateOpts.HostRoutes = resourceSubnetHostRoutesV2(d) + } + + if d.HasChange("enable_dhcp") { + v := d.Get("enable_dhcp").(bool) + updateOpts.EnableDHCP = &v + } + + if d.HasChange("allocation_pools") { + updateOpts.AllocationPools = resourceSubnetAllocationPoolsV2(d) + } + + log.Printf("[DEBUG] Updating Subnet %s with options: %+v", d.Id(), updateOpts) + + _, err = subnets.Update(networkingClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack Neutron Subnet: %s", err) + } + + return resourceNetworkingSubnetV2Read(d, meta) +} + +func resourceNetworkingSubnetV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE"}, + Target: []string{"DELETED"}, + Refresh: waitForSubnetDelete(networkingClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error deleting OpenStack Neutron Subnet: %s", err) + } + + d.SetId("") + return nil +} + +func resourceSubnetAllocationPoolsV2(d *schema.ResourceData) []subnets.AllocationPool { + rawAPs := d.Get("allocation_pools").([]interface{}) + aps := make([]subnets.AllocationPool, len(rawAPs)) + for i, raw := range rawAPs { + rawMap := raw.(map[string]interface{}) + aps[i] = subnets.AllocationPool{ + Start: rawMap["start"].(string), + End: rawMap["end"].(string), + } + } + return aps +} + +func resourceSubnetDNSNameserversV2(d *schema.ResourceData) []string { + rawDNSN := d.Get("dns_nameservers").(*schema.Set) + dnsn := make([]string, rawDNSN.Len()) + for i, raw := range rawDNSN.List() { + dnsn[i] = raw.(string) + } + return dnsn +} + +func resourceSubnetHostRoutesV2(d *schema.ResourceData) []subnets.HostRoute { + rawHR := d.Get("host_routes").([]interface{}) + hr := make([]subnets.HostRoute, len(rawHR)) + for i, raw := range rawHR { + rawMap := raw.(map[string]interface{}) + hr[i] = subnets.HostRoute{ + DestinationCIDR: rawMap["destination_cidr"].(string), + NextHop: rawMap["next_hop"].(string), + } + } + return hr +} + +func resourceNetworkingSubnetV2DetermineIPVersion(v int) gophercloud.IPVersion { + var ipVersion gophercloud.IPVersion + switch v { + case 4: + ipVersion = gophercloud.IPv4 + case 6: + ipVersion = gophercloud.IPv6 + } + + return ipVersion +} + +func waitForSubnetActive(networkingClient *gophercloud.ServiceClient, subnetId 
string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + s, err := subnets.Get(networkingClient, subnetId).Extract() + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] OpenStack Neutron Subnet: %+v", s) + return s, "ACTIVE", nil + } +} + +func waitForSubnetDelete(networkingClient *gophercloud.ServiceClient, subnetId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete OpenStack Subnet %s.\n", subnetId) + + s, err := subnets.Get(networkingClient, subnetId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Subnet %s", subnetId) + return s, "DELETED", nil + } + return s, "ACTIVE", err + } + + err = subnets.Delete(networkingClient, subnetId).ExtractErr() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Subnet %s", subnetId) + return s, "DELETED", nil + } + if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { + if errCode.Actual == 409 { + return s, "ACTIVE", nil + } + } + return s, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack Subnet %s still active.\n", subnetId) + return s, "ACTIVE", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_objectstorage_container_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_objectstorage_container_v1.go new file mode 100644 index 000000000..27fb7ae0c --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_objectstorage_container_v1.go @@ -0,0 +1,148 @@ +package openstack + +import ( + "fmt" + "log" + + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceObjectStorageContainerV1() *schema.Resource { + return &schema.Resource{ + Create: resourceObjectStorageContainerV1Create, + Read: resourceObjectStorageContainerV1Read, + Update: resourceObjectStorageContainerV1Update, + Delete: resourceObjectStorageContainerV1Delete, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + "container_read": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "container_sync_to": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "container_sync_key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "container_write": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "content_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "metadata": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: false, + }, + }, + } +} + +func resourceObjectStorageContainerV1Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + objectStorageClient, err := config.objectStorageV1Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack object storage client: %s", err) + } + + cn := d.Get("name").(string) + + createOpts := &containers.CreateOpts{ + 
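+ // These fields map onto Swift's X-Container-Read / X-Container-Write
+ // ACL headers and the X-Container-Sync-To / X-Container-Sync-Key headers.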
ContainerRead: d.Get("container_read").(string), + ContainerSyncTo: d.Get("container_sync_to").(string), + ContainerSyncKey: d.Get("container_sync_key").(string), + ContainerWrite: d.Get("container_write").(string), + ContentType: d.Get("content_type").(string), + Metadata: resourceContainerMetadataV2(d), + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + _, err = containers.Create(objectStorageClient, cn, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack container: %s", err) + } + log.Printf("[INFO] Container ID: %s", cn) + + // Store the ID now + d.SetId(cn) + + return resourceObjectStorageContainerV1Read(d, meta) +} + +func resourceObjectStorageContainerV1Read(d *schema.ResourceData, meta interface{}) error { + return nil +} + +func resourceObjectStorageContainerV1Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + objectStorageClient, err := config.objectStorageV1Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack object storage client: %s", err) + } + + updateOpts := containers.UpdateOpts{ + ContainerRead: d.Get("container_read").(string), + ContainerSyncTo: d.Get("container_sync_to").(string), + ContainerSyncKey: d.Get("container_sync_key").(string), + ContainerWrite: d.Get("container_write").(string), + ContentType: d.Get("content_type").(string), + } + + if d.HasChange("metadata") { + updateOpts.Metadata = resourceContainerMetadataV2(d) + } + + _, err = containers.Update(objectStorageClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack container: %s", err) + } + + return resourceObjectStorageContainerV1Read(d, meta) +} + +func resourceObjectStorageContainerV1Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + objectStorageClient, err := config.objectStorageV1Client(GetRegion(d)) + if err != nil { + return fmt.Errorf("Error creating OpenStack object storage client: %s", err) + } + + _, err = containers.Delete(objectStorageClient, d.Id()).Extract() + if err != nil { + return fmt.Errorf("Error deleting OpenStack container: %s", err) + } + + d.SetId("") + return nil +} + +func resourceContainerMetadataV2(d *schema.ResourceData) map[string]string { + m := make(map[string]string) + for key, val := range d.Get("metadata").(map[string]interface{}) { + m[key] = val.(string) + } + return m +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/types.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/types.go new file mode 100644 index 000000000..fd4fca56f --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/types.go @@ -0,0 +1,358 @@ +package openstack + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "strings" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups" + "github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets" + "github.com/gophercloud/gophercloud/openstack/dns/v2/zones" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion" + 
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" + "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" +) + +// LogRoundTripper satisfies the http.RoundTripper interface and is used to +// customize the default http client RoundTripper to allow for logging. +type LogRoundTripper struct { + Rt http.RoundTripper + OsDebug bool +} + +// RoundTrip performs a round-trip HTTP request and logs relevant information about it. +func (lrt *LogRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) { + defer func() { + if request.Body != nil { + request.Body.Close() + } + }() + + // for future reference, this is how to access the Transport struct: + //tlsconfig := lrt.Rt.(*http.Transport).TLSClientConfig + + var err error + + if lrt.OsDebug { + log.Printf("[DEBUG] OpenStack Request URL: %s %s", request.Method, request.URL) + log.Printf("[DEBUG] Openstack Request Headers:\n%s", FormatHeaders(request.Header, "\n")) + + if request.Body != nil { + request.Body, err = lrt.logRequest(request.Body, request.Header.Get("Content-Type")) + if err != nil { + return nil, err + } + } + } + + response, err := lrt.Rt.RoundTrip(request) + if response == nil { + return nil, err + } + + if lrt.OsDebug { + log.Printf("[DEBUG] Openstack Response Code: %d", response.StatusCode) + log.Printf("[DEBUG] Openstack Response Headers:\n%s", FormatHeaders(response.Header, "\n")) + + response.Body, err = lrt.logResponse(response.Body, response.Header.Get("Content-Type")) + } + + return response, err +} + +// logRequest will log the HTTP Request details. +// If the body is JSON, it will attempt to be pretty-formatted. +func (lrt *LogRoundTripper) logRequest(original io.ReadCloser, contentType string) (io.ReadCloser, error) { + defer original.Close() + + var bs bytes.Buffer + _, err := io.Copy(&bs, original) + if err != nil { + return nil, err + } + + // Handle request contentType + if strings.HasPrefix(contentType, "application/json") { + debugInfo := lrt.formatJSON(bs.Bytes()) + log.Printf("[DEBUG] OpenStack Request Body: %s", debugInfo) + } else { + log.Printf("[DEBUG] OpenStack Request Body: %s", bs.String()) + } + + return ioutil.NopCloser(strings.NewReader(bs.String())), nil +} + +// logResponse will log the HTTP Response details. +// If the body is JSON, it will attempt to be pretty-formatted. +func (lrt *LogRoundTripper) logResponse(original io.ReadCloser, contentType string) (io.ReadCloser, error) { + if strings.HasPrefix(contentType, "application/json") { + var bs bytes.Buffer + defer original.Close() + _, err := io.Copy(&bs, original) + if err != nil { + return nil, err + } + debugInfo := lrt.formatJSON(bs.Bytes()) + if debugInfo != "" { + log.Printf("[DEBUG] OpenStack Response Body: %s", debugInfo) + } + return ioutil.NopCloser(strings.NewReader(bs.String())), nil + } + + log.Printf("[DEBUG] Not logging because OpenStack response body isn't JSON") + return original, nil +} + +// formatJSON will try to pretty-format a JSON body. +// It will also mask known fields which contain sensitive information. 
+func (lrt *LogRoundTripper) formatJSON(raw []byte) string { + var data map[string]interface{} + + err := json.Unmarshal(raw, &data) + if err != nil { + log.Printf("[DEBUG] Unable to parse OpenStack JSON: %s", err) + return string(raw) + } + + // Mask known password fields + if v, ok := data["auth"].(map[string]interface{}); ok { + if v, ok := v["identity"].(map[string]interface{}); ok { + if v, ok := v["password"].(map[string]interface{}); ok { + if v, ok := v["user"].(map[string]interface{}); ok { + v["password"] = "***" + } + } + } + } + + // Ignore the catalog + if v, ok := data["token"].(map[string]interface{}); ok { + if _, ok := v["catalog"]; ok { + return "" + } + } + + pretty, err := json.MarshalIndent(data, "", " ") + if err != nil { + log.Printf("[DEBUG] Unable to re-marshal OpenStack JSON: %s", err) + return string(raw) + } + + return string(pretty) +} + +// Firewall is an OpenStack firewall. +type Firewall struct { + firewalls.Firewall + routerinsertion.FirewallExt +} + +// FirewallCreateOpts represents the attributes used when creating a new firewall. +type FirewallCreateOpts struct { + firewalls.CreateOptsBuilder + ValueSpecs map[string]string `json:"value_specs,omitempty"` +} + +// ToFirewallCreateMap casts a CreateOptsExt struct to a map. +// It overrides firewalls.ToFirewallCreateMap to add the ValueSpecs field. +func (opts FirewallCreateOpts) ToFirewallCreateMap() (map[string]interface{}, error) { + body, err := opts.CreateOptsBuilder.ToFirewallCreateMap() + if err != nil { + return nil, err + } + + return AddValueSpecs(body), nil +} + +//FirewallUpdateOpts +type FirewallUpdateOpts struct { + firewalls.UpdateOptsBuilder +} + +func (opts FirewallUpdateOpts) ToFirewallUpdateMap() (map[string]interface{}, error) { + return BuildRequest(opts, "firewall") +} + +// FloatingIPCreateOpts represents the attributes used when creating a new floating ip. +type FloatingIPCreateOpts struct { + floatingips.CreateOpts + ValueSpecs map[string]string `json:"value_specs,omitempty"` +} + +// ToFloatingIPCreateMap casts a CreateOpts struct to a map. +// It overrides floatingips.ToFloatingIPCreateMap to add the ValueSpecs field. +func (opts FloatingIPCreateOpts) ToFloatingIPCreateMap() (map[string]interface{}, error) { + return BuildRequest(opts, "floatingip") +} + +// KeyPairCreateOpts represents the attributes used when creating a new keypair. +type KeyPairCreateOpts struct { + keypairs.CreateOpts + ValueSpecs map[string]string `json:"value_specs,omitempty"` +} + +// ToKeyPairCreateMap casts a CreateOpts struct to a map. +// It overrides keypairs.ToKeyPairCreateMap to add the ValueSpecs field. +func (opts KeyPairCreateOpts) ToKeyPairCreateMap() (map[string]interface{}, error) { + return BuildRequest(opts, "keypair") +} + +// NetworkCreateOpts represents the attributes used when creating a new network. +type NetworkCreateOpts struct { + networks.CreateOpts + ValueSpecs map[string]string `json:"value_specs,omitempty"` +} + +// ToNetworkCreateMap casts a CreateOpts struct to a map. +// It overrides networks.ToNetworkCreateMap to add the ValueSpecs field. +func (opts NetworkCreateOpts) ToNetworkCreateMap() (map[string]interface{}, error) { + return BuildRequest(opts, "network") +} + +// PolicyCreateOpts represents the attributes used when creating a new firewall policy. +type PolicyCreateOpts struct { + policies.CreateOpts + ValueSpecs map[string]string `json:"value_specs,omitempty"` +} + +// ToPolicyCreateMap casts a CreateOpts struct to a map. 
+// It overrides policies.ToFirewallPolicyCreateMap to add the ValueSpecs field. +func (opts PolicyCreateOpts) ToFirewallPolicyCreateMap() (map[string]interface{}, error) { + return BuildRequest(opts, "firewall_policy") +} + +// PortCreateOpts represents the attributes used when creating a new port. +type PortCreateOpts struct { + ports.CreateOpts + ValueSpecs map[string]string `json:"value_specs,omitempty"` +} + +// ToPortCreateMap casts a CreateOpts struct to a map. +// It overrides ports.ToPortCreateMap to add the ValueSpecs field. +func (opts PortCreateOpts) ToPortCreateMap() (map[string]interface{}, error) { + return BuildRequest(opts, "port") +} + +// RecordSetCreateOpts represents the attributes used when creating a new DNS record set. +type RecordSetCreateOpts struct { + recordsets.CreateOpts + ValueSpecs map[string]string `json:"value_specs,omitempty"` +} + +// ToRecordSetCreateMap casts a CreateOpts struct to a map. +// It overrides recordsets.ToRecordSetCreateMap to add the ValueSpecs field. +func (opts RecordSetCreateOpts) ToRecordSetCreateMap() (map[string]interface{}, error) { + b, err := BuildRequest(opts, "") + if err != nil { + return nil, err + } + + if m, ok := b[""].(map[string]interface{}); ok { + return m, nil + } + + return nil, fmt.Errorf("Expected map but got %T", b[""]) +} + +// RouterCreateOpts represents the attributes used when creating a new router. +type RouterCreateOpts struct { + routers.CreateOpts + ValueSpecs map[string]string `json:"value_specs,omitempty"` +} + +// ToRouterCreateMap casts a CreateOpts struct to a map. +// It overrides routers.ToRouterCreateMap to add the ValueSpecs field. +func (opts RouterCreateOpts) ToRouterCreateMap() (map[string]interface{}, error) { + return BuildRequest(opts, "router") +} + +// RuleCreateOpts represents the attributes used when creating a new firewall rule. +type RuleCreateOpts struct { + rules.CreateOpts + ValueSpecs map[string]string `json:"value_specs,omitempty"` +} + +// ToRuleCreateMap casts a CreateOpts struct to a map. +// It overrides rules.ToRuleCreateMap to add the ValueSpecs field. +func (opts RuleCreateOpts) ToRuleCreateMap() (map[string]interface{}, error) { + b, err := BuildRequest(opts, "firewall_rule") + if err != nil { + return nil, err + } + + if m := b["firewall_rule"].(map[string]interface{}); m["protocol"] == "any" { + m["protocol"] = nil + } + + return b, nil +} + +// ServerGroupCreateOpts represents the attributes used when creating a new router. +type ServerGroupCreateOpts struct { + servergroups.CreateOpts + ValueSpecs map[string]string `json:"value_specs,omitempty"` +} + +// ToServerGroupCreateMap casts a CreateOpts struct to a map. +// It overrides routers.ToServerGroupCreateMap to add the ValueSpecs field. +func (opts ServerGroupCreateOpts) ToServerGroupCreateMap() (map[string]interface{}, error) { + return BuildRequest(opts, "server_group") +} + +// SubnetCreateOpts represents the attributes used when creating a new subnet. +type SubnetCreateOpts struct { + subnets.CreateOpts + ValueSpecs map[string]string `json:"value_specs,omitempty"` +} + +// ToSubnetCreateMap casts a CreateOpts struct to a map. +// It overrides subnets.ToSubnetCreateMap to add the ValueSpecs field. 
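+// An empty gateway_ip is translated to JSON null so that Neutron creates
+// the subnet without a gateway instead of rejecting the empty string.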
+func (opts SubnetCreateOpts) ToSubnetCreateMap() (map[string]interface{}, error) {
+ b, err := BuildRequest(opts, "subnet")
+ if err != nil {
+ return nil, err
+ }
+
+ if m := b["subnet"].(map[string]interface{}); m["gateway_ip"] == "" {
+ m["gateway_ip"] = nil
+ }
+
+ return b, nil
+}
+
+// ZoneCreateOpts represents the attributes used when creating a new DNS zone.
+type ZoneCreateOpts struct {
+ zones.CreateOpts
+ ValueSpecs map[string]string `json:"value_specs,omitempty"`
+}
+
+// ToZoneCreateMap casts a CreateOpts struct to a map.
+// It overrides zones.ToZoneCreateMap to add the ValueSpecs field.
+func (opts ZoneCreateOpts) ToZoneCreateMap() (map[string]interface{}, error) {
+ b, err := BuildRequest(opts, "")
+ if err != nil {
+ return nil, err
+ }
+
+ if m, ok := b[""].(map[string]interface{}); ok {
+ if opts.TTL > 0 {
+ m["ttl"] = opts.TTL
+ }
+
+ return m, nil
+ }
+
+ return nil, fmt.Errorf("Expected map but got %T", b[""])
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/util.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/util.go
new file mode 100644
index 000000000..0c879c9ea
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/util.go
@@ -0,0 +1,100 @@
+package openstack
+
+import (
+ "fmt"
+ "net/http"
+ "os"
+ "sort"
+ "strings"
+
+ "github.com/Unknwon/com"
+ "github.com/gophercloud/gophercloud"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+// BuildRequest takes an opts struct and builds a request body for
+// Gophercloud to execute
+func BuildRequest(opts interface{}, parent string) (map[string]interface{}, error) {
+ b, err := gophercloud.BuildRequestBody(opts, "")
+ if err != nil {
+ return nil, err
+ }
+
+ b = AddValueSpecs(b)
+
+ return map[string]interface{}{parent: b}, nil
+}
+
+// CheckDeleted checks the error to see if it's a 404 (Not Found) and, if so,
+// sets the resource ID to the empty string instead of throwing an error.
+func CheckDeleted(d *schema.ResourceData, err error, msg string) error {
+ if _, ok := err.(gophercloud.ErrDefault404); ok {
+ d.SetId("")
+ return nil
+ }
+
+ return fmt.Errorf("%s: %s", msg, err)
+}
+
+// GetRegion returns the region from either d.Get("region") or OS_REGION_NAME
+func GetRegion(d *schema.ResourceData) string {
+ if v, ok := d.GetOk("region"); ok {
+ return v.(string)
+ }
+
+ if v := os.Getenv("OS_REGION_NAME"); v != "" {
+ return v
+ }
+
+ return ""
+}
+
+// AddValueSpecs expands the 'value_specs' object and removes 'value_specs'
+// from the request body.
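+// For example, {"name": "net1", "value_specs": {"port_security_enabled": "false"}}
+// becomes {"name": "net1", "port_security_enabled": "false"}.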
+func AddValueSpecs(body map[string]interface{}) map[string]interface{} {
+ if body["value_specs"] != nil {
+ for k, v := range body["value_specs"].(map[string]interface{}) {
+ body[k] = v
+ }
+ delete(body, "value_specs")
+ }
+
+ return body
+}
+
+// MapValueSpecs converts ResourceData into a map
+func MapValueSpecs(d *schema.ResourceData) map[string]string {
+ m := make(map[string]string)
+ for key, val := range d.Get("value_specs").(map[string]interface{}) {
+ m[key] = val.(string)
+ }
+ return m
+}
+
+// List of headers that need to be redacted
+var REDACT_HEADERS = []string{"x-auth-token", "x-auth-key", "x-service-token",
+ "x-storage-token", "x-account-meta-temp-url-key", "x-account-meta-temp-url-key-2",
+ "x-container-meta-temp-url-key", "x-container-meta-temp-url-key-2", "set-cookie",
+ "x-subject-token"}
+
+// RedactHeaders processes a headers object, returning a redacted list
+func RedactHeaders(headers http.Header) (processedHeaders []string) {
+ for name, header := range headers {
+ for _, v := range header {
+ if com.IsSliceContainsStr(REDACT_HEADERS, name) {
+ processedHeaders = append(processedHeaders, fmt.Sprintf("%v: %v", name, "***"))
+ } else {
+ processedHeaders = append(processedHeaders, fmt.Sprintf("%v: %v", name, v))
+ }
+ }
+ }
+ return
+}
+
+// FormatHeaders processes a headers object plus a separator, returning a string
+func FormatHeaders(headers http.Header, separator string) string {
+ redactedHeaders := RedactHeaders(headers)
+ sort.Strings(redactedHeaders)
+
+ return strings.Join(redactedHeaders, separator)
+}
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 29cf5f875..ab759b7e0 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -3052,6 +3052,18 @@
 "revision": "ac974c61c2f990f4115b119354b5e0b47550e888",
 "revisionTime": "2013-07-02T22:55:49Z"
 },
+ {
+ "checksumSHA1": "FcJRecfBai4J9kGW+iUnJYNUXzU=",
+ "path": "github.com/terraform-providers/terraform-provider-aws/aws",
+ "revision": "3e47e77ac7e5e457b606b0f861816bcd3e2ceb4e",
+ "revisionTime": "2017-06-12T09:08:25Z"
+ },
+ {
+ "checksumSHA1": "+LOuwysQ27JLjJNWSPKF/yXTMEc=",
+ "path": "github.com/terraform-providers/terraform-provider-openstack/openstack",
+ "revision": "6278902610d3077274146559ef10fbb5455e5129",
+ "revisionTime": "2017-06-09T18:33:54Z"
+ },
 {
 "checksumSHA1": "y1hkty5dgBN9elK4gP1TtVjT4e8=",
 "path": "github.com/tomnomnom/linkheader",