Merge pull request #17428 from hashicorp/v-terraform-provider-aws-v1.10.0

Deps: Bump terraform-provider-aws@v1.10.0 and aws-sdk-go@v1.12.75
This commit is contained in:
Brian Flad 2018-02-26 03:37:00 -08:00 committed by GitHub
commit 7bb2c17630
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
70 changed files with 16469 additions and 1032 deletions

View File

@ -106,6 +106,7 @@ const (
IotServiceID = "iot" // Iot.
KinesisServiceID = "kinesis" // Kinesis.
KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics.
KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo.
KmsServiceID = "kms" // Kms.
LambdaServiceID = "lambda" // Lambda.
LightsailServiceID = "lightsail" // Lightsail.
@ -1234,6 +1235,16 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
"kinesisvideo": service{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
"us-west-2": endpoint{},
},
},
"kms": service{
Endpoints: endpoints{
@ -2462,6 +2473,12 @@ var awsusgovPartition = partition{
},
},
},
"ecr": service{
Endpoints: endpoints{
"us-gov-west-1": endpoint{},
},
},
"ecs": service{
Endpoints: endpoints{

View File

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.12.73"
const SDKVersion = "1.12.75"

5574
vendor/github.com/aws/aws-sdk-go/service/appsync/api.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,29 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// Package appsync provides the client and types for making API
// requests to AWS AppSync.
//
// AWS AppSync provides API actions for creating and interacting with data sources
// using GraphQL from your application.
//
// See https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25 for more information on this service.
//
// See appsync package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/appsync/
//
// Using the Client
//
// To contact AWS AppSync with the SDK, use the New function to create
// a new service client. With that client you can make API requests to the service.
// These clients are safe to use concurrently.
//
// See the SDK's documentation for more information on how to use the SDK.
// https://docs.aws.amazon.com/sdk-for-go/api/
//
// See aws.Config documentation for more information on configuring SDK clients.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
//
// See the AWS AppSync client AppSync for more
// information on creating a client for this service.
// https://docs.aws.amazon.com/sdk-for-go/api/service/appsync/#New
package appsync

View File

@ -0,0 +1,69 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package appsync
// Error code constants returned in AWS AppSync service responses.
const (
// ErrCodeApiKeyLimitExceededException for service response error code
// "ApiKeyLimitExceededException".
//
// The API key exceeded a limit. Try your request again.
ErrCodeApiKeyLimitExceededException = "ApiKeyLimitExceededException"
// ErrCodeApiKeyValidityOutOfBoundsException for service response error code
// "ApiKeyValidityOutOfBoundsException".
//
// The API key expiration must be set to a value between 1 and 365 days.
ErrCodeApiKeyValidityOutOfBoundsException = "ApiKeyValidityOutOfBoundsException"
// ErrCodeApiLimitExceededException for service response error code
// "ApiLimitExceededException".
//
// The GraphQL API exceeded a limit. Try your request again.
ErrCodeApiLimitExceededException = "ApiLimitExceededException"
// ErrCodeBadRequestException for service response error code
// "BadRequestException".
//
// The request is not well formed. For example, a value is invalid or a required
// field is missing. Check the field values, and try again.
ErrCodeBadRequestException = "BadRequestException"
// ErrCodeConcurrentModificationException for service response error code
// "ConcurrentModificationException".
//
// Another modification is being made. That modification must complete before
// you can make your change.
ErrCodeConcurrentModificationException = "ConcurrentModificationException"
// ErrCodeGraphQLSchemaException for service response error code
// "GraphQLSchemaException".
//
// The GraphQL schema is not valid.
ErrCodeGraphQLSchemaException = "GraphQLSchemaException"
// ErrCodeInternalFailureException for service response error code
// "InternalFailureException".
//
// An internal AWS AppSync error occurred. Try your request again.
ErrCodeInternalFailureException = "InternalFailureException"
// ErrCodeLimitExceededException for service response error code
// "LimitExceededException".
//
// The request exceeded a limit. Try your request again.
ErrCodeLimitExceededException = "LimitExceededException"
// ErrCodeNotFoundException for service response error code
// "NotFoundException".
//
// The resource specified in the request was not found. Check the resource and
// try again.
ErrCodeNotFoundException = "NotFoundException"
// ErrCodeUnauthorizedException for service response error code
// "UnauthorizedException".
//
// You are not authorized to perform this operation.
ErrCodeUnauthorizedException = "UnauthorizedException"
)

View File

@ -0,0 +1,97 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package appsync
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/signer/v4"
"github.com/aws/aws-sdk-go/private/protocol/restjson"
)
// AppSync provides the API operation methods for making requests to
// AWS AppSync. See this package's package overview docs
// for details on the service.
//
// AppSync methods are safe to use concurrently. It is not safe to
// modify or mutate any of the struct's properties though.
type AppSync struct {
*client.Client
}
// Used for custom client initialization logic (set elsewhere in this package, if at all).
var initClient func(*client.Client)
// Used for custom request initialization logic (set elsewhere in this package, if at all).
var initRequest func(*request.Request)
// Service information constants
const (
ServiceName = "appsync" // Service endpoint prefix API calls made to.
EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
)
// New creates a new instance of the AppSync client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// // Create a AppSync client from just a session.
// svc := appsync.New(mySession)
//
// // Create a AppSync client with additional configuration
// svc := appsync.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *AppSync {
// Resolve endpoint, handlers, and signing info for this service from the provider.
c := p.ClientConfig(EndpointsID, cfgs...)
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *AppSync {
// Fall back to the service's own signing name when the resolver supplied none.
if len(signingName) == 0 {
signingName = "appsync"
}
svc := &AppSync{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
SigningName: signingName,
SigningRegion: signingRegion,
Endpoint: endpoint,
APIVersion: "2017-07-25",
JSONVersion: "1.1",
},
handlers,
),
}
// Handlers: SigV4 request signing plus REST-JSON marshaling/unmarshaling.
svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler)
// Run custom client initialization if present
if initClient != nil {
initClient(svc.Client)
}
return svc
}
// newRequest creates a new request for a AppSync operation and runs any
// custom request initialization.
func (c *AppSync) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
// Run custom request initialization (the package-level initRequest hook) if present
if initRequest != nil {
initRequest(req)
}
return req
}

View File

@ -6280,6 +6280,89 @@ func (c *CognitoIdentityProvider) GetIdentityProviderByIdentifierWithContext(ctx
return out, req.Send()
}
const opGetSigningCertificate = "GetSigningCertificate"
// GetSigningCertificateRequest generates a "aws/request.Request" representing the
// client's request for the GetSigningCertificate operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See GetSigningCertificate for more information on using the GetSigningCertificate
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the GetSigningCertificateRequest method.
// req, resp := client.GetSigningCertificateRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/cognito-idp-2016-04-18/GetSigningCertificate
func (c *CognitoIdentityProvider) GetSigningCertificateRequest(input *GetSigningCertificateInput) (req *request.Request, output *GetSigningCertificateOutput) {
op := &request.Operation{
Name: opGetSigningCertificate,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &GetSigningCertificateInput{}
}
output = &GetSigningCertificateOutput{}
req = c.newRequest(op, input, output)
return
}
// GetSigningCertificate API operation for Amazon Cognito Identity Provider.
//
// This method takes a user pool ID and returns the signing certificate.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Cognito Identity Provider's
// API operation GetSigningCertificate for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInternalErrorException "InternalErrorException"
// This exception is thrown when Amazon Cognito encounters an internal error.
//
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// This exception is thrown when the Amazon Cognito service cannot find the
// requested resource.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/cognito-idp-2016-04-18/GetSigningCertificate
func (c *CognitoIdentityProvider) GetSigningCertificate(input *GetSigningCertificateInput) (*GetSigningCertificateOutput, error) {
req, out := c.GetSigningCertificateRequest(input)
return out, req.Send()
}
// GetSigningCertificateWithContext is the same as GetSigningCertificate with the addition of
// the ability to pass a context and additional request options.
//
// See GetSigningCertificate for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Request. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *CognitoIdentityProvider) GetSigningCertificateWithContext(ctx aws.Context, input *GetSigningCertificateInput, opts ...request.Option) (*GetSigningCertificateOutput, error) {
req, out := c.GetSigningCertificateRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opGetUICustomization = "GetUICustomization"
// GetUICustomizationRequest generates a "aws/request.Request" representing the
@ -11510,6 +11593,9 @@ type AdminInitiateAuthInput struct {
// * USER_SRP_AUTH will take in USERNAME and SRP_A and return the SRP variables
// to be used for next challenge execution.
//
// * USER_PASSWORD_AUTH will take in USERNAME and PASSWORD and return the
// next challenge or tokens.
//
// Valid values include:
//
// * USER_SRP_AUTH: Authentication flow for the Secure Remote Password (SRP)
@ -11524,6 +11610,11 @@ type AdminInitiateAuthInput struct {
// USERNAME and PASSWORD directly if the flow is enabled for calling the
// app client.
//
// * USER_PASSWORD_AUTH: Non-SRP authentication flow; USERNAME and PASSWORD
// are passed directly. If a user migration Lambda trigger is set, this flow
// will invoke the user migration Lambda if the USERNAME is not found in
// the user pool.
//
// AuthFlow is a required field
AuthFlow *string `type:"string" required:"true" enum:"AuthFlowType"`
@ -11533,9 +11624,8 @@ type AdminInitiateAuthInput struct {
// * For USER_SRP_AUTH: USERNAME (required), SRP_A (required), SECRET_HASH
// (required if the app client is configured with a client secret), DEVICE_KEY
//
// * For REFRESH_TOKEN_AUTH/REFRESH_TOKEN: USERNAME (required), SECRET_HASH
// (required if the app client is configured with a client secret), REFRESH_TOKEN
// (required), DEVICE_KEY
// * For REFRESH_TOKEN_AUTH/REFRESH_TOKEN: REFRESH_TOKEN (required), SECRET_HASH
// (required if the app client is configured with a client secret), DEVICE_KEY
//
// * For ADMIN_NO_SRP_AUTH: USERNAME (required), SECRET_HASH (if app client
// is configured with client secret), PASSWORD (required), DEVICE_KEY
@ -11693,7 +11783,7 @@ type AdminInitiateAuthOutput struct {
//
// All challenges require USERNAME and SECRET_HASH (if applicable).
//
// The value of the USER_IF_FOR_SRP attribute will be the user's actual username,
// The value of the USER_ID_FOR_SRP attribute will be the user's actual username,
// not an alias (such as email address or phone number), even if you specified
// an alias in your call to AdminInitiateAuth. This is because, in the AdminRespondToAuthChallenge
// API ChallengeResponses, the USERNAME attribute cannot be an alias.
@ -12099,7 +12189,7 @@ type AdminListUserAuthEventsInput struct {
// UserPoolId is a required field
UserPoolId *string `min:"1" type:"string" required:"true"`
// The user pool username.
// The user pool username or an alias.
//
// Username is a required field
Username *string `min:"1" type:"string" required:"true"`
@ -12573,7 +12663,7 @@ type AdminSetUserMFAPreferenceInput struct {
// UserPoolId is a required field
UserPoolId *string `min:"1" type:"string" required:"true"`
// The user pool username.
// The user pool username or alias.
//
// Username is a required field
Username *string `min:"1" type:"string" required:"true"`
@ -15055,6 +15145,15 @@ type CreateUserPoolInput struct {
EmailVerificationSubject *string `min:"1" type:"string"`
// The Lambda trigger configuration information for the new user pool.
//
// In a push model, event sources (such as Amazon S3 and custom applications)
// need permission to invoke a function. So you will need to make an extra call
// to add permission for these event sources to invoke your Lambda function.
//
// For more information on using the Lambda API to add permission, see AddPermission
// (https://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html).
//
// For adding permission using the AWS CLI, see add-permission (https://docs.aws.amazon.com/cli/latest/reference/lambda/add-permission.html).
LambdaConfig *LambdaConfigType `type:"structure"`
// Specifies MFA configuration details.
@ -17299,6 +17398,72 @@ func (s *GetIdentityProviderByIdentifierOutput) SetIdentityProvider(v *IdentityP
return s
}
// Request to get a signing certificate from Cognito.
type GetSigningCertificateInput struct {
_ struct{} `type:"structure"`
// The user pool ID.
//
// UserPoolId is a required field
UserPoolId *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s GetSigningCertificateInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetSigningCertificateInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetSigningCertificateInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetSigningCertificateInput"}
// UserPoolId must be present and non-empty, mirroring the min:"1" model constraint.
if s.UserPoolId == nil {
invalidParams.Add(request.NewErrParamRequired("UserPoolId"))
}
if s.UserPoolId != nil && len(*s.UserPoolId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("UserPoolId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetUserPoolId sets the UserPoolId field's value.
func (s *GetSigningCertificateInput) SetUserPoolId(v string) *GetSigningCertificateInput {
s.UserPoolId = &v
return s
}
// Response from Cognito for a signing certificate request.
type GetSigningCertificateOutput struct {
_ struct{} `type:"structure"`
// The signing certificate.
Certificate *string `type:"string"`
}
// String returns the string representation.
func (s GetSigningCertificateOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
func (s GetSigningCertificateOutput) GoString() string {
return s.String()
}
// SetCertificate sets the Certificate field's value.
func (s *GetSigningCertificateOutput) SetCertificate(v string) *GetSigningCertificateOutput {
s.Certificate = &v
return s
}
type GetUICustomizationInput struct {
_ struct{} `type:"structure"`
@ -17934,6 +18099,9 @@ type InitiateAuthInput struct {
// * USER_SRP_AUTH will take in USERNAME and SRP_A and return the SRP variables
// to be used for next challenge execution.
//
// * USER_PASSWORD_AUTH will take in USERNAME and PASSWORD and return the
// next challenge or tokens.
//
// Valid values include:
//
// * USER_SRP_AUTH: Authentication flow for the Secure Remote Password (SRP)
@ -17944,6 +18112,11 @@ type InitiateAuthInput struct {
//
// * CUSTOM_AUTH: Custom authentication flow.
//
// * USER_PASSWORD_AUTH: Non-SRP authentication flow; USERNAME and PASSWORD
// are passed directly. If a user migration Lambda trigger is set, this flow
// will invoke the user migration Lambda if the USERNAME is not found in
// the user pool.
//
// ADMIN_NO_SRP_AUTH is not a valid value.
//
// AuthFlow is a required field
@ -17955,9 +18128,8 @@ type InitiateAuthInput struct {
// * For USER_SRP_AUTH: USERNAME (required), SRP_A (required), SECRET_HASH
// (required if the app client is configured with a client secret), DEVICE_KEY
//
// * For REFRESH_TOKEN_AUTH/REFRESH_TOKEN: USERNAME (required), SECRET_HASH
// (required if the app client is configured with a client secret), REFRESH_TOKEN
// (required), DEVICE_KEY
// * For REFRESH_TOKEN_AUTH/REFRESH_TOKEN: REFRESH_TOKEN (required), SECRET_HASH
// (required if the app client is configured with a client secret), DEVICE_KEY
//
// * For CUSTOM_AUTH: USERNAME (required), SECRET_HASH (if app client is
// configured with client secret), DEVICE_KEY
@ -18160,6 +18332,9 @@ type LambdaConfigType struct {
// A Lambda trigger that is invoked before token generation.
PreTokenGeneration *string `min:"20" type:"string"`
// The user migration Lambda config type.
UserMigration *string `min:"20" type:"string"`
// Verifies the authentication challenge response.
VerifyAuthChallengeResponse *string `min:"20" type:"string"`
}
@ -18201,6 +18376,9 @@ func (s *LambdaConfigType) Validate() error {
if s.PreTokenGeneration != nil && len(*s.PreTokenGeneration) < 20 {
invalidParams.Add(request.NewErrParamMinLen("PreTokenGeneration", 20))
}
if s.UserMigration != nil && len(*s.UserMigration) < 20 {
invalidParams.Add(request.NewErrParamMinLen("UserMigration", 20))
}
if s.VerifyAuthChallengeResponse != nil && len(*s.VerifyAuthChallengeResponse) < 20 {
invalidParams.Add(request.NewErrParamMinLen("VerifyAuthChallengeResponse", 20))
}
@ -18259,6 +18437,12 @@ func (s *LambdaConfigType) SetPreTokenGeneration(v string) *LambdaConfigType {
return s
}
// SetUserMigration sets the UserMigration field's value.
func (s *LambdaConfigType) SetUserMigration(v string) *LambdaConfigType {
s.UserMigration = &v
return s
}
// SetVerifyAuthChallengeResponse sets the VerifyAuthChallengeResponse field's value.
func (s *LambdaConfigType) SetVerifyAuthChallengeResponse(v string) *LambdaConfigType {
s.VerifyAuthChallengeResponse = &v
@ -23881,6 +24065,9 @@ const (
// AuthFlowTypeAdminNoSrpAuth is a AuthFlowType enum value
AuthFlowTypeAdminNoSrpAuth = "ADMIN_NO_SRP_AUTH"
// AuthFlowTypeUserPasswordAuth is a AuthFlowType enum value
AuthFlowTypeUserPasswordAuth = "USER_PASSWORD_AUTH"
)
const (
@ -24016,6 +24203,9 @@ const (
// ExplicitAuthFlowsTypeCustomAuthFlowOnly is a ExplicitAuthFlowsType enum value
ExplicitAuthFlowsTypeCustomAuthFlowOnly = "CUSTOM_AUTH_FLOW_ONLY"
// ExplicitAuthFlowsTypeUserPasswordAuth is a ExplicitAuthFlowsType enum value
ExplicitAuthFlowsTypeUserPasswordAuth = "USER_PASSWORD_AUTH"
)
const (

4621
vendor/github.com/aws/aws-sdk-go/service/dax/api.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

33
vendor/github.com/aws/aws-sdk-go/service/dax/doc.go generated vendored Normal file
View File

@ -0,0 +1,33 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// Package dax provides the client and types for making API
// requests to Amazon DynamoDB Accelerator (DAX).
//
// DAX is a managed caching service engineered for Amazon DynamoDB. DAX dramatically
// speeds up database reads by caching frequently-accessed data from DynamoDB,
// so applications can access that data with sub-millisecond latency. You can
// create a DAX cluster easily, using the AWS Management Console. With a few
// simple modifications to your code, your application can begin taking advantage
// of the DAX cluster and realize significant improvements in read performance.
//
// See https://docs.aws.amazon.com/goto/WebAPI/dax-2017-04-19 for more information on this service.
//
// See dax package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/dax/
//
// Using the Client
//
// To contact Amazon DynamoDB Accelerator (DAX) with the SDK, use the New function to create
// a new service client. With that client you can make API requests to the service.
// These clients are safe to use concurrently.
//
// See the SDK's documentation for more information on how to use the SDK.
// https://docs.aws.amazon.com/sdk-for-go/api/
//
// See aws.Config documentation for more information on configuring SDK clients.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
//
// See the Amazon DynamoDB Accelerator (DAX) client DAX for more
// information on creating a client for this service.
// https://docs.aws.amazon.com/sdk-for-go/api/service/dax/#New
package dax

160
vendor/github.com/aws/aws-sdk-go/service/dax/errors.go generated vendored Normal file
View File

@ -0,0 +1,160 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package dax
// Error code constants returned in Amazon DynamoDB Accelerator (DAX) service responses.
const (
// ErrCodeClusterAlreadyExistsFault for service response error code
// "ClusterAlreadyExistsFault".
//
// You already have a DAX cluster with the given identifier.
ErrCodeClusterAlreadyExistsFault = "ClusterAlreadyExistsFault"
// ErrCodeClusterNotFoundFault for service response error code
// "ClusterNotFoundFault".
//
// The requested cluster ID does not refer to an existing DAX cluster.
ErrCodeClusterNotFoundFault = "ClusterNotFoundFault"
// ErrCodeClusterQuotaForCustomerExceededFault for service response error code
// "ClusterQuotaForCustomerExceededFault".
//
// You have attempted to exceed the maximum number of DAX clusters for your
// AWS account.
ErrCodeClusterQuotaForCustomerExceededFault = "ClusterQuotaForCustomerExceededFault"
// ErrCodeInsufficientClusterCapacityFault for service response error code
// "InsufficientClusterCapacityFault".
//
// There are not enough system resources to create the cluster you requested
// (or to resize an already-existing cluster).
ErrCodeInsufficientClusterCapacityFault = "InsufficientClusterCapacityFault"
// ErrCodeInvalidARNFault for service response error code
// "InvalidARNFault".
//
// The Amazon Resource Name (ARN) supplied in the request is not valid.
ErrCodeInvalidARNFault = "InvalidARNFault"
// ErrCodeInvalidClusterStateFault for service response error code
// "InvalidClusterStateFault".
//
// The requested DAX cluster is not in the available state.
ErrCodeInvalidClusterStateFault = "InvalidClusterStateFault"
// ErrCodeInvalidParameterCombinationException for service response error code
// "InvalidParameterCombinationException".
//
// Two or more incompatible parameters were specified.
ErrCodeInvalidParameterCombinationException = "InvalidParameterCombinationException"
// ErrCodeInvalidParameterGroupStateFault for service response error code
// "InvalidParameterGroupStateFault".
//
// One or more parameters in a parameter group are in an invalid state.
ErrCodeInvalidParameterGroupStateFault = "InvalidParameterGroupStateFault"
// ErrCodeInvalidParameterValueException for service response error code
// "InvalidParameterValueException".
//
// The value for a parameter is invalid.
ErrCodeInvalidParameterValueException = "InvalidParameterValueException"
// ErrCodeInvalidSubnet for service response error code
// "InvalidSubnet".
//
// An invalid subnet identifier was specified.
ErrCodeInvalidSubnet = "InvalidSubnet"
// ErrCodeInvalidVPCNetworkStateFault for service response error code
// "InvalidVPCNetworkStateFault".
//
// The VPC network is in an invalid state.
ErrCodeInvalidVPCNetworkStateFault = "InvalidVPCNetworkStateFault"
// ErrCodeNodeNotFoundFault for service response error code
// "NodeNotFoundFault".
//
// None of the nodes in the cluster have the given node ID.
ErrCodeNodeNotFoundFault = "NodeNotFoundFault"
// ErrCodeNodeQuotaForClusterExceededFault for service response error code
// "NodeQuotaForClusterExceededFault".
//
// You have attempted to exceed the maximum number of nodes for a DAX cluster.
ErrCodeNodeQuotaForClusterExceededFault = "NodeQuotaForClusterExceededFault"
// ErrCodeNodeQuotaForCustomerExceededFault for service response error code
// "NodeQuotaForCustomerExceededFault".
//
// You have attempted to exceed the maximum number of nodes for your AWS account.
ErrCodeNodeQuotaForCustomerExceededFault = "NodeQuotaForCustomerExceededFault"
// ErrCodeParameterGroupAlreadyExistsFault for service response error code
// "ParameterGroupAlreadyExistsFault".
//
// The specified parameter group already exists.
ErrCodeParameterGroupAlreadyExistsFault = "ParameterGroupAlreadyExistsFault"
// ErrCodeParameterGroupNotFoundFault for service response error code
// "ParameterGroupNotFoundFault".
//
// The specified parameter group does not exist.
ErrCodeParameterGroupNotFoundFault = "ParameterGroupNotFoundFault"
// ErrCodeParameterGroupQuotaExceededFault for service response error code
// "ParameterGroupQuotaExceededFault".
//
// You have attempted to exceed the maximum number of parameter groups.
ErrCodeParameterGroupQuotaExceededFault = "ParameterGroupQuotaExceededFault"
// ErrCodeSubnetGroupAlreadyExistsFault for service response error code
// "SubnetGroupAlreadyExistsFault".
//
// The specified subnet group already exists.
ErrCodeSubnetGroupAlreadyExistsFault = "SubnetGroupAlreadyExistsFault"
// ErrCodeSubnetGroupInUseFault for service response error code
// "SubnetGroupInUseFault".
//
// The specified subnet group is currently in use.
ErrCodeSubnetGroupInUseFault = "SubnetGroupInUseFault"
// ErrCodeSubnetGroupNotFoundFault for service response error code
// "SubnetGroupNotFoundFault".
//
// The requested subnet group name does not refer to an existing subnet group.
ErrCodeSubnetGroupNotFoundFault = "SubnetGroupNotFoundFault"
// ErrCodeSubnetGroupQuotaExceededFault for service response error code
// "SubnetGroupQuotaExceededFault".
//
// The request cannot be processed because it would exceed the allowed number
// of subnets in a subnet group.
ErrCodeSubnetGroupQuotaExceededFault = "SubnetGroupQuotaExceededFault"
// ErrCodeSubnetInUse for service response error code
// "SubnetInUse".
//
// The requested subnet is being used by another subnet group.
ErrCodeSubnetInUse = "SubnetInUse"
// ErrCodeSubnetQuotaExceededFault for service response error code
// "SubnetQuotaExceededFault".
//
// The request cannot be processed because it would exceed the allowed number
// of subnets in a subnet group.
ErrCodeSubnetQuotaExceededFault = "SubnetQuotaExceededFault"
// ErrCodeTagNotFoundFault for service response error code
// "TagNotFoundFault".
//
// The tag does not exist.
ErrCodeTagNotFoundFault = "TagNotFoundFault"
// ErrCodeTagQuotaPerResourceExceeded for service response error code
// "TagQuotaPerResourceExceeded".
//
// You have exceeded the maximum number of tags for this DAX cluster.
ErrCodeTagQuotaPerResourceExceeded = "TagQuotaPerResourceExceeded"
)

View File

@ -0,0 +1,95 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package dax
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/signer/v4"
"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
)
// DAX provides the API operation methods for making requests to
// Amazon DynamoDB Accelerator (DAX). See this package's package overview docs
// for details on the service.
//
// DAX methods are safe to use concurrently. It is not safe to
// modify or mutate any of the struct's properties though.
type DAX struct {
*client.Client
}
// Used for custom client initialization logic (set elsewhere in this package, if at all).
var initClient func(*client.Client)
// Used for custom request initialization logic (set elsewhere in this package, if at all).
var initRequest func(*request.Request)
// Service information constants
const (
ServiceName = "dax" // Service endpoint prefix API calls made to.
EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
)
// New creates a new instance of the DAX client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// // Create a DAX client from just a session.
// svc := dax.New(mySession)
//
// // Create a DAX client with additional configuration
// svc := dax.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *DAX {
// Resolve endpoint, handlers, and signing info for this service from the provider.
c := p.ClientConfig(EndpointsID, cfgs...)
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
}
// newClient creates, initializes and returns a new service client instance.
//
// NOTE(review): unlike some generated clients (e.g. appsync), no fallback is
// applied when signingName is empty — presumably the API model supplies it;
// confirm against the generator before changing.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DAX {
svc := &DAX{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
SigningName: signingName,
SigningRegion: signingRegion,
Endpoint: endpoint,
APIVersion: "2017-04-19",
JSONVersion: "1.1",
TargetPrefix: "AmazonDAXV3",
},
handlers,
),
}
// Handlers: SigV4 request signing plus JSON-RPC marshaling/unmarshaling.
svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
// Run custom client initialization if present
if initClient != nil {
initClient(svc.Client)
}
return svc
}
// newRequest creates a new request for a DAX operation and runs any
// custom request initialization.
func (c *DAX) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
// Run custom request initialization (the package-level initRequest hook) if present
if initRequest != nil {
initRequest(req)
}
return req
}

View File

@ -8272,7 +8272,7 @@ func (c *EC2) DescribeAccountAttributesRequest(input *DescribeAccountAttributesI
//
// * default-vpc: The ID of the default VPC for your account, or none.
//
// * max-instances: The maximum number of On-Demand instances that you can
// * max-instances: The maximum number of On-Demand Instances that you can
// run.
//
// * vpc-max-security-groups-per-interface: The maximum number of security
@ -8390,6 +8390,94 @@ func (c *EC2) DescribeAddressesWithContext(ctx aws.Context, input *DescribeAddre
return out, req.Send()
}
const opDescribeAggregateIdFormat = "DescribeAggregateIdFormat"

// DescribeAggregateIdFormatRequest generates a "aws/request.Request" representing the
// client's request for the DescribeAggregateIdFormat operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DescribeAggregateIdFormat for more information on using the DescribeAggregateIdFormat
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the DescribeAggregateIdFormatRequest method.
//    req, resp := client.DescribeAggregateIdFormatRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAggregateIdFormat
func (c *EC2) DescribeAggregateIdFormatRequest(input *DescribeAggregateIdFormatInput) (req *request.Request, output *DescribeAggregateIdFormatOutput) {
	op := &request.Operation{
		Name:       opDescribeAggregateIdFormat,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DescribeAggregateIdFormatInput{}
	}

	output = &DescribeAggregateIdFormatOutput{}
	req = c.newRequest(op, input, output)
	return
}
// DescribeAggregateIdFormat API operation for Amazon Elastic Compute Cloud.
//
// Reports the longer ID (17-character ID) format settings for all resource
// types in a specific region. This is useful for a quick audit of whether a
// region is fully opted in to longer IDs. Only resource types that support
// longer IDs are included in the response.
//
// The following resource types support longer IDs: bundle | conversion-task
// | dhcp-options | elastic-ip-allocation | elastic-ip-association | export-task
// | flow-log | image | import-task | instance | internet-gateway | network-acl
// | network-acl-association | network-interface | network-interface-attachment
// | prefix-list | reservation | route-table | route-table-association | security-group
// | snapshot | subnet | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association
// | vpc-peering-connection.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Elastic Compute Cloud's
// API operation DescribeAggregateIdFormat for usage and error information.
// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAggregateIdFormat
func (c *EC2) DescribeAggregateIdFormat(input *DescribeAggregateIdFormatInput) (*DescribeAggregateIdFormatOutput, error) {
	req, out := c.DescribeAggregateIdFormatRequest(input)
	err := req.Send()
	return out, err
}
// DescribeAggregateIdFormatWithContext performs the same operation as
// DescribeAggregateIdFormat, but additionally accepts a Context for request
// cancellation plus optional per-request options.
//
// See DescribeAggregateIdFormat for details on how to use this API operation.
//
// The supplied context must be non-nil; passing a nil context panics. In the
// future the SDK may create sub-contexts for http.Requests. See
// https://golang.org/pkg/context/ for more information on using Contexts.
func (c *EC2) DescribeAggregateIdFormatWithContext(ctx aws.Context, input *DescribeAggregateIdFormatInput, opts ...request.Option) (*DescribeAggregateIdFormatOutput, error) {
	req, out := c.DescribeAggregateIdFormatRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	err := req.Send()
	return out, err
}
const opDescribeAvailabilityZones = "DescribeAvailabilityZones"
// DescribeAvailabilityZonesRequest generates a "aws/request.Request" representing the
@ -9666,8 +9754,13 @@ func (c *EC2) DescribeIdFormatRequest(input *DescribeIdFormatInput) (req *reques
// request only returns information about resource types whose ID formats can
// be modified; it does not return information about other resource types.
//
// The following resource types support longer IDs: instance | reservation |
// snapshot | volume.
// The following resource types support longer IDs: bundle | conversion-task
// | dhcp-options | elastic-ip-allocation | elastic-ip-association | export-task
// | flow-log | image | import-task | instance | internet-gateway | network-acl
// | network-acl-association | network-interface | network-interface-attachment
// | prefix-list | reservation | route-table | route-table-association | security-group
// | snapshot | subnet | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association
// | vpc-peering-connection.
//
// These settings apply to the IAM user who makes the request; they do not apply
// to the entire AWS account. By default, an IAM user defaults to the same settings
@ -9755,8 +9848,13 @@ func (c *EC2) DescribeIdentityIdFormatRequest(input *DescribeIdentityIdFormatInp
// other resource types. For more information, see Resource IDs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/resource-ids.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
// The following resource types support longer IDs: instance | reservation |
// snapshot | volume.
// The following resource types support longer IDs: bundle | conversion-task
// | dhcp-options | elastic-ip-allocation | elastic-ip-association | export-task
// | flow-log | image | import-task | instance | internet-gateway | network-acl
// | network-acl-association | network-interface | network-interface-attachment
// | prefix-list | reservation | route-table | route-table-association | security-group
// | snapshot | subnet | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association
// | vpc-peering-connection.
//
// These settings apply to the principal specified in the request. They do not
// apply to the principal that makes the request.
@ -11519,6 +11617,94 @@ func (c *EC2) DescribePrefixListsWithContext(ctx aws.Context, input *DescribePre
return out, req.Send()
}
const opDescribePrincipalIdFormat = "DescribePrincipalIdFormat"

// DescribePrincipalIdFormatRequest generates a "aws/request.Request" representing the
// client's request for the DescribePrincipalIdFormat operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DescribePrincipalIdFormat for more information on using the DescribePrincipalIdFormat
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the DescribePrincipalIdFormatRequest method.
//    req, resp := client.DescribePrincipalIdFormatRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribePrincipalIdFormat
func (c *EC2) DescribePrincipalIdFormatRequest(input *DescribePrincipalIdFormatInput) (req *request.Request, output *DescribePrincipalIdFormatOutput) {
	op := &request.Operation{
		Name:       opDescribePrincipalIdFormat,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DescribePrincipalIdFormatInput{}
	}

	output = &DescribePrincipalIdFormatOutput{}
	req = c.newRequest(op, input, output)
	return
}
// DescribePrincipalIdFormat API operation for Amazon Elastic Compute Cloud.
//
// Describes the ID format settings for the root user and for every IAM role
// and IAM user that has explicitly specified a longer ID (17-character ID)
// preference.
//
// By default, IAM roles and IAM users inherit the same ID settings as the
// root user unless they explicitly override them; this request is useful for
// identifying the IAM users and IAM roles that have overridden the default
// ID settings.
//
// The following resource types support longer IDs: bundle | conversion-task
// | dhcp-options | elastic-ip-allocation | elastic-ip-association | export-task
// | flow-log | image | import-task | instance | internet-gateway | network-acl
// | network-acl-association | network-interface | network-interface-attachment
// | prefix-list | reservation | route-table | route-table-association | security-group
// | snapshot | subnet | subnet-cidr-block-association | volume | vpc | vpc-cidr-block-association
// | vpc-peering-connection.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Elastic Compute Cloud's
// API operation DescribePrincipalIdFormat for usage and error information.
// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribePrincipalIdFormat
func (c *EC2) DescribePrincipalIdFormat(input *DescribePrincipalIdFormatInput) (*DescribePrincipalIdFormatOutput, error) {
	req, out := c.DescribePrincipalIdFormatRequest(input)
	err := req.Send()
	return out, err
}
// DescribePrincipalIdFormatWithContext performs the same operation as
// DescribePrincipalIdFormat, but additionally accepts a Context for request
// cancellation plus optional per-request options.
//
// See DescribePrincipalIdFormat for details on how to use this API operation.
//
// The supplied context must be non-nil; passing a nil context panics. In the
// future the SDK may create sub-contexts for http.Requests. See
// https://golang.org/pkg/context/ for more information on using Contexts.
func (c *EC2) DescribePrincipalIdFormatWithContext(ctx aws.Context, input *DescribePrincipalIdFormatInput, opts ...request.Option) (*DescribePrincipalIdFormatOutput, error) {
	req, out := c.DescribePrincipalIdFormatRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	err := req.Send()
	return out, err
}
const opDescribeRegions = "DescribeRegions"
// DescribeRegionsRequest generates a "aws/request.Request" representing the
@ -17435,8 +17621,16 @@ func (c *EC2) ModifyIdFormatRequest(input *ModifyIdFormatInput) (req *request.Re
//
// Modifies the ID format for the specified resource on a per-region basis.
// You can specify that resources should receive longer IDs (17-character IDs)
// when they are created. The following resource types support longer IDs: instance
// | reservation | snapshot | volume.
// when they are created.
//
// This request can only be used to modify longer ID settings for resource types
// that are within the opt-in period. Resources currently in their opt-in period
// include: bundle | conversion-task | dhcp-options | elastic-ip-allocation
// | elastic-ip-association | export-task | flow-log | image | import-task |
// internet-gateway | network-acl | network-acl-association | network-interface
// | network-interface-attachment | prefix-list | route-table | route-table-association
// | security-group | subnet | subnet-cidr-block-association | vpc | vpc-cidr-block-association
// | vpc-peering-connection.
//
// This setting applies to the IAM user who makes the request; it does not apply
// to the entire AWS account. By default, an IAM user defaults to the same settings
@ -17528,8 +17722,16 @@ func (c *EC2) ModifyIdentityIdFormatRequest(input *ModifyIdentityIdFormatInput)
// user for an account. You can specify that resources should receive longer
// IDs (17-character IDs) when they are created.
//
// The following resource types support longer IDs: instance | reservation |
// snapshot | volume. For more information, see Resource IDs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/resource-ids.html)
// This request can only be used to modify longer ID settings for resource types
// that are within the opt-in period. Resources currently in their opt-in period
// include: bundle | conversion-task | dhcp-options | elastic-ip-allocation
// | elastic-ip-association | export-task | flow-log | image | import-task |
// internet-gateway | network-acl | network-acl-association | network-interface
// | network-interface-attachment | prefix-list | route-table | route-table-association
// | security-group | subnet | subnet-cidr-block-association | vpc | vpc-cidr-block-association
// | vpc-peering-connection.
//
// For more information, see Resource IDs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/resource-ids.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
// This setting applies to the principal specified in the request; it does not
@ -24165,7 +24367,7 @@ func (s *AttributeBooleanValue) SetValue(v bool) *AttributeBooleanValue {
type AttributeValue struct {
_ struct{} `type:"structure"`
// The attribute value. Note that the value is case-sensitive.
// The attribute value. The value is case-sensitive.
Value *string `locationName:"value" type:"string"`
}
@ -26210,14 +26412,34 @@ type CopyImageInput struct {
// in the Amazon Elastic Compute Cloud User Guide.
Encrypted *bool `locationName:"encrypted" type:"boolean"`
// The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when
// encrypting the snapshots of an image during a copy operation. This parameter
// is only required if you want to use a non-default CMK; if this parameter
// is not specified, the default CMK for EBS is used. The ARN contains the arn:aws:kms
// namespace, followed by the region of the CMK, the AWS account ID of the CMK
// owner, the key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
// An identifier for the AWS Key Management Service (AWS KMS) customer master
// key (CMK) to use when creating the encrypted volume. This parameter is only
// required if you want to use a non-default CMK; if this parameter is not specified,
// the default CMK for EBS is used. If a KmsKeyId is specified, the Encrypted
// flag must also be set.
//
// The CMK identifier may be provided in any of the following formats:
//
// * Key ID
//
// * Key alias
//
// * ARN using key ID. The ID ARN contains the arn:aws:kms namespace, followed
// by the region of the CMK, the AWS account ID of the CMK owner, the key
// namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
//
//
// * ARN using key alias. The alias ARN contains the arn:aws:kms namespace,
// followed by the region of the CMK, the AWS account ID of the CMK owner,
// the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
//
//
// AWS parses KmsKeyId asynchronously, meaning that the action you call may
// appear to complete even though you provided an invalid identifier. This action
// will eventually report failure.
//
// The specified CMK must exist in the region that the snapshot is being copied
// to. If a KmsKeyId is specified, the Encrypted flag must also be set.
// to.
KmsKeyId *string `locationName:"kmsKeyId" type:"string"`
// The name of the new AMI in the destination region.
@ -26369,14 +26591,31 @@ type CopySnapshotInput struct {
// the Amazon Elastic Compute Cloud User Guide.
Encrypted *bool `locationName:"encrypted" type:"boolean"`
// The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when
// creating the snapshot copy. This parameter is only required if you want to
// use a non-default CMK; if this parameter is not specified, the default CMK
// for EBS is used. The ARN contains the arn:aws:kms namespace, followed by
// the region of the CMK, the AWS account ID of the CMK owner, the key namespace,
// and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
// The specified CMK must exist in the region that the snapshot is being copied
// to. If a KmsKeyId is specified, the Encrypted flag must also be set.
// An identifier for the AWS Key Management Service (AWS KMS) customer master
// key (CMK) to use when creating the encrypted volume. This parameter is only
// required if you want to use a non-default CMK; if this parameter is not specified,
// the default CMK for EBS is used. If a KmsKeyId is specified, the Encrypted
// flag must also be set.
//
// The CMK identifier may be provided in any of the following formats:
//
// * Key ID
//
// * Key alias
//
// * ARN using key ID. The ID ARN contains the arn:aws:kms namespace, followed
// by the region of the CMK, the AWS account ID of the CMK owner, the key
// namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
//
//
// * ARN using key alias. The alias ARN contains the arn:aws:kms namespace,
// followed by the region of the CMK, the AWS account ID of the CMK owner,
// the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
//
//
// AWS parses KmsKeyId asynchronously, meaning that the action you call may
// appear to complete even though you provided an invalid identifier. The action
// will eventually fail.
KmsKeyId *string `locationName:"kmsKeyId" type:"string"`
// The pre-signed URL that facilitates copying an encrypted snapshot. This parameter
@ -28835,7 +29074,7 @@ type CreateSecurityGroupInput struct {
// The name of the security group.
//
// Constraints: Up to 255 characters in length
// Constraints: Up to 255 characters in length. Cannot start with sg-.
//
// Constraints for EC2-Classic: ASCII characters
//
@ -29290,13 +29529,31 @@ type CreateVolumeInput struct {
// Constraint: Range is 100 to 20000 for Provisioned IOPS SSD volumes
Iops *int64 `type:"integer"`
// The full ARN of the AWS Key Management Service (AWS KMS) customer master
// An identifier for the AWS Key Management Service (AWS KMS) customer master
// key (CMK) to use when creating the encrypted volume. This parameter is only
// required if you want to use a non-default CMK; if this parameter is not specified,
// the default CMK for EBS is used. The ARN contains the arn:aws:kms namespace,
// followed by the region of the CMK, the AWS account ID of the CMK owner, the
// key namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
// If a KmsKeyId is specified, the Encrypted flag must also be set.
// the default CMK for EBS is used. If a KmsKeyId is specified, the Encrypted
// flag must also be set.
//
// The CMK identifier may be provided in any of the following formats:
//
// * Key ID
//
// * Key alias
//
// * ARN using key ID. The ID ARN contains the arn:aws:kms namespace, followed
// by the region of the CMK, the AWS account ID of the CMK owner, the key
// namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
//
//
// * ARN using key alias. The alias ARN contains the arn:aws:kms namespace,
// followed by the region of the CMK, the AWS account ID of the CMK owner,
// the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
//
//
// AWS parses KmsKeyId asynchronously, meaning that the action you call may
// appear to complete even though you provided an invalid identifier. The action
// will eventually fail.
KmsKeyId *string `type:"string"`
// The size of the volume, in GiBs.
@ -32999,6 +33256,66 @@ func (s *DescribeAddressesOutput) SetAddresses(v []*Address) *DescribeAddressesO
return s
}
// DescribeAggregateIdFormatInput carries the parameters for the
// DescribeAggregateIdFormat operation.
type DescribeAggregateIdFormatInput struct {
	_ struct{} `type:"structure"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `type:"boolean"`
}

// String returns the string representation.
func (in DescribeAggregateIdFormatInput) String() string {
	return awsutil.Prettify(in)
}

// GoString returns the string representation.
func (in DescribeAggregateIdFormatInput) GoString() string {
	return in.String()
}

// SetDryRun sets the DryRun field's value.
func (in *DescribeAggregateIdFormatInput) SetDryRun(v bool) *DescribeAggregateIdFormatInput {
	in.DryRun = &v
	return in
}
// DescribeAggregateIdFormatOutput is the response for the
// DescribeAggregateIdFormat operation.
type DescribeAggregateIdFormatOutput struct {
	_ struct{} `type:"structure"`

	// Information about each resource's ID format.
	Statuses []*IdFormat `locationName:"statusSet" locationNameList:"item" type:"list"`

	// Indicates whether all resource types in the region are configured to use
	// longer IDs. This value is only true if all users are configured to use longer
	// IDs for all resources types in the region.
	UseLongIdsAggregated *bool `locationName:"useLongIdsAggregated" type:"boolean"`
}

// String returns the string representation.
func (o DescribeAggregateIdFormatOutput) String() string {
	return awsutil.Prettify(o)
}

// GoString returns the string representation.
func (o DescribeAggregateIdFormatOutput) GoString() string {
	return o.String()
}

// SetStatuses sets the Statuses field's value.
func (o *DescribeAggregateIdFormatOutput) SetStatuses(v []*IdFormat) *DescribeAggregateIdFormatOutput {
	o.Statuses = v
	return o
}

// SetUseLongIdsAggregated sets the UseLongIdsAggregated field's value.
func (o *DescribeAggregateIdFormatOutput) SetUseLongIdsAggregated(v bool) *DescribeAggregateIdFormatOutput {
	o.UseLongIdsAggregated = &v
	return o
}
// Contains the parameters for DescribeAvailabilityZones.
type DescribeAvailabilityZonesInput struct {
_ struct{} `type:"structure"`
@ -34568,7 +34885,12 @@ func (s *DescribeIamInstanceProfileAssociationsOutput) SetNextToken(v string) *D
type DescribeIdFormatInput struct {
_ struct{} `type:"structure"`
// The type of resource: instance | reservation | snapshot | volume
// The type of resource: bundle | conversion-task | dhcp-options | elastic-ip-allocation
// | elastic-ip-association | export-task | flow-log | image | import-task |
// instance | internet-gateway | network-acl | network-acl-association | network-interface
// | network-interface-attachment | prefix-list | reservation | route-table
// | route-table-association | security-group | snapshot | subnet | subnet-cidr-block-association
// | volume | vpc | vpc-cidr-block-association | vpc-peering-connection
Resource *string `type:"string"`
}
@ -34622,7 +34944,12 @@ type DescribeIdentityIdFormatInput struct {
// PrincipalArn is a required field
PrincipalArn *string `locationName:"principalArn" type:"string" required:"true"`
// The type of resource: instance | reservation | snapshot | volume
// The type of resource: bundle | conversion-task | dhcp-options | elastic-ip-allocation
// | elastic-ip-association | export-task | flow-log | image | import-task |
// instance | internet-gateway | network-acl | network-acl-association | network-interface
// | network-interface-attachment | prefix-list | reservation | route-table
// | route-table-association | security-group | snapshot | subnet | subnet-cidr-block-association
// | volume | vpc | vpc-cidr-block-association | vpc-peering-connection
Resource *string `locationName:"resource" type:"string"`
}
@ -37418,6 +37745,98 @@ func (s *DescribePrefixListsOutput) SetPrefixLists(v []*PrefixList) *DescribePre
return s
}
// DescribePrincipalIdFormatInput carries the parameters for the
// DescribePrincipalIdFormat operation.
type DescribePrincipalIdFormatInput struct {
	_ struct{} `type:"structure"`

	// Checks whether you have the required permissions for the action, without
	// actually making the request, and provides an error response. If you have
	// the required permissions, the error response is DryRunOperation. Otherwise,
	// it is UnauthorizedOperation.
	DryRun *bool `type:"boolean"`

	// The maximum number of results to return in a single call. To retrieve the
	// remaining results, make another call with the returned NextToken value.
	MaxResults *int64 `type:"integer"`

	// The token to request the next page of results.
	NextToken *string `type:"string"`

	// The type of resource: bundle | conversion-task | dhcp-options | elastic-ip-allocation
	// | elastic-ip-association | export-task | flow-log | image | import-task |
	// instance | internet-gateway | network-acl | network-acl-association | network-interface
	// | network-interface-attachment | prefix-list | reservation | route-table
	// | route-table-association | security-group | snapshot | subnet | subnet-cidr-block-association
	// | volume | vpc | vpc-cidr-block-association | vpc-peering-connection
	Resources []*string `locationName:"Resource" locationNameList:"item" type:"list"`
}

// String returns the string representation.
func (in DescribePrincipalIdFormatInput) String() string {
	return awsutil.Prettify(in)
}

// GoString returns the string representation.
func (in DescribePrincipalIdFormatInput) GoString() string {
	return in.String()
}

// SetDryRun sets the DryRun field's value.
func (in *DescribePrincipalIdFormatInput) SetDryRun(v bool) *DescribePrincipalIdFormatInput {
	in.DryRun = &v
	return in
}

// SetMaxResults sets the MaxResults field's value.
func (in *DescribePrincipalIdFormatInput) SetMaxResults(v int64) *DescribePrincipalIdFormatInput {
	in.MaxResults = &v
	return in
}

// SetNextToken sets the NextToken field's value.
func (in *DescribePrincipalIdFormatInput) SetNextToken(v string) *DescribePrincipalIdFormatInput {
	in.NextToken = &v
	return in
}

// SetResources sets the Resources field's value.
func (in *DescribePrincipalIdFormatInput) SetResources(v []*string) *DescribePrincipalIdFormatInput {
	in.Resources = v
	return in
}
// DescribePrincipalIdFormatOutput is the response for the
// DescribePrincipalIdFormat operation.
type DescribePrincipalIdFormatOutput struct {
	_ struct{} `type:"structure"`

	// The token to use to retrieve the next page of results. This value is null
	// when there are no more results to return.
	NextToken *string `locationName:"nextToken" type:"string"`

	// Information about the ID format settings for the ARN.
	Principals []*PrincipalIdFormat `locationName:"principalSet" locationNameList:"item" type:"list"`
}

// String returns the string representation.
func (o DescribePrincipalIdFormatOutput) String() string {
	return awsutil.Prettify(o)
}

// GoString returns the string representation.
func (o DescribePrincipalIdFormatOutput) GoString() string {
	return o.String()
}

// SetNextToken sets the NextToken field's value.
func (o *DescribePrincipalIdFormatOutput) SetNextToken(v string) *DescribePrincipalIdFormatOutput {
	o.NextToken = &v
	return o
}

// SetPrincipals sets the Principals field's value.
func (o *DescribePrincipalIdFormatOutput) SetPrincipals(v []*PrincipalIdFormat) *DescribePrincipalIdFormatOutput {
	o.Principals = v
	return o
}
// Contains the parameters for DescribeRegions.
type DescribeRegionsInput struct {
_ struct{} `type:"structure"`
@ -38648,7 +39067,8 @@ type DescribeSecurityGroupsInput struct {
// The maximum number of results to return in a single call. To retrieve the
// remaining results, make another request with the returned NextToken value.
// This value can be between 5 and 1000.
// This value can be between 5 and 1000. If this parameter is not specified,
// then all results are returned.
MaxResults *int64 `type:"integer"`
// The token to request the next page of results.
@ -39740,8 +40160,8 @@ func (s *DescribeSpotPriceHistoryInput) SetStartTime(v time.Time) *DescribeSpotP
type DescribeSpotPriceHistoryOutput struct {
_ struct{} `type:"structure"`
// The token required to retrieve the next set of results. This value is null
// when there are no more results to return.
// The token required to retrieve the next set of results. This value is an
// empty string when there are no more results to return.
NextToken *string `locationName:"nextToken" type:"string"`
// The historical Spot prices.
@ -43295,7 +43715,8 @@ type EbsBlockDevice struct {
// it is not used in requests to create gp2, st1, sc1, or standard volumes.
Iops *int64 `locationName:"iops" type:"integer"`
// ID for a user-managed CMK under which the EBS volume is encrypted.
// Identifier (key ID, key alias, ID ARN, or alias ARN) for a user-managed CMK
// under which the EBS volume is encrypted.
//
// Note: This parameter is only supported on BlockDeviceMapping objects called
// by RunInstances (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html),
@ -46241,7 +46662,7 @@ type ImageDiskContainer struct {
// The format of the disk image being imported.
//
// Valid values: RAW | VHD | VMDK | OVA
// Valid values: VHD | VMDK | OVA
Format *string `type:"string"`
// The ID of the EBS snapshot to be used for importing the snapshot.
@ -46763,7 +47184,7 @@ type ImportInstanceLaunchSpecification struct {
InstanceInitiatedShutdownBehavior *string `locationName:"instanceInitiatedShutdownBehavior" type:"string" enum:"ShutdownBehavior"`
// The instance type. For more information about the instance types that you
// can import, see Instance Types (http://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html#vmimport-instance-types)
// can import, see Instance Types (http://docs.aws.amazon.com/vm-import/latest/userguide/vmie_prereqs.html#vmimport-instance-types)
// in the VM Import/Export User Guide.
InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"`
@ -46779,9 +47200,7 @@ type ImportInstanceLaunchSpecification struct {
// [EC2-VPC] The ID of the subnet in which to launch the instance.
SubnetId *string `locationName:"subnetId" type:"string"`
// The user data to make available to the instance. If you are using an AWS
// SDK or command line tool, Base64-encoding is performed for you, and you can
// load the text from a file. Otherwise, you must provide Base64-encoded text.
// The Base64-encoded user data to make available to the instance.
UserData *UserData `locationName:"userData" type:"structure"`
}
@ -49431,9 +49850,7 @@ type LaunchSpecification struct {
// The ID of the subnet in which to launch the instance.
SubnetId *string `locationName:"subnetId" type:"string"`
// The user data to make available to the instances. If you are using an AWS
// SDK or command line tool, Base64-encoding is performed for you, and you can
// load the text from a file. Otherwise, you must provide Base64-encoded text.
// The Base64-encoded user data for the instance.
UserData *string `locationName:"userData" type:"string"`
}
@ -51318,7 +51735,15 @@ func (s *ModifyHostsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *ModifyHostsO
type ModifyIdFormatInput struct {
_ struct{} `type:"structure"`
// The type of resource: instance | reservation | snapshot | volume
// The type of resource: bundle | conversion-task | dhcp-options | elastic-ip-allocation
// | elastic-ip-association | export-task | flow-log | image | import-task |
// internet-gateway | network-acl | network-acl-association | network-interface
// | network-interface-attachment | prefix-list | route-table | route-table-association
// | security-group | subnet | subnet-cidr-block-association | vpc | vpc-cidr-block-association
// | vpc-peering-connection.
//
// Alternatively, use the all-current option to include all resource types that
// are currently within their opt-in period for longer IDs.
//
// Resource is a required field
Resource *string `type:"string" required:"true"`
@ -51392,7 +51817,15 @@ type ModifyIdentityIdFormatInput struct {
// PrincipalArn is a required field
PrincipalArn *string `locationName:"principalArn" type:"string" required:"true"`
// The type of resource: instance | reservation | snapshot | volume
// The type of resource: bundle | conversion-task | dhcp-options | elastic-ip-allocation
// | elastic-ip-association | export-task | flow-log | image | import-task |
// internet-gateway | network-acl | network-acl-association | network-interface
// | network-interface-attachment | prefix-list | route-table | route-table-association
// | security-group | subnet | subnet-cidr-block-association | vpc | vpc-cidr-block-association
// | vpc-peering-connection.
//
// Alternatively, use the all-current option to include all resource types that
// are currently within their opt-in period for longer IDs.
//
// Resource is a required field
Resource *string `locationName:"resource" type:"string" required:"true"`
@ -53126,7 +53559,7 @@ type ModifyVpcEndpointServiceConfigurationInput struct {
// The Amazon Resource Names (ARNs) of Network Load Balancers to add to your
// service configuration.
AddNetworkLoadBalancerArns []*string `locationName:"addNetworkLoadBalancerArn" locationNameList:"item" type:"list"`
AddNetworkLoadBalancerArns []*string `locationName:"AddNetworkLoadBalancerArn" locationNameList:"item" type:"list"`
// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have
@ -53136,7 +53569,7 @@ type ModifyVpcEndpointServiceConfigurationInput struct {
// The Amazon Resource Names (ARNs) of Network Load Balancers to remove from
// your service configuration.
RemoveNetworkLoadBalancerArns []*string `locationName:"removeNetworkLoadBalancerArn" locationNameList:"item" type:"list"`
RemoveNetworkLoadBalancerArns []*string `locationName:"RemoveNetworkLoadBalancerArn" locationNameList:"item" type:"list"`
// The ID of the service.
//
@ -55211,6 +55644,39 @@ func (s *PricingDetail) SetPrice(v float64) *PricingDetail {
return s
}
// PrincipalIdFormat describes the longer-ID format settings for a single
// principal. NOTE(review): generated placeholder docs below replaced;
// presumably this is the element type returned by the EC2
// DescribePrincipalIdFormat API — confirm against the EC2 API model.
type PrincipalIdFormat struct {
_ struct{} `type:"structure"`
// The Amazon Resource Name (ARN) of the principal (wire name "arn").
Arn *string `locationName:"arn" type:"string"`
// The ID-format settings for the principal, one IdFormat entry per
// resource type (wire name "statusSet").
Statuses []*IdFormat `locationName:"statusSet" locationNameList:"item" type:"list"`
}
// String returns the string representation
func (s PrincipalIdFormat) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PrincipalIdFormat) GoString() string {
return s.String()
}
// SetArn sets the Arn field's value.
// It returns the receiver to allow chained setter calls.
func (s *PrincipalIdFormat) SetArn(v string) *PrincipalIdFormat {
s.Arn = &v
return s
}
// SetStatuses sets the Statuses field's value.
// It returns the receiver to allow chained setter calls.
func (s *PrincipalIdFormat) SetStatuses(v []*IdFormat) *PrincipalIdFormat {
s.Statuses = v
return s
}
// Describes a secondary private IPv4 address for a network interface.
type PrivateIpAddressSpecification struct {
_ struct{} `type:"structure"`
@ -57297,12 +57763,10 @@ type RequestLaunchTemplateData struct {
// are created during launch.
TagSpecifications []*LaunchTemplateTagSpecificationRequest `locationName:"TagSpecification" locationNameList:"LaunchTemplateTagSpecificationRequest" type:"list"`
// The user data to make available to the instance. For more information, see
// Running Commands on Your Linux Instance at Launch (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html)
// The Base64-encoded user data to make available to the instance. For more
// information, see Running Commands on Your Linux Instance at Launch (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html)
// (Linux) and Adding User Data (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data)
// (Windows). If you are using a command line tool, base64-encoding is performed
// for you and you can load the text from a file. Otherwise, you must provide
// base64-encoded text.
// (Windows).
UserData *string `type:"string"`
}
@ -57827,9 +58291,7 @@ type RequestSpotLaunchSpecification struct {
// The ID of the subnet in which to launch the instance.
SubnetId *string `locationName:"subnetId" type:"string"`
// The user data to make available to the instances. If you are using an AWS
// SDK or command line tool, Base64-encoding is performed for you, and you can
// load the text from a file. Otherwise, you must provide Base64-encoded text.
// The Base64-encoded user data for the instance.
UserData *string `locationName:"userData" type:"string"`
}
@ -62422,7 +62884,7 @@ type SnapshotDiskContainer struct {
// The format of the disk image being imported.
//
// Valid values: RAW | VHD | VMDK | OVA
// Valid values: VHD | VMDK | OVA
Format *string `type:"string"`
// The URL to the Amazon S3-based disk image being imported. It can either be
@ -62691,9 +63153,7 @@ type SpotFleetLaunchSpecification struct {
// The tags to apply during creation.
TagSpecifications []*SpotFleetTagSpecification `locationName:"tagSpecificationSet" locationNameList:"item" type:"list"`
// The user data to make available to the instances. If you are using an AWS
// SDK or command line tool, Base64-encoding is performed for you, and you can
// load the text from a file. Otherwise, you must provide Base64-encoded text.
// The Base64-encoded user data to make available to the instances.
UserData *string `locationName:"userData" type:"string"`
// The number of units provided by the specified instance type. These are the
@ -67007,7 +67467,7 @@ type VpnConnectionOptionsSpecification struct {
// Indicate whether the VPN connection uses static routes only. If you are creating
// a VPN connection for a device that does not support BGP, you must specify
// true.
// true. Use CreateVpnConnectionRoute to create a static route.
//
// Default: false
StaticRoutesOnly *bool `locationName:"staticRoutesOnly" type:"boolean"`
@ -68216,6 +68676,9 @@ const (
// NetworkInterfaceStatusAvailable is a NetworkInterfaceStatus enum value
NetworkInterfaceStatusAvailable = "available"
// NetworkInterfaceStatusAssociated is a NetworkInterfaceStatus enum value
NetworkInterfaceStatusAssociated = "associated"
// NetworkInterfaceStatusAttaching is a NetworkInterfaceStatus enum value
NetworkInterfaceStatusAttaching = "attaching"

View File

@ -4,7 +4,7 @@
// requests to Amazon Elastic Compute Cloud.
//
// Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity
// in the AWS Cloud. Using Amazon EC2 eliminates your need to invest in hardware
// in the AWS Cloud. Using Amazon EC2 eliminates the need to invest in hardware
// up front, so you can develop and deploy applications faster.
//
// See https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15 for more information on this service.

View File

@ -3573,6 +3573,9 @@ type Action struct {
// Information about the NETWORK_CONNECTION action described in this finding.
NetworkConnectionAction *NetworkConnectionAction `locationName:"networkConnectionAction" type:"structure"`
// Information about the PORT_PROBE action described in this finding.
PortProbeAction *PortProbeAction `locationName:"portProbeAction" type:"structure"`
}
// String returns the string representation
@ -3609,6 +3612,12 @@ func (s *Action) SetNetworkConnectionAction(v *NetworkConnectionAction) *Action
return s
}
// SetPortProbeAction sets the PortProbeAction field's value.
// It returns the receiver so setter calls can be chained fluently.
func (s *Action) SetPortProbeAction(v *PortProbeAction) *Action {
s.PortProbeAction = v
return s
}
// Archive Findings Request
type ArchiveFindingsInput struct {
_ struct{} `type:"structure"`
@ -6830,6 +6839,72 @@ func (s *Organization) SetOrg(v string) *Organization {
return s
}
// PortProbeAction holds information about the PORT_PROBE action described
// in a GuardDuty finding.
type PortProbeAction struct {
_ struct{} `type:"structure"`
// Whether the port probe was blocked (wire name "blocked").
Blocked *bool `locationName:"blocked" type:"boolean"`
// Details for each observed port probe (wire name "portProbeDetails").
PortProbeDetails []*PortProbeDetail `locationName:"portProbeDetails" type:"list"`
}
// String returns the string representation
func (s PortProbeAction) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PortProbeAction) GoString() string {
return s.String()
}
// SetBlocked sets the Blocked field's value.
// It returns the receiver to allow chained setter calls.
func (s *PortProbeAction) SetBlocked(v bool) *PortProbeAction {
s.Blocked = &v
return s
}
// SetPortProbeDetails sets the PortProbeDetails field's value.
// It returns the receiver to allow chained setter calls.
func (s *PortProbeAction) SetPortProbeDetails(v []*PortProbeDetail) *PortProbeAction {
s.PortProbeDetails = v
return s
}
// PortProbeDetail describes one probed port within a PORT_PROBE finding:
// the local port that was probed and the remote IP that probed it.
type PortProbeDetail struct {
_ struct{} `type:"structure"`
// Local port information of the connection (wire name "localPortDetails").
LocalPortDetails *LocalPortDetails `locationName:"localPortDetails" type:"structure"`
// Remote IP information of the connection (wire name "remoteIpDetails").
RemoteIpDetails *RemoteIpDetails `locationName:"remoteIpDetails" type:"structure"`
}
// String returns the string representation
func (s PortProbeDetail) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PortProbeDetail) GoString() string {
return s.String()
}
// SetLocalPortDetails sets the LocalPortDetails field's value.
// It returns the receiver to allow chained setter calls.
func (s *PortProbeDetail) SetLocalPortDetails(v *LocalPortDetails) *PortProbeDetail {
s.LocalPortDetails = v
return s
}
// SetRemoteIpDetails sets the RemoteIpDetails field's value.
// It returns the receiver to allow chained setter calls.
func (s *PortProbeDetail) SetRemoteIpDetails(v *RemoteIpDetails) *PortProbeDetail {
s.RemoteIpDetails = v
return s
}
// Other private IP address information of the EC2 instance.
type PrivateIpAddressDetails struct {
_ struct{} `type:"structure"`

View File

@ -3468,6 +3468,9 @@ func (c *KMS) RetireGrantRequest(input *RetireGrantInput) (req *request.Request,
// API operation RetireGrant for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInvalidArnException "InvalidArnException"
// The request was rejected because a specified ARN was not valid.
//
// * ErrCodeInvalidGrantTokenException "InvalidGrantTokenException"
// The request was rejected because the specified grant token is not valid.
//

View File

@ -1504,12 +1504,12 @@ func (c *RDS) CreateDBInstanceReadReplicaRequest(input *CreateDBInstanceReadRepl
// MariaDB, or PostgreSQL. For more information, see Working with PostgreSQL,
// MySQL, and MariaDB Read Replicas (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ReadRepl.html).
//
// Amazon Aurora does not support this action. You must call the CreateDBInstance
// Amazon Aurora doesn't support this action. You must call the CreateDBInstance
// action to create a DB instance for an Aurora DB cluster.
//
// All Read Replica DB instances are created with backups disabled. All other
// DB instance attributes (including DB security groups and DB parameter groups)
// are inherited from the source DB instance, except as specified below.
// are inherited from the source DB instance, except as specified following.
//
// Your source DB instance must have backup retention enabled.
//
@ -3126,7 +3126,7 @@ func (c *RDS) DescribeAccountAttributesRequest(input *DescribeAccountAttributesI
// The description for a quota includes the quota name, current usage toward
// that quota, and the quota's maximum value.
//
// This command does not take any parameters.
// This command doesn't take any parameters.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@ -7691,6 +7691,8 @@ func (c *RDS) PromoteReadReplicaRequest(input *PromoteReadReplicaInput) (req *re
// promotion process. Once the instance is promoted to a primary instance, backups
// are taken based on your backup settings.
//
// This command doesn't apply to Aurora MySQL and Aurora PostgreSQL.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@ -8903,16 +8905,19 @@ func (c *RDS) RestoreDBInstanceFromDBSnapshotRequest(input *RestoreDBInstanceFro
//
// If your intent is to replace your original DB instance with the new, restored
// DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot
// action. RDS does not allow two DB instances with the same name. Once you
// have renamed your original DB instance with a different identifier, then
// you can pass the original name of the DB instance as the DBInstanceIdentifier
// in the call to the RestoreDBInstanceFromDBSnapshot action. The result is
// that you will replace the original DB instance with the DB instance created
// from the snapshot.
// action. RDS doesn't allow two DB instances with the same name. Once you have
// renamed your original DB instance with a different identifier, then you can
// pass the original name of the DB instance as the DBInstanceIdentifier in
// the call to the RestoreDBInstanceFromDBSnapshot action. The result is that
// you will replace the original DB instance with the DB instance created from
// the snapshot.
//
// If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier
// must be the ARN of the shared DB snapshot.
//
// This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora,
// use RestoreDBClusterFromSnapshot.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@ -9203,6 +9208,9 @@ func (c *RDS) RestoreDBInstanceToPointInTimeRequest(input *RestoreDBInstanceToPo
// with mirroring; in this case, the instance becomes a mirrored deployment
// and not a single-AZ deployment.
//
// This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora,
// use RestoreDBClusterToPointInTime.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@ -9440,7 +9448,7 @@ func (c *RDS) StartDBInstanceRequest(input *StartDBInstanceInput) (req *request.
// AWS CLI command, or the StopDBInstance action. For more information, see
// Stopping and Starting a DB instance in the AWS RDS user guide.
//
// This command does not apply to Aurora MySQL and Aurora PostgreSQL.
// This command doesn't apply to Aurora MySQL and Aurora PostgreSQL.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@ -9563,7 +9571,7 @@ func (c *RDS) StopDBInstanceRequest(input *StopDBInstanceInput) (req *request.Re
// do a point-in-time restore if necessary. For more information, see Stopping
// and Starting a DB instance in the AWS RDS user guide.
//
// This command does not apply to Aurora MySQL and Aurora PostgreSQL.
// This command doesn't apply to Aurora MySQL and Aurora PostgreSQL.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@ -11143,16 +11151,21 @@ type CreateDBClusterInput struct {
// The name of the database engine to be used for this DB cluster.
//
// Valid Values: aurora, aurora-postgresql
// Valid Values: aurora (for MySQL 5.6-compatible Aurora), aurora-mysql (for
// MySQL 5.7-compatible Aurora), and aurora-postgresql
//
// Engine is a required field
Engine *string `type:"string" required:"true"`
// The version number of the database engine to use.
//
// Aurora
// Aurora MySQL
//
// Example: 5.6.10a
// Example: 5.6.10a, 5.7.12
//
// Aurora PostgreSQL
//
// Example: 9.6.3
EngineVersion *string `type:"string"`
// The AWS KMS key identifier for an encrypted DB cluster.
@ -11507,6 +11520,14 @@ type CreateDBClusterParameterGroupInput struct {
// and can be applied only to a DB cluster running a database engine and engine
// version compatible with that DB cluster parameter group family.
//
// Aurora MySQL
//
// Example: aurora5.6, aurora-mysql5.7
//
// Aurora PostgreSQL
//
// Example: aurora-postgresql9.6
//
// DBParameterGroupFamily is a required field
DBParameterGroupFamily *string `type:"string" required:"true"`
@ -11994,7 +12015,9 @@ type CreateDBInstanceInput struct {
//
// Valid Values:
//
// * aurora
// * aurora (for MySQL 5.6-compatible Aurora)
//
// * aurora-mysql (for MySQL 5.7-compatible Aurora)
//
// * aurora-postgresql
//
@ -12874,14 +12897,14 @@ type CreateDBInstanceReadReplicaInput struct {
// a MonitoringRoleArn value.
MonitoringRoleArn *string `type:"string"`
// Specifies whether the read replica is in a Multi-AZ deployment.
// Specifies whether the Read Replica is in a Multi-AZ deployment.
//
// You can create a Read Replica as a Multi-AZ DB instance. RDS creates a standby
// of your replica in another Availability Zone for failover support for the
// replica. Creating your Read Replica as a Multi-AZ DB instance is independent
// of whether the source database is a Multi-AZ DB instance.
//
// Currently PostgreSQL Read Replicas can only be created as single-AZ DB instances.
// Currently, you can't create PostgreSQL Read Replicas as Multi-AZ DB instances.
MultiAZ *bool `type:"boolean"`
// The option group the DB instance is associated with. If omitted, the default
@ -21492,7 +21515,7 @@ type ModifyDBClusterInput struct {
NewDBClusterIdentifier *string `type:"string"`
// A value that indicates that the DB cluster should be associated with the
// specified option group. Changing this parameter does not result in an outage
// specified option group. Changing this parameter doesn't result in an outage
// except in the following case, and the change is applied during the next maintenance
// window unless the ApplyImmediately parameter is set to true for this request.
// If the parameter change results in an option group that enables OEM, this
@ -21851,8 +21874,8 @@ type ModifyDBInstanceInput struct {
AllocatedStorage *int64 `type:"integer"`
// Indicates that major version upgrades are allowed. Changing this parameter
// does not result in an outage and the change is asynchronously applied as
// soon as possible.
// doesn't result in an outage and the change is asynchronously applied as soon
// as possible.
//
// Constraints: This parameter must be set to true when specifying a value for
// the EngineVersion parameter that is a different major version than the DB
@ -21875,8 +21898,8 @@ type ModifyDBInstanceInput struct {
ApplyImmediately *bool `type:"boolean"`
// Indicates that minor version upgrades are applied automatically to the DB
// instance during the maintenance window. Changing this parameter does not
// result in an outage except in the following case and the change is asynchronously
// instance during the maintenance window. Changing this parameter doesn't result
// in an outage except in the following case and the change is asynchronously
// applied as soon as possible. An outage will result if this parameter is set
// to true during the maintenance window, and a newer minor version is available,
// and RDS has enabled auto patching for that engine version.
@ -21947,7 +21970,7 @@ type ModifyDBInstanceInput struct {
DBInstanceIdentifier *string `type:"string" required:"true"`
// The name of the DB parameter group to apply to the DB instance. Changing
// this setting does not result in an outage. The parameter group name itself
// this setting doesn't result in an outage. The parameter group name itself
// is changed immediately, but the actual parameter changes are not applied
// until you reboot the instance without failover. The db instance will NOT
// be rebooted automatically and the parameter changes will NOT be applied during
@ -22008,7 +22031,7 @@ type ModifyDBInstanceInput struct {
DBPortNumber *int64 `type:"integer"`
// A list of DB security groups to authorize on this DB instance. Changing this
// setting does not result in an outage and the change is asynchronously applied
// setting doesn't result in an outage and the change is asynchronously applied
// as soon as possible.
//
// Constraints:
@ -22075,7 +22098,7 @@ type ModifyDBInstanceInput struct {
// The new Provisioned IOPS (I/O operations per second) value for the RDS instance.
//
// Changing this setting does not result in an outage and the change is applied
// Changing this setting doesn't result in an outage and the change is applied
// during the next maintenance window unless the ApplyImmediately parameter
// is set to true for this request. If you are migrating from Provisioned IOPS
// to standard storage, set this value to 0. The DB instance will require a
@ -22110,7 +22133,7 @@ type ModifyDBInstanceInput struct {
// The new password for the master user. The password can include any printable
// ASCII character except "/", """, or "@".
//
// Changing this parameter does not result in an outage and the change is asynchronously
// Changing this parameter doesn't result in an outage and the change is asynchronously
// applied as soon as possible. Between the time of the request and the completion
// of the request, the MasterUserPassword element exists in the PendingModifiedValues
// element of the operation response.
@ -22167,7 +22190,7 @@ type ModifyDBInstanceInput struct {
MonitoringRoleArn *string `type:"string"`
// Specifies if the DB instance is a Multi-AZ deployment. Changing this parameter
// does not result in an outage and the change is applied during the next maintenance
// doesn't result in an outage and the change is applied during the next maintenance
// window unless the ApplyImmediately parameter is set to true for this request.
MultiAZ *bool `type:"boolean"`
@ -22189,7 +22212,7 @@ type ModifyDBInstanceInput struct {
NewDBInstanceIdentifier *string `type:"string"`
// Indicates that the DB instance should be associated with the specified option
// group. Changing this parameter does not result in an outage except in the
// group. Changing this parameter doesn't result in an outage except in the
// following case and the change is applied during the next maintenance window
// unless the ApplyImmediately parameter is set to true for this request. If
// the parameter change results in an option group that enables OEM, this change
@ -22208,7 +22231,7 @@ type ModifyDBInstanceInput struct {
// The daily time range during which automated backups are created if automated
// backups are enabled, as determined by the BackupRetentionPeriod parameter.
// Changing this parameter does not result in an outage and the change is asynchronously
// Changing this parameter doesn't result in an outage and the change is asynchronously
// applied as soon as possible.
//
// Amazon Aurora
@ -22228,8 +22251,8 @@ type ModifyDBInstanceInput struct {
PreferredBackupWindow *string `type:"string"`
// The weekly time range (in UTC) during which system maintenance can occur,
// which might result in an outage. Changing this parameter does not result
// in an outage, except in the following situation, and the change is asynchronously
// which might result in an outage. Changing this parameter doesn't result in
// an outage, except in the following situation, and the change is asynchronously
// applied as soon as possible. If there are pending actions that cause a reboot,
// and the maintenance window is changed to include the current time, then changing
// this parameter will cause a reboot of the DB instance. If moving this window
@ -25573,9 +25596,13 @@ type RestoreDBClusterFromS3Input struct {
// The version number of the database engine to use.
//
// Aurora
// Aurora MySQL
//
// Example: 5.6.10a
//
// Aurora PostgreSQL
//
// Example: 9.6.3
EngineVersion *string `type:"string"`
// The AWS KMS key identifier for an encrypted DB cluster.
@ -26492,8 +26519,6 @@ type RestoreDBInstanceFromDBSnapshotInput struct {
//
// * For MySQL 5.7, minor version 5.7.16 or higher
//
// * Aurora 5.6 or higher.
//
// Default: false
EnableIAMDatabaseAuthentication *bool `type:"boolean"`
@ -26506,10 +26531,6 @@ type RestoreDBInstanceFromDBSnapshotInput struct {
//
// Valid Values:
//
// * aurora
//
// * aurora-postgresql
//
// * mariadb
//
// * mysql
@ -27438,8 +27459,6 @@ type RestoreDBInstanceToPointInTimeInput struct {
//
// * For MySQL 5.7, minor version 5.7.16 or higher
//
// * Aurora 5.6 or higher.
//
// Default: false
EnableIAMDatabaseAuthentication *bool `type:"boolean"`
@ -27451,10 +27470,6 @@ type RestoreDBInstanceToPointInTimeInput struct {
//
// Valid Values:
//
// * aurora
//
// * aurora-postgresql
//
// * mariadb
//
// * mysql

View File

@ -1,8 +1,88 @@
## 1.10.0 (February 24, 2018)
NOTES:
* resource/aws_dx_lag: `number_of_connections` was deprecated and will be removed in future major version. Use `aws_dx_connection` and `aws_dx_connection_association` resources instead. Default connections will be removed as part of LAG creation automatically in future major version. ([#3367](https://github.com/terraform-providers/terraform-provider-aws/issues/3367))
FEATURES:
* **New Data Source:** `aws_inspector_rules_packages` ([#3175](https://github.com/terraform-providers/terraform-provider-aws/issues/3175))
* **New Resource:** `aws_api_gateway_vpc_link` ([#2512](https://github.com/terraform-providers/terraform-provider-aws/issues/2512))
* **New Resource:** `aws_appsync_graphql_api` ([#2494](https://github.com/terraform-providers/terraform-provider-aws/issues/2494))
* **New Resource:** `aws_dax_cluster` ([#2884](https://github.com/terraform-providers/terraform-provider-aws/issues/2884))
* **New Resource:** `aws_gamelift_alias` ([#3353](https://github.com/terraform-providers/terraform-provider-aws/issues/3353))
* **New Resource:** `aws_gamelift_fleet` ([#3327](https://github.com/terraform-providers/terraform-provider-aws/issues/3327))
* **New Resource:** `aws_lb_listener_certificate` ([#2686](https://github.com/terraform-providers/terraform-provider-aws/issues/2686))
* **New Resource:** `aws_s3_bucket_metric` ([#916](https://github.com/terraform-providers/terraform-provider-aws/issues/916))
* **New Resource:** `aws_ses_domain_mail_from` ([#2029](https://github.com/terraform-providers/terraform-provider-aws/issues/2029))
* **New Resource:** `aws_iot_thing_type` ([#3302](https://github.com/terraform-providers/terraform-provider-aws/issues/3302))
ENHANCEMENTS:
* data-source/aws_kms_alias: Always return `target_key_arn` ([#3304](https://github.com/terraform-providers/terraform-provider-aws/issues/3304))
* resource/aws_autoscaling_policy: Add support for `target_tracking_configuration` ([#2611](https://github.com/terraform-providers/terraform-provider-aws/issues/2611))
* resource/aws_codebuild_project: Support VPC configuration ([#2547](https://github.com/terraform-providers/terraform-provider-aws/issues/2547)) ([#3324](https://github.com/terraform-providers/terraform-provider-aws/issues/3324))
* resource/aws_cloudtrail: Add `event_selector` argument ([#2258](https://github.com/terraform-providers/terraform-provider-aws/issues/2258))
* resource/aws_codedeploy_deployment_group: Validate DeploymentReady and InstanceReady `trigger_events` ([#3412](https://github.com/terraform-providers/terraform-provider-aws/issues/3412))
* resource/aws_db_parameter_group: Validate underscore `name` during plan ([#3396](https://github.com/terraform-providers/terraform-provider-aws/issues/3396))
* resource/aws_directory_service_directory: Add `edition` argument ([#3421](https://github.com/terraform-providers/terraform-provider-aws/issues/3421))
* resource/aws_directory_service_directory: Validate `size` argument ([#3453](https://github.com/terraform-providers/terraform-provider-aws/issues/3453))
* resource/aws_dx_connection: Add support for tagging ([#2990](https://github.com/terraform-providers/terraform-provider-aws/issues/2990))
* resource/aws_dx_connection: Add support for import ([#2992](https://github.com/terraform-providers/terraform-provider-aws/issues/2992))
* resource/aws_dx_lag: Add support for tagging ([#2990](https://github.com/terraform-providers/terraform-provider-aws/issues/2990))
* resource/aws_dx_lag: Add support for import ([#2992](https://github.com/terraform-providers/terraform-provider-aws/issues/2992))
* resource/aws_emr_cluster: Add `autoscaling_policy` argument ([#2877](https://github.com/terraform-providers/terraform-provider-aws/issues/2877))
* resource/aws_emr_cluster: Add `scale_down_behavior` argument ([#3063](https://github.com/terraform-providers/terraform-provider-aws/issues/3063))
* resource/aws_instance: Expose reason of `shutting-down` state during creation ([#3371](https://github.com/terraform-providers/terraform-provider-aws/issues/3371))
* resource/aws_instance: Include size of user_data in validation error message ([#2971](https://github.com/terraform-providers/terraform-provider-aws/issues/2971))
* resource/aws_instance: Remove extra API call on creation for SGs ([#3426](https://github.com/terraform-providers/terraform-provider-aws/issues/3426))
* resource/aws_lambda_function: Recompute `version` and `qualified_arn` attributes on publish ([#3032](https://github.com/terraform-providers/terraform-provider-aws/issues/3032))
* resource/aws_lb_target_group: Allow stickiness block set to false with TCP ([#2954](https://github.com/terraform-providers/terraform-provider-aws/issues/2954))
* resource/aws_lb_listener_rule: Validate `priority` over 50000 ([#3379](https://github.com/terraform-providers/terraform-provider-aws/issues/3379))
* resource/aws_lb_listener_rule: Make `priority` argument optional ([#3219](https://github.com/terraform-providers/terraform-provider-aws/issues/3219))
* resource/aws_rds_cluster: Add `hosted_zone_id` attribute ([#3267](https://github.com/terraform-providers/terraform-provider-aws/issues/3267))
* resource/aws_rds_cluster: Add support for `source_region` (encrypted cross-region replicas) ([#3415](https://github.com/terraform-providers/terraform-provider-aws/issues/3415))
* resource/aws_rds_cluster_instance: Support `availability_zone` ([#2812](https://github.com/terraform-providers/terraform-provider-aws/issues/2812))
* resource/aws_rds_cluster_parameter_group: Validate underscore `name` during plan ([#3396](https://github.com/terraform-providers/terraform-provider-aws/issues/3396))
* resource/aws_route53_record: Add `allow_overwrite` argument ([#2926](https://github.com/terraform-providers/terraform-provider-aws/issues/2926))
* resource/aws_s3_bucket: Support for SSE-KMS replication configuration ([#2625](https://github.com/terraform-providers/terraform-provider-aws/issues/2625))
* resource/aws_spot_fleet_request: Validate `iam_fleet_role` as ARN during plan ([#3431](https://github.com/terraform-providers/terraform-provider-aws/issues/3431))
* resource/aws_sqs_queue: Validate `name` during plan ([#2837](https://github.com/terraform-providers/terraform-provider-aws/issues/2837))
* resource/aws_ssm_association: Allow updating `targets` ([#2807](https://github.com/terraform-providers/terraform-provider-aws/issues/2807))
* resource/aws_service_discovery_service: Support routing policy and update the type of DNS record ([#3273](https://github.com/terraform-providers/terraform-provider-aws/issues/3273))
BUG FIXES:
* data-source/aws_elb_service_account: Correct GovCloud region ([#3315](https://github.com/terraform-providers/terraform-provider-aws/issues/3315))
* resource/aws_acm_certificate_validation: Prevent crash on `validation_record_fqdns` ([#3336](https://github.com/terraform-providers/terraform-provider-aws/issues/3336))
* resource/aws_acm_certificate_validation: Fix `validation_record_fqdns` handling with combined root and wildcard requests ([#3366](https://github.com/terraform-providers/terraform-provider-aws/issues/3366))
* resource/aws_autoscaling_policy: `cooldown` with zero value not set correctly ([#2809](https://github.com/terraform-providers/terraform-provider-aws/issues/2809))
* resource/aws_cloudtrail: Now respects initial `include_global_service_events = false` ([#2817](https://github.com/terraform-providers/terraform-provider-aws/issues/2817))
* resource/aws_dynamodb_table: Retry deletion on ResourceInUseException ([#3355](https://github.com/terraform-providers/terraform-provider-aws/issues/3355))
* resource/aws_dx_lag: `number_of_connections` deprecated (made Optional). Omitting field may now prevent spurious diffs. ([#3367](https://github.com/terraform-providers/terraform-provider-aws/issues/3367))
* resource/aws_ecs_service: Retry DescribeServices after creation ([#3387](https://github.com/terraform-providers/terraform-provider-aws/issues/3387))
* resource/aws_ecs_service: Fix reading `load_balancer` into state ([#3502](https://github.com/terraform-providers/terraform-provider-aws/issues/3502))
* resource/aws_elasticsearch_domain: Retry creation on `ValidationException` ([#3375](https://github.com/terraform-providers/terraform-provider-aws/issues/3375))
* resource/aws_iam_user_ssh_key: Correctly set status after creation ([#3390](https://github.com/terraform-providers/terraform-provider-aws/issues/3390))
* resource/aws_instance: Bump deletion timeout to 20mins ([#3452](https://github.com/terraform-providers/terraform-provider-aws/issues/3452))
* resource/aws_kinesis_firehose_delivery_stream: Retry on additional IAM eventual consistency errors ([#3381](https://github.com/terraform-providers/terraform-provider-aws/issues/3381))
* resource/aws_route53_record: Trim trailing dot during import ([#3321](https://github.com/terraform-providers/terraform-provider-aws/issues/3321))
* resource/aws_s3_bucket: Prevent crashes on location and replication read retry timeouts ([#3338](https://github.com/terraform-providers/terraform-provider-aws/issues/3338))
* resource/aws_s3_bucket: Always set replication_configuration in state ([#3349](https://github.com/terraform-providers/terraform-provider-aws/issues/3349))
* resource/aws_security_group: Allow empty rule description ([#2846](https://github.com/terraform-providers/terraform-provider-aws/issues/2846))
* resource/aws_sns_topic: Fix exit after updating first attribute ([#3360](https://github.com/terraform-providers/terraform-provider-aws/issues/3360))
* resource/aws_spot_instance_request: Bump delete timeout to 20mins ([#3435](https://github.com/terraform-providers/terraform-provider-aws/issues/3435))
* resource/aws_sqs_queue: Skip SQS ListQueueTags in aws-us-gov partition ([#3376](https://github.com/terraform-providers/terraform-provider-aws/issues/3376))
* resource/aws_vpc_endpoint: Treat pending as expected state during deletion ([#3370](https://github.com/terraform-providers/terraform-provider-aws/issues/3370))
* resource/aws_vpc_peering_connection: Treat `pending-acceptance` as expected during deletion ([#3393](https://github.com/terraform-providers/terraform-provider-aws/issues/3393))
* resource/aws_cognito_user_pool_client: support `USER_PASSWORD_AUTH` for explicit_auth_flows ([#3417](https://github.com/terraform-providers/terraform-provider-aws/issues/3417))
## 1.9.0 (February 09, 2018)
NOTES:
* data-source/aws_region: `current` field is deprecated and the data source defaults to the provider region if no endpoint or name is specified ([#3157](https://github.com/terraform-providers/terraform-provider-aws/issues/3157))
* data-source/aws_iam_policy_document: Statements are now de-duplicated per `Sid`s ([#2890](https://github.com/terraform-providers/terraform-provider-aws/issues/2890))
FEATURES:

View File

@ -18,6 +18,7 @@ import (
"github.com/aws/aws-sdk-go/service/acm"
"github.com/aws/aws-sdk-go/service/apigateway"
"github.com/aws/aws-sdk-go/service/applicationautoscaling"
"github.com/aws/aws-sdk-go/service/appsync"
"github.com/aws/aws-sdk-go/service/athena"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/aws/aws-sdk-go/service/batch"
@ -36,6 +37,7 @@ import (
"github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
"github.com/aws/aws-sdk-go/service/configservice"
"github.com/aws/aws-sdk-go/service/databasemigrationservice"
"github.com/aws/aws-sdk-go/service/dax"
"github.com/aws/aws-sdk-go/service/devicefarm"
"github.com/aws/aws-sdk-go/service/directconnect"
"github.com/aws/aws-sdk-go/service/directoryservice"
@ -148,6 +150,7 @@ type AWSClient struct {
cognitoconn *cognitoidentity.CognitoIdentity
cognitoidpconn *cognitoidentityprovider.CognitoIdentityProvider
configconn *configservice.ConfigService
daxconn *dax.DAX
devicefarmconn *devicefarm.DeviceFarm
dmsconn *databasemigrationservice.DatabaseMigrationService
dsconn *directoryservice.DirectoryService
@ -208,6 +211,7 @@ type AWSClient struct {
athenaconn *athena.Athena
dxconn *directconnect.DirectConnect
mediastoreconn *mediastore.MediaStore
appsyncconn *appsync.AppSync
}
func (c *AWSClient) S3() *s3.S3 {
@ -413,8 +417,9 @@ func (c *Config) Client() (interface{}, error) {
client.configconn = configservice.New(sess)
client.cognitoconn = cognitoidentity.New(sess)
client.cognitoidpconn = cognitoidentityprovider.New(sess)
client.dmsconn = databasemigrationservice.New(sess)
client.codepipelineconn = codepipeline.New(sess)
client.daxconn = dax.New(awsDynamoSess)
client.dmsconn = databasemigrationservice.New(sess)
client.dsconn = directoryservice.New(sess)
client.dynamodbconn = dynamodb.New(awsDynamoSess)
client.ecrconn = ecr.New(awsEcrSess)
@ -458,6 +463,7 @@ func (c *Config) Client() (interface{}, error) {
client.athenaconn = athena.New(sess)
client.dxconn = directconnect.New(sess)
client.mediastoreconn = mediastore.New(sess)
client.appsyncconn = appsync.New(sess)
// Workaround for https://github.com/aws/aws-sdk-go/issues/1376
client.kinesisconn.Handlers.Retry.PushBack(func(r *request.Request) {

View File

@ -23,7 +23,7 @@ var elbAccountIdPerRegionMap = map[string]string{
"sa-east-1": "507241528517",
"us-east-1": "127311923021",
"us-east-2": "033677994240",
"us-gov-west": "048591011584",
"us-gov-west-1": "048591011584",
"us-west-1": "027434742980",
"us-west-2": "797873946194",
}

View File

@ -0,0 +1,56 @@
package aws
import (
"errors"
"fmt"
"log"
"sort"
"time"
"github.com/aws/aws-sdk-go/service/inspector"
"github.com/hashicorp/terraform/helper/schema"
)
// dataSourceAwsInspectorRulesPackages returns the schema for the
// aws_inspector_rules_packages data source, which exposes the ARNs of the
// Inspector rules packages visible to the caller.
func dataSourceAwsInspectorRulesPackages() *schema.Resource {
	return &schema.Resource{
		Read: dataSourceAwsInspectorRulesPackagesRead,

		Schema: map[string]*schema.Schema{
			// Sorted list of rules package ARNs; populated on read.
			"arns": {
				Type:     schema.TypeList,
				Computed: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},
		},
	}
}
// dataSourceAwsInspectorRulesPackagesRead pages through ListRulesPackages,
// collects every rules package ARN, sorts them for a stable ordering, and
// stores the result under the "arns" attribute. Errors if no packages exist.
func dataSourceAwsInspectorRulesPackagesRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).inspectorconn

	log.Printf("[DEBUG] Reading Rules Packages.")
	// Data sources need a synthetic ID; a timestamp is unique per read.
	d.SetId(time.Now().UTC().String())

	var packageArns []string
	collect := func(page *inspector.ListRulesPackagesOutput, lastPage bool) bool {
		for _, arn := range page.RulesPackageArns {
			packageArns = append(packageArns, *arn)
		}
		return !lastPage
	}
	if err := conn.ListRulesPackagesPages(&inspector.ListRulesPackagesInput{}, collect); err != nil {
		return fmt.Errorf("Error fetching Rules Packages: %s", err)
	}
	if len(packageArns) == 0 {
		return errors.New("No rules packages found.")
	}

	sort.Strings(packageArns)
	d.Set("arns", packageArns)
	return nil
}

View File

@ -5,7 +5,7 @@ import (
"log"
"time"
"github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/kms"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/schema"
@ -64,25 +64,27 @@ func dataSourceAwsKmsAliasRead(d *schema.ResourceData, meta interface{}) error {
d.SetId(time.Now().UTC().String())
d.Set("arn", alias.AliasArn)
// Some aliases do not return TargetKeyId (e.g. aliases for AWS services or
// aliases not associated with a Customer Managed Key (CMK))
// ListAliases can return an alias for an AWS service key (e.g.
// alias/aws/rds) without a TargetKeyId if the alias has not yet been
// used for the first time. In that situation, calling DescribeKey will
// associate an actual key with the alias, and the next call to
// ListAliases will have a TargetKeyId for the alias.
//
// For a simpler codepath, we always call DescribeKey with the alias
// name to get the target key's ARN and Id direct from AWS.
//
// https://docs.aws.amazon.com/kms/latest/APIReference/API_ListAliases.html
if alias.TargetKeyId != nil {
aliasARN, err := arn.Parse(*alias.AliasArn)
if err != nil {
return err
}
targetKeyARN := arn.ARN{
Partition: aliasARN.Partition,
Service: aliasARN.Service,
Region: aliasARN.Region,
AccountID: aliasARN.AccountID,
Resource: fmt.Sprintf("key/%s", *alias.TargetKeyId),
}
d.Set("target_key_arn", targetKeyARN.String())
d.Set("target_key_id", alias.TargetKeyId)
req := &kms.DescribeKeyInput{
KeyId: aws.String(target.(string)),
}
resp, err := conn.DescribeKey(req)
if err != nil {
return errwrap.Wrapf("Error calling KMS DescribeKey: {{err}}", err)
}
d.Set("target_key_arn", resp.KeyMetadata.Arn)
d.Set("target_key_id", resp.KeyMetadata.KeyId)
return nil
}

View File

@ -200,6 +200,7 @@ func Provider() terraform.ResourceProvider {
"aws_iam_server_certificate": dataSourceAwsIAMServerCertificate(),
"aws_iam_user": dataSourceAwsIAMUser(),
"aws_internet_gateway": dataSourceAwsInternetGateway(),
"aws_inspector_rules_packages": dataSourceAwsInspectorRulesPackages(),
"aws_instance": dataSourceAwsInstance(),
"aws_instances": dataSourceAwsInstances(),
"aws_ip_ranges": dataSourceAwsIPRanges(),
@ -267,10 +268,12 @@ func Provider() terraform.ResourceProvider {
"aws_api_gateway_stage": resourceAwsApiGatewayStage(),
"aws_api_gateway_usage_plan": resourceAwsApiGatewayUsagePlan(),
"aws_api_gateway_usage_plan_key": resourceAwsApiGatewayUsagePlanKey(),
"aws_api_gateway_vpc_link": resourceAwsApiGatewayVpcLink(),
"aws_app_cookie_stickiness_policy": resourceAwsAppCookieStickinessPolicy(),
"aws_appautoscaling_target": resourceAwsAppautoscalingTarget(),
"aws_appautoscaling_policy": resourceAwsAppautoscalingPolicy(),
"aws_appautoscaling_scheduled_action": resourceAwsAppautoscalingScheduledAction(),
"aws_appsync_graphql_api": resourceAwsAppsyncGraphqlApi(),
"aws_athena_database": resourceAwsAthenaDatabase(),
"aws_athena_named_query": resourceAwsAthenaNamedQuery(),
"aws_autoscaling_attachment": resourceAwsAutoscalingAttachment(),
@ -314,6 +317,7 @@ func Provider() terraform.ResourceProvider {
"aws_codebuild_project": resourceAwsCodeBuildProject(),
"aws_codepipeline": resourceAwsCodePipeline(),
"aws_customer_gateway": resourceAwsCustomerGateway(),
"aws_dax_cluster": resourceAwsDaxCluster(),
"aws_db_event_subscription": resourceAwsDbEventSubscription(),
"aws_db_instance": resourceAwsDbInstance(),
"aws_db_option_group": resourceAwsDbOptionGroup(),
@ -366,7 +370,9 @@ func Provider() terraform.ResourceProvider {
"aws_emr_instance_group": resourceAwsEMRInstanceGroup(),
"aws_emr_security_configuration": resourceAwsEMRSecurityConfiguration(),
"aws_flow_log": resourceAwsFlowLog(),
"aws_gamelift_alias": resourceAwsGameliftAlias(),
"aws_gamelift_build": resourceAwsGameliftBuild(),
"aws_gamelift_fleet": resourceAwsGameliftFleet(),
"aws_glacier_vault": resourceAwsGlacierVault(),
"aws_glue_catalog_database": resourceAwsGlueCatalogDatabase(),
"aws_guardduty_detector": resourceAwsGuardDutyDetector(),
@ -401,6 +407,7 @@ func Provider() terraform.ResourceProvider {
"aws_internet_gateway": resourceAwsInternetGateway(),
"aws_iot_certificate": resourceAwsIotCertificate(),
"aws_iot_policy": resourceAwsIotPolicy(),
"aws_iot_thing_type": resourceAwsIotThingType(),
"aws_iot_topic_rule": resourceAwsIotTopicRule(),
"aws_key_pair": resourceAwsKeyPair(),
"aws_kinesis_firehose_delivery_stream": resourceAwsKinesisFirehoseDeliveryStream(),
@ -470,6 +477,7 @@ func Provider() terraform.ResourceProvider {
"aws_ses_active_receipt_rule_set": resourceAwsSesActiveReceiptRuleSet(),
"aws_ses_domain_identity": resourceAwsSesDomainIdentity(),
"aws_ses_domain_dkim": resourceAwsSesDomainDkim(),
"aws_ses_domain_mail_from": resourceAwsSesDomainMailFrom(),
"aws_ses_receipt_filter": resourceAwsSesReceiptFilter(),
"aws_ses_receipt_rule": resourceAwsSesReceiptRule(),
"aws_ses_receipt_rule_set": resourceAwsSesReceiptRuleSet(),
@ -480,6 +488,7 @@ func Provider() terraform.ResourceProvider {
"aws_s3_bucket_policy": resourceAwsS3BucketPolicy(),
"aws_s3_bucket_object": resourceAwsS3BucketObject(),
"aws_s3_bucket_notification": resourceAwsS3BucketNotification(),
"aws_s3_bucket_metric": resourceAwsS3BucketMetric(),
"aws_security_group": resourceAwsSecurityGroup(),
"aws_network_interface_sg_attachment": resourceAwsNetworkInterfaceSGAttachment(),
"aws_default_security_group": resourceAwsDefaultSecurityGroup(),
@ -554,6 +563,8 @@ func Provider() terraform.ResourceProvider {
"aws_lb": resourceAwsLb(),
"aws_alb_listener": resourceAwsLbListener(),
"aws_lb_listener": resourceAwsLbListener(),
"aws_alb_listener_certificate": resourceAwsLbListenerCertificate(),
"aws_lb_listener_certificate": resourceAwsLbListenerCertificate(),
"aws_alb_listener_rule": resourceAwsLbbListenerRule(),
"aws_lb_listener_rule": resourceAwsLbbListenerRule(),
"aws_alb_target_group": resourceAwsLbTargetGroup(),

View File

@ -3,13 +3,12 @@ package aws
import (
"fmt"
"log"
"reflect"
"sort"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/acm"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
@ -59,7 +58,7 @@ func resourceAwsAcmCertificateValidationCreate(d *schema.ResourceData, meta inte
}
if validation_record_fqdns, ok := d.GetOk("validation_record_fqdns"); ok {
err := resourceAwsAcmCertificateCheckValidationRecords(validation_record_fqdns.(*schema.Set).List(), resp.Certificate)
err := resourceAwsAcmCertificateCheckValidationRecords(validation_record_fqdns.(*schema.Set).List(), resp.Certificate, acmconn)
if err != nil {
return err
}
@ -83,28 +82,52 @@ func resourceAwsAcmCertificateValidationCreate(d *schema.ResourceData, meta inte
})
}
func resourceAwsAcmCertificateCheckValidationRecords(validation_record_fqdns []interface{}, cert *acm.CertificateDetail) error {
expected_fqdns := make([]string, len(cert.DomainValidationOptions))
for i, v := range cert.DomainValidationOptions {
if *v.ValidationMethod == acm.ValidationMethodDns {
expected_fqdns[i] = strings.TrimSuffix(*v.ResourceRecord.Name, ".")
func resourceAwsAcmCertificateCheckValidationRecords(validationRecordFqdns []interface{}, cert *acm.CertificateDetail, conn *acm.ACM) error {
expectedFqdns := make(map[string]*acm.DomainValidation)
if len(cert.DomainValidationOptions) == 0 {
input := &acm.DescribeCertificateInput{
CertificateArn: cert.CertificateArn,
}
err := resource.Retry(1*time.Minute, func() *resource.RetryError {
log.Printf("[DEBUG] Certificate domain validation options empty for %q, retrying", cert.CertificateArn)
output, err := conn.DescribeCertificate(input)
if err != nil {
return resource.NonRetryableError(err)
}
if len(output.Certificate.DomainValidationOptions) == 0 {
return resource.RetryableError(fmt.Errorf("Certificate domain validation options empty for %s", *cert.CertificateArn))
}
cert = output.Certificate
return nil
})
if err != nil {
return err
}
}
for _, v := range cert.DomainValidationOptions {
if v.ValidationMethod != nil {
if *v.ValidationMethod != acm.ValidationMethodDns {
return fmt.Errorf("validation_record_fqdns is only valid for DNS validation")
}
newExpectedFqdn := strings.TrimSuffix(*v.ResourceRecord.Name, ".")
expectedFqdns[newExpectedFqdn] = v
} else if len(v.ValidationEmails) > 0 {
// ACM API sometimes is not sending ValidationMethod for EMAIL validation
return fmt.Errorf("validation_record_fqdns is only valid for DNS validation")
}
}
actual_validation_record_fqdns := make([]string, 0, len(validation_record_fqdns))
for _, v := range validation_record_fqdns {
val := v.(string)
actual_validation_record_fqdns = append(actual_validation_record_fqdns, strings.TrimSuffix(val, "."))
for _, v := range validationRecordFqdns {
delete(expectedFqdns, strings.TrimSuffix(v.(string), "."))
}
sort.Strings(expected_fqdns)
sort.Strings(actual_validation_record_fqdns)
log.Printf("[DEBUG] Checking validation_record_fqdns. Expected: %v, Actual: %v", expected_fqdns, actual_validation_record_fqdns)
if !reflect.DeepEqual(expected_fqdns, actual_validation_record_fqdns) {
return fmt.Errorf("Certificate needs %v to be set but only %v was passed to validation_record_fqdns", expected_fqdns, actual_validation_record_fqdns)
if len(expectedFqdns) > 0 {
var errors error
for expectedFqdn, domainValidation := range expectedFqdns {
errors = multierror.Append(errors, fmt.Errorf("missing %s DNS validation record: %s", *domainValidation.DomainName, expectedFqdn))
}
return errors
}
return nil

View File

@ -0,0 +1,205 @@
package aws
import (
"fmt"
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/apigateway"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
// resourceAwsApiGatewayVpcLink defines the aws_api_gateway_vpc_link resource,
// which links API Gateway to targets inside a VPC.
func resourceAwsApiGatewayVpcLink() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsApiGatewayVpcLinkCreate,
		Read:   resourceAwsApiGatewayVpcLinkRead,
		Update: resourceAwsApiGatewayVpcLinkUpdate,
		Delete: resourceAwsApiGatewayVpcLinkDelete,

		Schema: map[string]*schema.Schema{
			// Display name of the VPC link; updatable in place.
			"name": {
				Type:     schema.TypeString,
				Required: true,
			},
			"description": {
				Type:     schema.TypeString,
				Optional: true,
			},
			// Target ARNs for the link. Limited to one element here, and any
			// change forces replacement of the link.
			"target_arns": {
				Type:     schema.TypeSet,
				MaxItems: 1,
				Required: true,
				ForceNew: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},
		},
	}
}
// resourceAwsApiGatewayVpcLinkCreate creates the VPC link and blocks until
// API Gateway reports it AVAILABLE, clearing the ID again on timeout/failure.
func resourceAwsApiGatewayVpcLinkCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).apigateway

	params := &apigateway.CreateVpcLinkInput{
		Name:       aws.String(d.Get("name").(string)),
		TargetArns: expandStringList(d.Get("target_arns").(*schema.Set).List()),
	}
	if v, ok := d.GetOk("description"); ok {
		params.Description = aws.String(v.(string))
	}

	resp, err := conn.CreateVpcLink(params)
	if err != nil {
		return err
	}
	d.SetId(*resp.Id)

	// Link provisioning is asynchronous; wait for it to leave PENDING.
	wait := &resource.StateChangeConf{
		Pending:    []string{apigateway.VpcLinkStatusPending},
		Target:     []string{apigateway.VpcLinkStatusAvailable},
		Refresh:    apigatewayVpcLinkRefreshStatusFunc(conn, *resp.Id),
		Timeout:    8 * time.Minute,
		MinTimeout: 3 * time.Second,
	}
	if _, err := wait.WaitForState(); err != nil {
		d.SetId("")
		return fmt.Errorf("[WARN] Error waiting for APIGateway Vpc Link status to be \"%s\": %s", apigateway.VpcLinkStatusAvailable, err)
	}
	return nil
}
// resourceAwsApiGatewayVpcLinkRead refreshes state for an API Gateway VPC
// link, removing it from state when the remote object no longer exists.
func resourceAwsApiGatewayVpcLinkRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).apigateway

	input := &apigateway.GetVpcLinkInput{
		VpcLinkId: aws.String(d.Id()),
	}

	resp, err := conn.GetVpcLink(input)
	if err != nil {
		if isAWSErr(err, apigateway.ErrCodeNotFoundException, "") {
			log.Printf("[WARN] VPC Link %s not found, removing from state", d.Id())
			d.SetId("")
			return nil
		}
		return err
	}

	d.Set("name", resp.Name)
	d.Set("description", resp.Description)
	// Bug fix: the schema attribute is "target_arns" (plural); setting
	// "target_arn" silently discarded the value, so refreshes never stored
	// the target ARNs and produced spurious diffs.
	d.Set("target_arns", flattenStringList(resp.TargetArns))
	return nil
}
// resourceAwsApiGatewayVpcLinkUpdate applies name/description changes as
// JSON-patch operations, then waits for the link to return to AVAILABLE.
// A vanished link is dropped from state rather than treated as an error.
func resourceAwsApiGatewayVpcLinkUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).apigateway

	// Both mutable attributes map to a simple "replace" patch on /<name>.
	ops := make([]*apigateway.PatchOperation, 0, 2)
	for _, attr := range []string{"name", "description"} {
		if d.HasChange(attr) {
			ops = append(ops, &apigateway.PatchOperation{
				Op:    aws.String("replace"),
				Path:  aws.String("/" + attr),
				Value: aws.String(d.Get(attr).(string)),
			})
		}
	}

	_, err := conn.UpdateVpcLink(&apigateway.UpdateVpcLinkInput{
		VpcLinkId:       aws.String(d.Id()),
		PatchOperations: ops,
	})
	if err != nil {
		if isAWSErr(err, apigateway.ErrCodeNotFoundException, "") {
			log.Printf("[WARN] VPC Link %s not found, removing from state", d.Id())
			d.SetId("")
			return nil
		}
		return err
	}

	// The update is applied asynchronously; wait for AVAILABLE again.
	wait := &resource.StateChangeConf{
		Pending:    []string{apigateway.VpcLinkStatusPending},
		Target:     []string{apigateway.VpcLinkStatusAvailable},
		Refresh:    apigatewayVpcLinkRefreshStatusFunc(conn, d.Id()),
		Timeout:    8 * time.Minute,
		MinTimeout: 3 * time.Second,
	}
	if _, err := wait.WaitForState(); err != nil {
		return fmt.Errorf("[WARN] Error waiting for APIGateway Vpc Link status to be \"%s\": %s", apigateway.VpcLinkStatusAvailable, err)
	}
	return nil
}
// resourceAwsApiGatewayVpcLinkDelete issues the delete and then polls until
// the link disappears. A NotFound on the initial delete means the work is
// already done and is treated as success.
func resourceAwsApiGatewayVpcLinkDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).apigateway

	_, err := conn.DeleteVpcLink(&apigateway.DeleteVpcLinkInput{
		VpcLinkId: aws.String(d.Id()),
	})
	if err != nil {
		if isAWSErr(err, apigateway.ErrCodeNotFoundException, "") {
			return nil
		}
		return err
	}

	// Poll until GetVpcLink returns NotFound; the refresh maps that case to
	// the empty-string state, which is the sole target below.
	wait := resource.StateChangeConf{
		Pending: []string{apigateway.VpcLinkStatusPending,
			apigateway.VpcLinkStatusAvailable,
			apigateway.VpcLinkStatusDeleting},
		Target:     []string{""},
		Timeout:    5 * time.Minute,
		MinTimeout: 1 * time.Second,
		Refresh: func() (interface{}, string, error) {
			resp, err := conn.GetVpcLink(&apigateway.GetVpcLinkInput{
				VpcLinkId: aws.String(d.Id()),
			})
			if err == nil {
				return resp, *resp.Status, nil
			}
			if isAWSErr(err, apigateway.ErrCodeNotFoundException, "") {
				// Gone: return any non-nil value with the "" target state.
				return 1, "", nil
			}
			return nil, "failed", err
		},
	}
	if _, err := wait.WaitForState(); err != nil {
		return err
	}
	return nil
}
// apigatewayVpcLinkRefreshStatusFunc builds a StateRefreshFunc that reports
// the current status string of the VPC link identified by vl.
func apigatewayVpcLinkRefreshStatusFunc(conn *apigateway.APIGateway, vl string) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		resp, err := conn.GetVpcLink(&apigateway.GetVpcLinkInput{
			VpcLinkId: aws.String(vl),
		})
		if err != nil {
			return nil, "failed", err
		}
		return resp, *resp.Status, nil
	}
}

View File

@ -0,0 +1,195 @@
package aws
import (
"fmt"
"log"
"regexp"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/appsync"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/validation"
)
// resourceAwsAppsyncGraphqlApi defines the aws_appsync_graphql_api resource.
func resourceAwsAppsyncGraphqlApi() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsAppsyncGraphqlApiCreate,
		Read:   resourceAwsAppsyncGraphqlApiRead,
		Update: resourceAwsAppsyncGraphqlApiUpdate,
		Delete: resourceAwsAppsyncGraphqlApiDelete,

		Schema: map[string]*schema.Schema{
			// How clients authenticate: API key, IAM, or Cognito user pools.
			"authentication_type": {
				Type:     schema.TypeString,
				Required: true,
				ValidateFunc: validation.StringInSlice([]string{
					appsync.AuthenticationTypeApiKey,
					appsync.AuthenticationTypeAwsIam,
					appsync.AuthenticationTypeAmazonCognitoUserPools,
				}, false),
			},
			// API name; must look like a GraphQL identifier.
			"name": {
				Type:     schema.TypeString,
				Required: true,
				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
					value := v.(string)
					if !regexp.MustCompile(`[_A-Za-z][_0-9A-Za-z]*`).MatchString(value) {
						errors = append(errors, fmt.Errorf("%q must match [_A-Za-z][_0-9A-Za-z]*", k))
					}
					return
				},
			},
			// Cognito user pool settings; only meaningful with the
			// AMAZON_COGNITO_USER_POOLS authentication type.
			"user_pool_config": {
				Type:     schema.TypeList,
				Optional: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						// Optional regex restricting which app client IDs may call the API.
						"app_id_client_regex": {
							Type:     schema.TypeString,
							Optional: true,
						},
						"aws_region": {
							Type:     schema.TypeString,
							Required: true,
						},
						// ALLOW or DENY for requests not matching a configured rule.
						"default_action": {
							Type:     schema.TypeString,
							Required: true,
							ValidateFunc: validation.StringInSlice([]string{
								appsync.DefaultActionAllow,
								appsync.DefaultActionDeny,
							}, false),
						},
						"user_pool_id": {
							Type:     schema.TypeString,
							Required: true,
						},
					},
				},
			},
			// ARN of the GraphQL API, set by AWS.
			"arn": {
				Type:     schema.TypeString,
				Computed: true,
			},
		},
	}
}
// resourceAwsAppsyncGraphqlApiCreate creates the GraphQL API and records its
// AWS-assigned ID and ARN in state.
func resourceAwsAppsyncGraphqlApiCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).appsyncconn

	params := &appsync.CreateGraphqlApiInput{
		AuthenticationType: aws.String(d.Get("authentication_type").(string)),
		Name:               aws.String(d.Get("name").(string)),
	}
	if v, ok := d.GetOk("user_pool_config"); ok {
		params.UserPoolConfig = expandAppsyncGraphqlApiUserPoolConfig(v.([]interface{}))
	}

	out, err := conn.CreateGraphqlApi(params)
	if err != nil {
		return err
	}

	d.SetId(*out.GraphqlApi.ApiId)
	d.Set("arn", out.GraphqlApi.Arn)
	return nil
}
// resourceAwsAppsyncGraphqlApiRead refreshes the GraphQL API's attributes,
// dropping the resource from state if AWS no longer knows it.
func resourceAwsAppsyncGraphqlApiRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).appsyncconn

	out, err := conn.GetGraphqlApi(&appsync.GetGraphqlApiInput{
		ApiId: aws.String(d.Id()),
	})
	if err != nil {
		if isAWSErr(err, appsync.ErrCodeNotFoundException, "") {
			log.Printf("[WARN] No such entity found for Appsync Graphql API (%s)", d.Id())
			d.SetId("")
			return nil
		}
		return err
	}

	api := out.GraphqlApi
	d.Set("authentication_type", api.AuthenticationType)
	d.Set("name", api.Name)
	d.Set("user_pool_config", flattenAppsyncGraphqlApiUserPoolConfig(api.UserPoolConfig))
	d.Set("arn", api.Arn)
	return nil
}
// resourceAwsAppsyncGraphqlApiUpdate pushes changed attributes to AWS and
// then re-reads the resource to refresh state. Name is always included
// because UpdateGraphqlApi requires it.
func resourceAwsAppsyncGraphqlApiUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).appsyncconn

	params := &appsync.UpdateGraphqlApiInput{
		ApiId: aws.String(d.Id()),
		Name:  aws.String(d.Get("name").(string)),
	}
	if d.HasChange("authentication_type") {
		params.AuthenticationType = aws.String(d.Get("authentication_type").(string))
	}
	if d.HasChange("user_pool_config") {
		params.UserPoolConfig = expandAppsyncGraphqlApiUserPoolConfig(d.Get("user_pool_config").([]interface{}))
	}

	if _, err := conn.UpdateGraphqlApi(params); err != nil {
		return err
	}
	return resourceAwsAppsyncGraphqlApiRead(d, meta)
}
// resourceAwsAppsyncGraphqlApiDelete deletes the GraphQL API; a NotFound
// response means it is already gone and counts as success.
func resourceAwsAppsyncGraphqlApiDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).appsyncconn

	_, err := conn.DeleteGraphqlApi(&appsync.DeleteGraphqlApiInput{
		ApiId: aws.String(d.Id()),
	})
	if err == nil || isAWSErr(err, appsync.ErrCodeNotFoundException, "") {
		return nil
	}
	return err
}
// expandAppsyncGraphqlApiUserPoolConfig converts the Terraform user_pool_config
// list into the AppSync API type; returns nil when the list is empty.
func expandAppsyncGraphqlApiUserPoolConfig(config []interface{}) *appsync.UserPoolConfig {
	if len(config) == 0 {
		return nil
	}

	data := config[0].(map[string]interface{})
	result := &appsync.UserPoolConfig{
		AwsRegion:     aws.String(data["aws_region"].(string)),
		DefaultAction: aws.String(data["default_action"].(string)),
		UserPoolId:    aws.String(data["user_pool_id"].(string)),
	}
	// The regex is optional; omit it rather than sending an empty string.
	if regex, ok := data["app_id_client_regex"].(string); ok && regex != "" {
		result.AppIdClientRegex = aws.String(regex)
	}
	return result
}
// flattenAppsyncGraphqlApiUserPoolConfig converts the AppSync API type back
// into the Terraform list form; nil flattens to an empty list.
func flattenAppsyncGraphqlApiUserPoolConfig(upc *appsync.UserPoolConfig) []interface{} {
	if upc == nil {
		return []interface{}{}
	}

	out := map[string]interface{}{
		"aws_region":     *upc.AwsRegion,
		"default_action": *upc.DefaultAction,
		"user_pool_id":   *upc.UserPoolId,
	}
	if upc.AppIdClientRegex != nil {
		out["app_id_client_regex"] = *upc.AppIdClientRegex
	}
	return []interface{}{out}
}

View File

@ -10,6 +10,7 @@ import (
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/validation"
)
func resourceAwsAutoscalingPolicy() *schema.Resource {
@ -31,7 +32,7 @@ func resourceAwsAutoscalingPolicy() *schema.Resource {
},
"adjustment_type": &schema.Schema{
Type: schema.TypeString,
Required: true,
Optional: true,
},
"autoscaling_group_name": &schema.Schema{
Type: schema.TypeString,
@ -57,8 +58,9 @@ func resourceAwsAutoscalingPolicy() *schema.Resource {
Computed: true,
},
"min_adjustment_magnitude": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
Type: schema.TypeInt,
Optional: true,
ValidateFunc: validation.IntAtLeast(1),
},
"min_adjustment_step": &schema.Schema{
Type: schema.TypeInt,
@ -93,6 +95,84 @@ func resourceAwsAutoscalingPolicy() *schema.Resource {
},
Set: resourceAwsAutoscalingScalingAdjustmentHash,
},
"target_tracking_configuration": &schema.Schema{
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"predefined_metric_specification": &schema.Schema{
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
ConflictsWith: []string{"target_tracking_configuration.0.customized_metric_specification"},
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"predefined_metric_type": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"resource_label": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
},
},
},
"customized_metric_specification": &schema.Schema{
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
ConflictsWith: []string{"target_tracking_configuration.0.predefined_metric_specification"},
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"metric_dimension": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"value": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
},
},
},
"metric_name": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"namespace": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"statistic": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"unit": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
},
},
},
"target_value": &schema.Schema{
Type: schema.TypeFloat,
Required: true,
},
"disable_scale_in": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Default: false,
},
},
},
},
},
}
}
@ -147,6 +227,7 @@ func resourceAwsAutoscalingPolicyRead(d *schema.ResourceData, meta interface{})
d.Set("name", p.PolicyName)
d.Set("scaling_adjustment", p.ScalingAdjustment)
d.Set("step_adjustment", flattenStepAdjustments(p.StepAdjustments))
d.Set("target_tracking_configuration", flattenTargetTrackingConfiguration(p.TargetTrackingConfiguration))
return nil
}
@ -204,11 +285,11 @@ func getAwsAutoscalingPutScalingPolicyInput(d *schema.ResourceData) (autoscaling
params.AdjustmentType = aws.String(v.(string))
}
if v, ok := d.GetOk("cooldown"); ok {
if v, ok := d.GetOkExists("cooldown"); ok {
params.Cooldown = aws.Int64(int64(v.(int)))
}
if v, ok := d.GetOk("estimated_instance_warmup"); ok {
if v, ok := d.GetOkExists("estimated_instance_warmup"); ok {
params.EstimatedInstanceWarmup = aws.Int64(int64(v.(int)))
}
@ -221,7 +302,7 @@ func getAwsAutoscalingPutScalingPolicyInput(d *schema.ResourceData) (autoscaling
}
//if policy_type=="SimpleScaling" then scaling_adjustment is required and 0 is allowed
if v, ok := d.GetOk("scaling_adjustment"); ok || *params.PolicyType == "SimpleScaling" {
if v, ok := d.GetOkExists("scaling_adjustment"); ok || *params.PolicyType == "SimpleScaling" {
params.ScalingAdjustment = aws.Int64(int64(v.(int)))
}
@ -233,14 +314,18 @@ func getAwsAutoscalingPutScalingPolicyInput(d *schema.ResourceData) (autoscaling
params.StepAdjustments = steps
}
if v, ok := d.GetOk("min_adjustment_magnitude"); ok {
if v, ok := d.GetOkExists("min_adjustment_magnitude"); ok {
// params.MinAdjustmentMagnitude = aws.Int64(int64(d.Get("min_adjustment_magnitude").(int)))
params.MinAdjustmentMagnitude = aws.Int64(int64(v.(int)))
} else if v, ok := d.GetOk("min_adjustment_step"); ok {
} else if v, ok := d.GetOkExists("min_adjustment_step"); ok {
// params.MinAdjustmentStep = aws.Int64(int64(d.Get("min_adjustment_step").(int)))
params.MinAdjustmentStep = aws.Int64(int64(v.(int)))
}
if v, ok := d.GetOk("target_tracking_configuration"); ok {
params.TargetTrackingConfiguration = expandTargetTrackingConfiguration(v.([]interface{}))
}
// Validate our final input to confirm it won't error when sent to AWS.
// First, SimpleScaling policy types...
if *params.PolicyType == "SimpleScaling" && params.StepAdjustments != nil {
@ -252,6 +337,9 @@ func getAwsAutoscalingPutScalingPolicyInput(d *schema.ResourceData) (autoscaling
if *params.PolicyType == "SimpleScaling" && params.EstimatedInstanceWarmup != nil {
return params, fmt.Errorf("SimpleScaling policy types cannot use estimated_instance_warmup!")
}
if *params.PolicyType == "SimpleScaling" && params.TargetTrackingConfiguration != nil {
return params, fmt.Errorf("SimpleScaling policy types cannot use target_tracking_configuration!")
}
// Second, StepScaling policy types...
if *params.PolicyType == "StepScaling" && params.ScalingAdjustment != nil {
@ -260,6 +348,29 @@ func getAwsAutoscalingPutScalingPolicyInput(d *schema.ResourceData) (autoscaling
if *params.PolicyType == "StepScaling" && params.Cooldown != nil {
return params, fmt.Errorf("StepScaling policy types cannot use cooldown!")
}
if *params.PolicyType == "StepScaling" && params.TargetTrackingConfiguration != nil {
return params, fmt.Errorf("StepScaling policy types cannot use target_tracking_configuration!")
}
// Third, TargetTrackingScaling policy types...
if *params.PolicyType == "TargetTrackingScaling" && params.AdjustmentType != nil {
return params, fmt.Errorf("TargetTrackingScaling policy types cannot use adjustment_type!")
}
if *params.PolicyType == "TargetTrackingScaling" && params.Cooldown != nil {
return params, fmt.Errorf("TargetTrackingScaling policy types cannot use cooldown!")
}
if *params.PolicyType == "TargetTrackingScaling" && params.MetricAggregationType != nil {
return params, fmt.Errorf("TargetTrackingScaling policy types cannot use metric_aggregation_type!")
}
if *params.PolicyType == "TargetTrackingScaling" && params.MinAdjustmentMagnitude != nil {
return params, fmt.Errorf("TargetTrackingScaling policy types cannot use min_adjustment_magnitude!")
}
if *params.PolicyType == "TargetTrackingScaling" && params.ScalingAdjustment != nil {
return params, fmt.Errorf("TargetTrackingScaling policy types cannot use scaling_adjustment!")
}
if *params.PolicyType == "TargetTrackingScaling" && params.StepAdjustments != nil {
return params, fmt.Errorf("TargetTrackingScaling policy types cannot use step_adjustments!")
}
return params, nil
}
@ -310,3 +421,94 @@ func resourceAwsAutoscalingScalingAdjustmentHash(v interface{}) int {
return hashcode.String(buf.String())
}
// expandTargetTrackingConfiguration converts the Terraform
// target_tracking_configuration list into the Auto Scaling API type.
// Returns nil when the attribute is absent.
func expandTargetTrackingConfiguration(configs []interface{}) *autoscaling.TargetTrackingConfiguration {
	if len(configs) < 1 {
		return nil
	}

	config := configs[0].(map[string]interface{})

	result := &autoscaling.TargetTrackingConfiguration{
		TargetValue: aws.Float64(config["target_value"].(float64)),
	}
	if v, ok := config["disable_scale_in"]; ok {
		result.DisableScaleIn = aws.Bool(v.(bool))
	}

	if v, ok := config["predefined_metric_specification"]; ok && len(v.([]interface{})) > 0 {
		spec := v.([]interface{})[0].(map[string]interface{})
		predSpec := &autoscaling.PredefinedMetricSpecification{
			PredefinedMetricType: aws.String(spec["predefined_metric_type"].(string)),
		}
		// resource_label is optional; omit when empty.
		if val, ok := spec["resource_label"]; ok && val.(string) != "" {
			predSpec.ResourceLabel = aws.String(val.(string))
		}
		result.PredefinedMetricSpecification = predSpec
	}
	if v, ok := config["customized_metric_specification"]; ok && len(v.([]interface{})) > 0 {
		spec := v.([]interface{})[0].(map[string]interface{})
		customSpec := &autoscaling.CustomizedMetricSpecification{
			Namespace:  aws.String(spec["namespace"].(string)),
			MetricName: aws.String(spec["metric_name"].(string)),
			Statistic:  aws.String(spec["statistic"].(string)),
		}
		// Consistency fix: like resource_label above, skip the optional unit
		// when it is empty instead of sending Unit: "" to the API.
		if val, ok := spec["unit"]; ok && val.(string) != "" {
			customSpec.Unit = aws.String(val.(string))
		}
		if val, ok := spec["metric_dimension"]; ok {
			if dims := val.([]interface{}); len(dims) > 0 {
				metDimList := make([]*autoscaling.MetricDimension, len(dims))
				for i := range metDimList {
					dim := dims[i].(map[string]interface{})
					metDimList[i] = &autoscaling.MetricDimension{
						Name:  aws.String(dim["name"].(string)),
						Value: aws.String(dim["value"].(string)),
					}
				}
				customSpec.Dimensions = metDimList
			}
		}
		result.CustomizedMetricSpecification = customSpec
	}
	return result
}
// flattenTargetTrackingConfiguration converts an AWS SDK target tracking
// configuration back into the single-element list form used by the Terraform
// schema. A nil configuration flattens to an empty list.
func flattenTargetTrackingConfiguration(config *autoscaling.TargetTrackingConfiguration) []interface{} {
	if config == nil {
		return []interface{}{}
	}

	result := map[string]interface{}{}
	// Guard the pointer dereferences: the API normally populates both fields,
	// but a nil pointer here would otherwise panic during a refresh.
	if config.DisableScaleIn != nil {
		result["disable_scale_in"] = *config.DisableScaleIn
	}
	if config.TargetValue != nil {
		result["target_value"] = *config.TargetValue
	}
	if config.PredefinedMetricSpecification != nil {
		spec := map[string]interface{}{}
		spec["predefined_metric_type"] = *config.PredefinedMetricSpecification.PredefinedMetricType
		if config.PredefinedMetricSpecification.ResourceLabel != nil {
			spec["resource_label"] = *config.PredefinedMetricSpecification.ResourceLabel
		}
		result["predefined_metric_specification"] = []map[string]interface{}{spec}
	}
	if config.CustomizedMetricSpecification != nil {
		spec := map[string]interface{}{}
		spec["metric_name"] = *config.CustomizedMetricSpecification.MetricName
		spec["namespace"] = *config.CustomizedMetricSpecification.Namespace
		spec["statistic"] = *config.CustomizedMetricSpecification.Statistic
		if config.CustomizedMetricSpecification.Unit != nil {
			spec["unit"] = *config.CustomizedMetricSpecification.Unit
		}
		if config.CustomizedMetricSpecification.Dimensions != nil {
			dimSpec := make([]interface{}, len(config.CustomizedMetricSpecification.Dimensions))
			for i := range dimSpec {
				dim := map[string]interface{}{}
				rawDim := config.CustomizedMetricSpecification.Dimensions[i]
				dim["name"] = *rawDim.Name
				dim["value"] = *rawDim.Value
				dimSpec[i] = dim
			}
			spec["metric_dimension"] = dimSpec
		}
		result["customized_metric_specification"] = []map[string]interface{}{spec}
	}
	return []interface{}{result}
}

View File

@ -9,6 +9,7 @@ import (
"github.com/aws/aws-sdk-go/service/cloudtrail"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/validation"
)
func resourceAwsCloudTrail() *schema.Resource {
@ -72,6 +73,51 @@ func resourceAwsCloudTrail() *schema.Resource {
Optional: true,
ValidateFunc: validateArn,
},
"event_selector": {
Type: schema.TypeList,
Optional: true,
MaxItems: 5,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"read_write_type": {
Type: schema.TypeString,
Optional: true,
Default: cloudtrail.ReadWriteTypeAll,
ValidateFunc: validation.StringInSlice([]string{
cloudtrail.ReadWriteTypeAll,
cloudtrail.ReadWriteTypeReadOnly,
cloudtrail.ReadWriteTypeWriteOnly,
}, false),
},
"include_management_events": {
Type: schema.TypeBool,
Optional: true,
Default: true,
},
"data_resource": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"type": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringInSlice([]string{"AWS::S3::Object", "AWS::Lambda::Function"}, false),
},
"values": {
Type: schema.TypeList,
Required: true,
MaxItems: 250,
Elem: &schema.Schema{Type: schema.TypeString},
},
},
},
},
},
},
},
"home_region": {
Type: schema.TypeString,
Computed: true,
@ -99,7 +145,7 @@ func resourceAwsCloudTrailCreate(d *schema.ResourceData, meta interface{}) error
if v, ok := d.GetOk("cloud_watch_logs_role_arn"); ok {
input.CloudWatchLogsRoleArn = aws.String(v.(string))
}
if v, ok := d.GetOk("include_global_service_events"); ok {
if v, ok := d.GetOkExists("include_global_service_events"); ok {
input.IncludeGlobalServiceEvents = aws.Bool(v.(bool))
}
if v, ok := d.GetOk("is_multi_region_trail"); ok {
@ -150,6 +196,13 @@ func resourceAwsCloudTrailCreate(d *schema.ResourceData, meta interface{}) error
}
}
// Event Selectors
if _, ok := d.GetOk("event_selector"); ok {
if err := cloudTrailSetEventSelectors(conn, d); err != nil {
return err
}
}
return resourceAwsCloudTrailUpdate(d, meta)
}
@ -227,6 +280,18 @@ func resourceAwsCloudTrailRead(d *schema.ResourceData, meta interface{}) error {
}
d.Set("enable_logging", logstatus)
// Get EventSelectors
eventSelectorsOut, err := conn.GetEventSelectors(&cloudtrail.GetEventSelectorsInput{
TrailName: aws.String(d.Id()),
})
if err != nil {
return err
}
if err := d.Set("event_selector", flattenAwsCloudTrailEventSelector(eventSelectorsOut.EventSelectors)); err != nil {
return err
}
return nil
}
@ -300,6 +365,13 @@ func resourceAwsCloudTrailUpdate(d *schema.ResourceData, meta interface{}) error
}
}
if !d.IsNewResource() && d.HasChange("event_selector") {
log.Printf("[DEBUG] Updating event selector on CloudTrail: %s", input)
if err := cloudTrailSetEventSelectors(conn, d); err != nil {
return err
}
}
log.Printf("[DEBUG] CloudTrail updated: %s", t)
return resourceAwsCloudTrailRead(d, meta)
@ -357,3 +429,98 @@ func cloudTrailSetLogging(conn *cloudtrail.CloudTrail, enabled bool, id string)
return nil
}
// cloudTrailSetEventSelectors pushes the configured "event_selector" blocks
// to the trail via PutEventSelectors, validating the request locally first.
// The trail is identified by the resource ID (the trail name).
func cloudTrailSetEventSelectors(conn *cloudtrail.CloudTrail, d *schema.ResourceData) error {
	input := &cloudtrail.PutEventSelectorsInput{
		TrailName: aws.String(d.Id()),
	}

	eventSelectors := expandAwsCloudTrailEventSelector(d.Get("event_selector").([]interface{}))
	input.EventSelectors = eventSelectors

	// Catch malformed selectors client-side before making the API call.
	if err := input.Validate(); err != nil {
		return fmt.Errorf("Error validate CloudTrail (%s): %s", d.Id(), err)
	}

	_, err := conn.PutEventSelectors(input)
	if err != nil {
		return fmt.Errorf("Error set event selector on CloudTrail (%s): %s", d.Id(), err)
	}

	return nil
}
// expandAwsCloudTrailEventSelector builds the SDK event selector list from
// the raw "event_selector" configuration blocks.
func expandAwsCloudTrailEventSelector(configured []interface{}) []*cloudtrail.EventSelector {
	selectors := make([]*cloudtrail.EventSelector, 0, len(configured))

	for _, item := range configured {
		m := item.(map[string]interface{})

		selectors = append(selectors, &cloudtrail.EventSelector{
			IncludeManagementEvents: aws.Bool(m["include_management_events"].(bool)),
			ReadWriteType:           aws.String(m["read_write_type"].(string)),
			DataResources:           expandAwsCloudTrailEventSelectorDataResource(m["data_resource"].([]interface{})),
		})
	}

	return selectors
}
// expandAwsCloudTrailEventSelectorDataResource builds the SDK data resource
// list from the raw "data_resource" configuration blocks.
func expandAwsCloudTrailEventSelectorDataResource(configured []interface{}) []*cloudtrail.DataResource {
	resources := make([]*cloudtrail.DataResource, 0, len(configured))

	for _, item := range configured {
		m := item.(map[string]interface{})

		rawValues := m["values"].([]interface{})
		values := make([]*string, 0, len(rawValues))
		for _, v := range rawValues {
			values = append(values, aws.String(v.(string)))
		}

		resources = append(resources, &cloudtrail.DataResource{
			Type:   aws.String(m["type"].(string)),
			Values: values,
		})
	}

	return resources
}
// flattenAwsCloudTrailEventSelector converts the API's event selectors into
// the schema representation.
//
// When the API returns exactly one selector with no data resources, that is
// the service-side default configuration; an empty list is returned so
// trails that never configured event selectors do not show a spurious diff.
func flattenAwsCloudTrailEventSelector(configured []*cloudtrail.EventSelector) []map[string]interface{} {
	eventSelectors := make([]map[string]interface{}, 0, len(configured))

	// Prevent default configurations shows differences
	if len(configured) == 1 && len(configured[0].DataResources) == 0 {
		return eventSelectors
	}

	for _, raw := range configured {
		item := make(map[string]interface{})
		item["read_write_type"] = *raw.ReadWriteType
		item["include_management_events"] = *raw.IncludeManagementEvents
		item["data_resource"] = flattenAwsCloudTrailEventSelectorDataResource(raw.DataResources)

		eventSelectors = append(eventSelectors, item)
	}

	return eventSelectors
}
// flattenAwsCloudTrailEventSelectorDataResource converts the API's data
// resources into the schema representation.
func flattenAwsCloudTrailEventSelectorDataResource(configured []*cloudtrail.DataResource) []map[string]interface{} {
	out := make([]map[string]interface{}, 0, len(configured))

	for _, dr := range configured {
		out = append(out, map[string]interface{}{
			"type":   *dr.Type,
			"values": flattenStringList(dr.Values),
		})
	}

	return out
}

View File

@ -181,6 +181,33 @@ func resourceAwsCodeBuildProject() *schema.Resource {
ValidateFunc: validateAwsCodeBuildTimeout,
},
"tags": tagsSchema(),
"vpc_config": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"vpc_id": {
Type: schema.TypeString,
Required: true,
},
"subnets": {
Type: schema.TypeSet,
Required: true,
Elem: &schema.Schema{Type: schema.TypeString},
// Set: schema.HashString,
MaxItems: 16,
},
"security_group_ids": {
Type: schema.TypeSet,
Required: true,
Elem: &schema.Schema{Type: schema.TypeString},
// Set: schema.HashString,
MaxItems: 5,
},
},
},
},
},
}
}
@ -215,6 +242,10 @@ func resourceAwsCodeBuildProjectCreate(d *schema.ResourceData, meta interface{})
params.TimeoutInMinutes = aws.Int64(int64(v.(int)))
}
if v, ok := d.GetOk("vpc_config"); ok {
params.VpcConfig = expandCodeBuildVpcConfig(v.([]interface{}))
}
if v, ok := d.GetOk("tags"); ok {
params.Tags = tagsFromMapCodeBuild(v.(map[string]interface{}))
}
@ -230,6 +261,10 @@ func resourceAwsCodeBuildProjectCreate(d *schema.ResourceData, meta interface{})
return resource.RetryableError(err)
}
if isAWSErr(err, "InvalidInputException", "Not authorized to perform DescribeSecurityGroups") {
return resource.RetryableError(err)
}
return resource.NonRetryableError(err)
}
@ -326,6 +361,21 @@ func expandProjectEnvironment(d *schema.ResourceData) *codebuild.ProjectEnvironm
return projectEnv
}
// expandCodeBuildVpcConfig converts the single-element "vpc_config" block
// into the CodeBuild SDK type. An empty list yields a pointer to a zero-value
// VpcConfig (not nil), which is what the create/update callers rely on to
// clear the configuration.
func expandCodeBuildVpcConfig(rawVpcConfig []interface{}) *codebuild.VpcConfig {
	vpcConfig := codebuild.VpcConfig{}
	if len(rawVpcConfig) == 0 {
		return &vpcConfig
	}

	// Idiom fix: dropped the needless else-after-return branch.
	data := rawVpcConfig[0].(map[string]interface{})
	vpcConfig.VpcId = aws.String(data["vpc_id"].(string))
	vpcConfig.Subnets = expandStringList(data["subnets"].(*schema.Set).List())
	vpcConfig.SecurityGroupIds = expandStringList(data["security_group_ids"].(*schema.Set).List())
	return &vpcConfig
}
func expandProjectSource(d *schema.ResourceData) codebuild.ProjectSource {
configs := d.Get("source").(*schema.Set).List()
projectSource := codebuild.ProjectSource{}
@ -380,15 +430,19 @@ func resourceAwsCodeBuildProjectRead(d *schema.ResourceData, meta interface{}) e
project := resp.Projects[0]
if err := d.Set("artifacts", flattenAwsCodebuildProjectArtifacts(project.Artifacts)); err != nil {
if err := d.Set("artifacts", flattenAwsCodeBuildProjectArtifacts(project.Artifacts)); err != nil {
return err
}
if err := d.Set("environment", schema.NewSet(resourceAwsCodeBuildProjectEnvironmentHash, flattenAwsCodebuildProjectEnvironment(project.Environment))); err != nil {
if err := d.Set("environment", schema.NewSet(resourceAwsCodeBuildProjectEnvironmentHash, flattenAwsCodeBuildProjectEnvironment(project.Environment))); err != nil {
return err
}
if err := d.Set("source", flattenAwsCodebuildProjectSource(project.Source)); err != nil {
if err := d.Set("source", flattenAwsCodeBuildProjectSource(project.Source)); err != nil {
return err
}
if err := d.Set("vpc_config", flattenAwsCodeBuildVpcConfig(project.VpcConfig)); err != nil {
return err
}
@ -427,6 +481,10 @@ func resourceAwsCodeBuildProjectUpdate(d *schema.ResourceData, meta interface{})
params.Artifacts = &projectArtifacts
}
if d.HasChange("vpc_config") {
params.VpcConfig = expandCodeBuildVpcConfig(d.Get("vpc_config").([]interface{}))
}
if d.HasChange("description") {
params.Description = aws.String(d.Get("description").(string))
}
@ -474,7 +532,7 @@ func resourceAwsCodeBuildProjectDelete(d *schema.ResourceData, meta interface{})
return nil
}
func flattenAwsCodebuildProjectArtifacts(artifacts *codebuild.ProjectArtifacts) *schema.Set {
func flattenAwsCodeBuildProjectArtifacts(artifacts *codebuild.ProjectArtifacts) *schema.Set {
artifactSet := schema.Set{
F: resourceAwsCodeBuildProjectArtifactsHash,
@ -509,7 +567,7 @@ func flattenAwsCodebuildProjectArtifacts(artifacts *codebuild.ProjectArtifacts)
return &artifactSet
}
func flattenAwsCodebuildProjectEnvironment(environment *codebuild.ProjectEnvironment) []interface{} {
func flattenAwsCodeBuildProjectEnvironment(environment *codebuild.ProjectEnvironment) []interface{} {
envConfig := map[string]interface{}{}
envConfig["type"] = *environment.Type
@ -525,7 +583,7 @@ func flattenAwsCodebuildProjectEnvironment(environment *codebuild.ProjectEnviron
}
func flattenAwsCodebuildProjectSource(source *codebuild.ProjectSource) []interface{} {
func flattenAwsCodeBuildProjectSource(source *codebuild.ProjectSource) []interface{} {
l := make([]interface{}, 1)
m := map[string]interface{}{}
@ -548,6 +606,19 @@ func flattenAwsCodebuildProjectSource(source *codebuild.ProjectSource) []interfa
return l
}
// flattenAwsCodeBuildVpcConfig converts the CodeBuild VPC configuration into
// the single-element list form used by the schema; nil flattens to nil.
func flattenAwsCodeBuildVpcConfig(vpcConfig *codebuild.VpcConfig) []interface{} {
	if vpcConfig == nil {
		return nil
	}

	values := map[string]interface{}{
		"vpc_id":             *vpcConfig.VpcId,
		"subnets":            schema.NewSet(schema.HashString, flattenStringList(vpcConfig.Subnets)),
		"security_group_ids": schema.NewSet(schema.HashString, flattenStringList(vpcConfig.SecurityGroupIds)),
	}
	return []interface{}{values}
}
func resourceAwsCodeBuildProjectArtifactsHash(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})

View File

@ -1106,9 +1106,11 @@ func validateTriggerEvent(v interface{}, k string) (ws []string, errors []error)
"DeploymentSuccess": true,
"DeploymentFailure": true,
"DeploymentRollback": true,
"DeploymentReady": true,
"InstanceStart": true,
"InstanceSuccess": true,
"InstanceFailure": true,
"InstanceReady": true,
}
if !triggerEvents[value] {

View File

@ -53,6 +53,7 @@ func resourceAwsCognitoUserPoolClient() *schema.Resource {
ValidateFunc: validation.StringInSlice([]string{
cognitoidentityprovider.ExplicitAuthFlowsTypeAdminNoSrpAuth,
cognitoidentityprovider.ExplicitAuthFlowsTypeCustomAuthFlowOnly,
cognitoidentityprovider.ExplicitAuthFlowsTypeUserPasswordAuth,
}, false),
},
},

View File

@ -0,0 +1,588 @@
package aws
import (
"fmt"
"log"
"sort"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/service/dax"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
// resourceAwsDaxCluster returns the schema and CRUD handlers for the
// aws_dax_cluster resource. Cluster name, IAM role, node type, availability
// zones and subnet group force replacement; replication factor, parameter
// group, maintenance window and notification topic are updatable in place.
func resourceAwsDaxCluster() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsDaxClusterCreate,
		Read:   resourceAwsDaxClusterRead,
		Update: resourceAwsDaxClusterUpdate,
		Delete: resourceAwsDaxClusterDelete,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},

		Timeouts: &schema.ResourceTimeout{
			Create: schema.DefaultTimeout(45 * time.Minute),
			Delete: schema.DefaultTimeout(45 * time.Minute),
			Update: schema.DefaultTimeout(90 * time.Minute),
		},

		Schema: map[string]*schema.Schema{
			"arn": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"cluster_name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
				// DAX stores names lower-cased; normalize in state so a
				// mixed-case configuration does not produce a perpetual diff.
				StateFunc: func(val interface{}) string {
					return strings.ToLower(val.(string))
				},
				// DAX follows the same naming convention as ElastiCache clusters
				ValidateFunc: validateElastiCacheClusterId,
			},
			"iam_role_arn": {
				Type:         schema.TypeString,
				Required:     true,
				ForceNew:     true,
				ValidateFunc: validateArn,
			},
			"node_type": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			// Updatable: handled via Increase/DecreaseReplicationFactor.
			"replication_factor": {
				Type:     schema.TypeInt,
				Required: true,
			},
			"availability_zones": {
				Type:     schema.TypeSet,
				Optional: true,
				ForceNew: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set:      schema.HashString,
			},
			"description": {
				Type:     schema.TypeString,
				Optional: true,
			},
			"notification_topic_arn": {
				Type:     schema.TypeString,
				Optional: true,
			},
			"parameter_group_name": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"maintenance_window": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				// The API reports the window lower-cased.
				StateFunc: func(val interface{}) string {
					return strings.ToLower(val.(string))
				},
				ValidateFunc: validateOnceAWeekWindowFormat,
			},
			"security_group_ids": {
				Type:     schema.TypeSet,
				Optional: true,
				Computed: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set:      schema.HashString,
			},
			"subnet_group_name": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},
			"tags": tagsSchema(),

			// The attributes below are read-only, populated on refresh.
			"port": {
				Type:     schema.TypeInt,
				Computed: true,
			},
			"configuration_endpoint": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"cluster_address": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"nodes": {
				Type:     schema.TypeList,
				Computed: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"id": {
							Type:     schema.TypeString,
							Computed: true,
						},
						"address": {
							Type:     schema.TypeString,
							Computed: true,
						},
						"port": {
							Type:     schema.TypeInt,
							Computed: true,
						},
						"availability_zone": {
							Type:     schema.TypeString,
							Computed: true,
						},
					},
				},
			},
		},
	}
}
// resourceAwsDaxClusterCreate creates a DAX cluster from the resource
// configuration, retrying briefly while a freshly-created IAM role
// propagates, then blocks until the cluster reaches "available".
func resourceAwsDaxClusterCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).daxconn

	clusterName := d.Get("cluster_name").(string)
	iamRoleArn := d.Get("iam_role_arn").(string)
	nodeType := d.Get("node_type").(string)
	numNodes := int64(d.Get("replication_factor").(int))
	subnetGroupName := d.Get("subnet_group_name").(string)
	securityIdSet := d.Get("security_group_ids").(*schema.Set)

	securityIds := expandStringList(securityIdSet.List())
	tags := tagsFromMapDax(d.Get("tags").(map[string]interface{}))

	req := &dax.CreateClusterInput{
		ClusterName:       aws.String(clusterName),
		IamRoleArn:        aws.String(iamRoleArn),
		NodeType:          aws.String(nodeType),
		ReplicationFactor: aws.Int64(numNodes),
		SecurityGroupIds:  securityIds,
		SubnetGroupName:   aws.String(subnetGroupName),
		Tags:              tags,
	}

	// optionals can be defaulted by AWS
	if v, ok := d.GetOk("description"); ok {
		req.Description = aws.String(v.(string))
	}

	if v, ok := d.GetOk("parameter_group_name"); ok {
		req.ParameterGroupName = aws.String(v.(string))
	}

	if v, ok := d.GetOk("maintenance_window"); ok {
		req.PreferredMaintenanceWindow = aws.String(v.(string))
	}

	if v, ok := d.GetOk("notification_topic_arn"); ok {
		req.NotificationTopicArn = aws.String(v.(string))
	}

	preferred_azs := d.Get("availability_zones").(*schema.Set).List()
	if len(preferred_azs) > 0 {
		azs := expandStringList(preferred_azs)
		req.AvailabilityZones = azs
	}

	// IAM roles take some time to propagate
	var resp *dax.CreateClusterOutput
	err := resource.Retry(30*time.Second, func() *resource.RetryError {
		var err error
		resp, err = conn.CreateCluster(req)
		if err != nil {
			if isAWSErr(err, dax.ErrCodeInvalidParameterValueException, "No permission to assume role") {
				log.Print("[DEBUG] Retrying create of DAX cluster")
				return resource.RetryableError(err)
			}
			return resource.NonRetryableError(err)
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("Error creating DAX cluster: %s", err)
	}

	// Assign the cluster id as the resource ID
	// DAX always retains the id in lower case, so we have to
	// mimic that or else we won't be able to refresh a resource whose
	// name contained uppercase characters.
	d.SetId(strings.ToLower(*resp.Cluster.ClusterName))

	pending := []string{"creating", "modifying"}
	stateConf := &resource.StateChangeConf{
		Pending:    pending,
		Target:     []string{"available"},
		Refresh:    daxClusterStateRefreshFunc(conn, d.Id(), "available", pending),
		Timeout:    d.Timeout(schema.TimeoutCreate),
		MinTimeout: 10 * time.Second,
		Delay:      30 * time.Second,
	}

	log.Printf("[DEBUG] Waiting for state to become available: %v", d.Id())
	_, sterr := stateConf.WaitForState()
	if sterr != nil {
		return fmt.Errorf("Error waiting for DAX cluster (%s) to be created: %s", d.Id(), sterr)
	}

	return resourceAwsDaxClusterRead(d, meta)
}
// resourceAwsDaxClusterRead refreshes the state of a DAX cluster. It removes
// the resource from state when the cluster no longer exists, and treats
// tag-listing failures as non-fatal (tags are simply left unset).
func resourceAwsDaxClusterRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).daxconn
	req := &dax.DescribeClustersInput{
		ClusterNames: []*string{aws.String(d.Id())},
	}

	res, err := conn.DescribeClusters(req)
	if err != nil {
		if isAWSErr(err, dax.ErrCodeClusterNotFoundFault, "") {
			log.Printf("[WARN] DAX cluster (%s) not found", d.Id())
			d.SetId("")
			return nil
		}
		return err
	}

	if len(res.Clusters) == 0 {
		log.Printf("[WARN] DAX cluster (%s) not found, removing from state", d.Id())
		d.SetId("")
		return nil
	}

	c := res.Clusters[0]
	d.Set("arn", c.ClusterArn)
	d.Set("cluster_name", c.ClusterName)
	d.Set("description", c.Description)
	d.Set("iam_role_arn", c.IamRoleArn)
	d.Set("node_type", c.NodeType)
	d.Set("replication_factor", c.TotalNodes)

	if c.ClusterDiscoveryEndpoint != nil {
		d.Set("port", c.ClusterDiscoveryEndpoint.Port)
		d.Set("configuration_endpoint", aws.String(fmt.Sprintf("%s:%d", *c.ClusterDiscoveryEndpoint.Address, *c.ClusterDiscoveryEndpoint.Port)))
		// The address is already a string; the previous fmt.Sprintf("%s", ...)
		// round-trip was redundant.
		d.Set("cluster_address", c.ClusterDiscoveryEndpoint.Address)
	}

	d.Set("subnet_group_name", c.SubnetGroup)
	d.Set("security_group_ids", flattenDaxSecurityGroupIds(c.SecurityGroups))

	if c.ParameterGroup != nil {
		d.Set("parameter_group_name", c.ParameterGroup.ParameterGroupName)
	}

	d.Set("maintenance_window", c.PreferredMaintenanceWindow)

	if c.NotificationConfiguration != nil {
		if *c.NotificationConfiguration.TopicStatus == "active" {
			d.Set("notification_topic_arn", c.NotificationConfiguration.TopicArn)
		}
	}

	if err := setDaxClusterNodeData(d, c); err != nil {
		return err
	}

	// List tags for the cluster and store them in state. Failure to build the
	// ARN or to list tags is logged but does not fail the read.
	arn, err := buildDaxArn(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region)
	if err != nil {
		log.Printf("[DEBUG] Error building ARN for DAX Cluster, not setting Tags for cluster %s", *c.ClusterName)
	} else {
		resp, err := conn.ListTags(&dax.ListTagsInput{
			ResourceName: aws.String(arn),
		})
		if err != nil {
			// Bug fix: previously resp was dereferenced even when ListTags
			// failed, which could panic on a nil response.
			log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn)
		} else {
			var dt []*dax.Tag
			if len(resp.Tags) > 0 {
				dt = resp.Tags
			}
			d.Set("tags", tagsToMapDax(dt))
		}
	}

	return nil
}
// resourceAwsDaxClusterUpdate applies in-place changes to a DAX cluster:
// tags, description, security groups, parameter group, maintenance window,
// notification topic and replication factor. When any API mutation was
// issued it waits for the cluster to return to "available" before re-reading
// state.
func resourceAwsDaxClusterUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).daxconn

	// Tag updates are best-effort when the ARN cannot be constructed.
	arn, err := buildDaxArn(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region)
	if err != nil {
		log.Printf("[DEBUG] Error building ARN for DAX Cluster, not updating Tags for cluster %s", d.Id())
	} else {
		if err := setTagsDax(conn, d, arn); err != nil {
			return err
		}
	}

	req := &dax.UpdateClusterInput{
		ClusterName: aws.String(d.Id()),
	}

	requestUpdate := false // true when UpdateCluster must be called
	awaitUpdate := false   // true when we must wait for "available" afterwards
	if d.HasChange("description") {
		req.Description = aws.String(d.Get("description").(string))
		requestUpdate = true
	}

	if d.HasChange("security_group_ids") {
		if attr := d.Get("security_group_ids").(*schema.Set); attr.Len() > 0 {
			req.SecurityGroupIds = expandStringList(attr.List())
			requestUpdate = true
		}
	}

	if d.HasChange("parameter_group_name") {
		req.ParameterGroupName = aws.String(d.Get("parameter_group_name").(string))
		requestUpdate = true
	}

	if d.HasChange("maintenance_window") {
		req.PreferredMaintenanceWindow = aws.String(d.Get("maintenance_window").(string))
		requestUpdate = true
	}

	if d.HasChange("notification_topic_arn") {
		v := d.Get("notification_topic_arn").(string)
		req.NotificationTopicArn = aws.String(v)
		// Clearing the topic requires explicitly marking it inactive.
		if v == "" {
			inactive := "inactive"
			req.NotificationTopicStatus = &inactive
		}
		requestUpdate = true
	}

	if requestUpdate {
		log.Printf("[DEBUG] Modifying DAX Cluster (%s), opts:\n%s", d.Id(), req)
		_, err := conn.UpdateCluster(req)
		if err != nil {
			return fmt.Errorf("[WARN] Error updating DAX cluster (%s), error: %s", d.Id(), err)
		}
		awaitUpdate = true
	}

	if d.HasChange("replication_factor") {
		oraw, nraw := d.GetChange("replication_factor")
		o := oraw.(int)
		n := nraw.(int)
		if n < o {
			log.Printf("[INFO] Decreasing nodes in DAX cluster %s from %d to %d", d.Id(), o, n)
			_, err := conn.DecreaseReplicationFactor(&dax.DecreaseReplicationFactorInput{
				ClusterName:          aws.String(d.Id()),
				NewReplicationFactor: aws.Int64(int64(n)),
			})
			if err != nil {
				// Bug fix: this branch previously reported "Error increasing
				// nodes" even though it decreases the replication factor.
				return fmt.Errorf("[WARN] Error decreasing nodes in DAX cluster %s, error: %s", d.Id(), err)
			}
			awaitUpdate = true
		}
		if n > o {
			log.Printf("[INFO] Increasing nodes in DAX cluster %s from %d to %d", d.Id(), o, n)
			_, err := conn.IncreaseReplicationFactor(&dax.IncreaseReplicationFactorInput{
				ClusterName:          aws.String(d.Id()),
				NewReplicationFactor: aws.Int64(int64(n)),
			})
			if err != nil {
				return fmt.Errorf("[WARN] Error increasing nodes in DAX cluster %s, error: %s", d.Id(), err)
			}
			awaitUpdate = true
		}
	}

	if awaitUpdate {
		log.Printf("[DEBUG] Waiting for update: %s", d.Id())
		pending := []string{"modifying"}
		stateConf := &resource.StateChangeConf{
			Pending:    pending,
			Target:     []string{"available"},
			Refresh:    daxClusterStateRefreshFunc(conn, d.Id(), "available", pending),
			Timeout:    d.Timeout(schema.TimeoutUpdate),
			MinTimeout: 10 * time.Second,
			Delay:      30 * time.Second,
		}

		_, sterr := stateConf.WaitForState()
		if sterr != nil {
			return fmt.Errorf("Error waiting for DAX (%s) to update: %s", d.Id(), sterr)
		}
	}

	return resourceAwsDaxClusterRead(d, meta)
}
// setDaxClusterNodeData flattens the cluster's node list, sorted by node ID
// for a stable ordering, into the "nodes" attribute.
func setDaxClusterNodeData(d *schema.ResourceData, c *dax.Cluster) error {
	ordered := make([]*dax.Node, len(c.Nodes))
	copy(ordered, c.Nodes)
	sort.Sort(byNodeId(ordered))

	nodeData := make([]map[string]interface{}, 0, len(ordered))
	for _, n := range ordered {
		// Every field below is dereferenced, so reject partial node records.
		if n.NodeId == nil || n.Endpoint == nil || n.Endpoint.Address == nil || n.Endpoint.Port == nil || n.AvailabilityZone == nil {
			return fmt.Errorf("Unexpected nil pointer in: %s", n)
		}
		nodeData = append(nodeData, map[string]interface{}{
			"id":                *n.NodeId,
			"address":           *n.Endpoint.Address,
			"port":              int(*n.Endpoint.Port),
			"availability_zone": *n.AvailabilityZone,
		})
	}
	return d.Set("nodes", nodeData)
}
// byNodeId implements sort.Interface to order DAX nodes by node ID, giving
// the "nodes" attribute a stable ordering across reads.
type byNodeId []*dax.Node

func (b byNodeId) Len() int      { return len(b) }
func (b byNodeId) Swap(i, j int) { b[i], b[j] = b[j], b[i] }

// Less returns false whenever either NodeId is nil, so nodes with missing
// IDs keep whatever relative order the sort gives them.
func (b byNodeId) Less(i, j int) bool {
	return b[i].NodeId != nil && b[j].NodeId != nil &&
		*b[i].NodeId < *b[j].NodeId
}
// resourceAwsDaxClusterDelete deletes a DAX cluster, retrying while the
// cluster is in a state that does not allow deletion, then waits until the
// cluster is fully gone before clearing the resource ID.
func resourceAwsDaxClusterDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).daxconn

	req := &dax.DeleteClusterInput{
		ClusterName: aws.String(d.Id()),
	}
	// The cluster cannot be deleted while it is being modified; retry for up
	// to 5 minutes on InvalidClusterStateFault.
	err := resource.Retry(5*time.Minute, func() *resource.RetryError {
		_, err := conn.DeleteCluster(req)
		if err != nil {
			if isAWSErr(err, dax.ErrCodeInvalidClusterStateFault, "") {
				return resource.RetryableError(err)
			}
			return resource.NonRetryableError(err)
		}
		return nil
	})
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Waiting for deletion: %v", d.Id())
	// An empty Target list means we wait until the refresh function reports
	// the cluster as gone (ClusterNotFoundFault -> nil state).
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"creating", "available", "deleting", "incompatible-parameters", "incompatible-network"},
		Target:     []string{},
		Refresh:    daxClusterStateRefreshFunc(conn, d.Id(), "", []string{}),
		Timeout:    d.Timeout(schema.TimeoutDelete),
		MinTimeout: 10 * time.Second,
		Delay:      30 * time.Second,
	}

	_, sterr := stateConf.WaitForState()
	if sterr != nil {
		return fmt.Errorf("Error waiting for DAX (%s) to delete: %s", d.Id(), sterr)
	}

	d.SetId("")

	return nil
}
// daxClusterStateRefreshFunc returns a StateRefreshFunc for a DAX cluster.
// It reports the cluster's current status; while waiting for givenState it
// additionally requires the expected node count and that every node is
// "available" before reporting givenState. A ClusterNotFoundFault maps to
// the empty state (resource gone).
func daxClusterStateRefreshFunc(conn *dax.DAX, clusterID, givenState string, pending []string) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		resp, err := conn.DescribeClusters(&dax.DescribeClustersInput{
			ClusterNames: []*string{aws.String(clusterID)},
		})
		if err != nil {
			if isAWSErr(err, dax.ErrCodeClusterNotFoundFault, "") {
				log.Printf("[DEBUG] Detect deletion")
				return nil, "", nil
			}

			log.Printf("[ERROR] daxClusterStateRefreshFunc: %s", err)
			return nil, "", err
		}

		if len(resp.Clusters) == 0 {
			return nil, "", fmt.Errorf("[WARN] Error: no DAX clusters found for id (%s)", clusterID)
		}

		var c *dax.Cluster
		for _, cluster := range resp.Clusters {
			if *cluster.ClusterName == clusterID {
				log.Printf("[DEBUG] Found matching DAX cluster: %s", *cluster.ClusterName)
				c = cluster
			}
		}

		if c == nil {
			return nil, "", fmt.Errorf("[WARN] Error: no matching DAX cluster for id (%s)", clusterID)
		}

		// DescribeCluster returns a response without status late on in the
		// deletion process - assume cluster is still deleting until we
		// get ClusterNotFoundFault
		if c.Status == nil {
			log.Printf("[DEBUG] DAX Cluster %s has no status attribute set - assume status is deleting", clusterID)
			return c, "deleting", nil
		}

		log.Printf("[DEBUG] DAX Cluster (%s) status: %v", clusterID, *c.Status)

		// return the current state if it's in the pending array
		for _, p := range pending {
			log.Printf("[DEBUG] DAX: checking pending state (%s) for cluster (%s), cluster status: %s", pending, clusterID, *c.Status)
			s := *c.Status
			if p == s {
				log.Printf("[DEBUG] Return with status: %v", *c.Status)
				return c, p, nil
			}
		}

		// return given state if it's not in pending
		if givenState != "" {
			log.Printf("[DEBUG] DAX: checking given state (%s) of cluster (%s) against cluster status (%s)", givenState, clusterID, *c.Status)
			// check to make sure we have the node count we're expecting
			if int64(len(c.Nodes)) != *c.TotalNodes {
				log.Printf("[DEBUG] Node count is not what is expected: %d found, %d expected", len(c.Nodes), *c.TotalNodes)
				return nil, "creating", nil
			}

			log.Printf("[DEBUG] Node count matched (%d)", len(c.Nodes))
			// loop the nodes and check their status as well
			for _, n := range c.Nodes {
				log.Printf("[DEBUG] Checking cache node for status: %s", n)
				if n.NodeStatus != nil && *n.NodeStatus != "available" {
					log.Printf("[DEBUG] Node (%s) is not yet available, status: %s", *n.NodeId, *n.NodeStatus)
					return nil, "creating", nil
				}
			}
			// Bug fix: the old code logged "Cache node not in expected state"
			// inside the loop for every node that WAS available; log once
			// after all nodes pass the check instead.
			log.Printf("[DEBUG] All cache nodes available")
			log.Printf("[DEBUG] DAX returning given state (%s), cluster: %s", givenState, c)
			return c, givenState, nil
		}
		log.Printf("[DEBUG] current status: %v", *c.Status)
		return c, *c.Status, nil
	}
}
// buildDaxArn constructs the ARN for a DAX cluster from its identifier and
// the caller's partition, account ID and region. Partition and account ID
// are required; region may be empty (the ARN is then region-less).
func buildDaxArn(identifier, partition, accountid, region string) (string, error) {
	if partition == "" {
		return "", fmt.Errorf("Unable to construct DAX ARN because of missing AWS partition")
	}
	if accountid == "" {
		return "", fmt.Errorf("Unable to construct DAX ARN because of missing AWS Account ID")
	}
	// Named clusterArn (not "arn") so the local no longer shadows the
	// imported aws/arn package.
	clusterArn := arn.ARN{
		Partition: partition,
		Service:   "dax",
		Region:    region,
		AccountID: accountid,
		Resource:  fmt.Sprintf("cache/%s", identifier),
	}

	return clusterArn.String(), nil
}

View File

@ -8,17 +8,11 @@ import (
"github.com/hashicorp/terraform/helper/schema"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/directoryservice"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/validation"
)
var directoryCreationFuncs = map[string]func(*directoryservice.DirectoryService, *schema.ResourceData) (string, error){
"SimpleAD": createSimpleDirectoryService,
"MicrosoftAD": createActiveDirectoryService,
"ADConnector": createDirectoryConnector,
}
func resourceAwsDirectoryServiceDirectory() *schema.Resource {
return &schema.Resource{
Create: resourceAwsDirectoryServiceDirectoryCreate,
@ -44,8 +38,12 @@ func resourceAwsDirectoryServiceDirectory() *schema.Resource {
"size": {
Type: schema.TypeString,
Optional: true,
Default: "Large",
Computed: true,
ForceNew: true,
ValidateFunc: validation.StringInSlice([]string{
directoryservice.DirectorySizeLarge,
directoryservice.DirectorySizeSmall,
}, false),
},
"alias": {
Type: schema.TypeString,
@ -67,6 +65,7 @@ func resourceAwsDirectoryServiceDirectory() *schema.Resource {
"tags": tagsSchema(),
"vpc_settings": {
Type: schema.TypeList,
MaxItems: 1,
Optional: true,
ForceNew: true,
Elem: &schema.Resource{
@ -88,6 +87,7 @@ func resourceAwsDirectoryServiceDirectory() *schema.Resource {
},
"connect_settings": {
Type: schema.TypeList,
MaxItems: 1,
Optional: true,
ForceNew: true,
Elem: &schema.Resource{
@ -141,96 +141,91 @@ func resourceAwsDirectoryServiceDirectory() *schema.Resource {
"type": {
Type: schema.TypeString,
Optional: true,
Default: "SimpleAD",
Default: directoryservice.DirectoryTypeSimpleAd,
ForceNew: true,
ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
validTypes := []string{"SimpleAD", "MicrosoftAD"}
value := v.(string)
for validType, _ := range directoryCreationFuncs {
if validType == value {
return
}
}
es = append(es, fmt.Errorf("%q must be one of %q", k, validTypes))
return
},
ValidateFunc: validation.StringInSlice([]string{
directoryservice.DirectoryTypeAdconnector,
directoryservice.DirectoryTypeMicrosoftAd,
directoryservice.DirectoryTypeSimpleAd,
}, false),
},
"edition": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
ValidateFunc: validation.StringInSlice([]string{
directoryservice.DirectoryEditionEnterprise,
directoryservice.DirectoryEditionStandard,
}, false),
},
},
}
}
func buildVpcSettings(d *schema.ResourceData) (vpcSettings *directoryservice.DirectoryVpcSettings, err error) {
if v, ok := d.GetOk("vpc_settings"); !ok {
v, ok := d.GetOk("vpc_settings")
if !ok {
return nil, fmt.Errorf("vpc_settings is required for type = SimpleAD or MicrosoftAD")
} else {
settings := v.([]interface{})
}
settings := v.([]interface{})
s := settings[0].(map[string]interface{})
var subnetIds []*string
for _, id := range s["subnet_ids"].(*schema.Set).List() {
subnetIds = append(subnetIds, aws.String(id.(string)))
}
if len(settings) > 1 {
return nil, fmt.Errorf("Only a single vpc_settings block is expected")
} else if len(settings) == 1 {
s := settings[0].(map[string]interface{})
var subnetIds []*string
for _, id := range s["subnet_ids"].(*schema.Set).List() {
subnetIds = append(subnetIds, aws.String(id.(string)))
}
vpcSettings = &directoryservice.DirectoryVpcSettings{
SubnetIds: subnetIds,
VpcId: aws.String(s["vpc_id"].(string)),
}
}
vpcSettings = &directoryservice.DirectoryVpcSettings{
SubnetIds: subnetIds,
VpcId: aws.String(s["vpc_id"].(string)),
}
return vpcSettings, nil
}
func buildConnectSettings(d *schema.ResourceData) (connectSettings *directoryservice.DirectoryConnectSettings, err error) {
if v, ok := d.GetOk("connect_settings"); !ok {
v, ok := d.GetOk("connect_settings")
if !ok {
return nil, fmt.Errorf("connect_settings is required for type = ADConnector")
} else {
settings := v.([]interface{})
}
settings := v.([]interface{})
s := settings[0].(map[string]interface{})
if len(settings) > 1 {
return nil, fmt.Errorf("Only a single connect_settings block is expected")
} else if len(settings) == 1 {
s := settings[0].(map[string]interface{})
var subnetIds []*string
for _, id := range s["subnet_ids"].(*schema.Set).List() {
subnetIds = append(subnetIds, aws.String(id.(string)))
}
var subnetIds []*string
for _, id := range s["subnet_ids"].(*schema.Set).List() {
subnetIds = append(subnetIds, aws.String(id.(string)))
}
var customerDnsIps []*string
for _, id := range s["customer_dns_ips"].(*schema.Set).List() {
customerDnsIps = append(customerDnsIps, aws.String(id.(string)))
}
var customerDnsIps []*string
for _, id := range s["customer_dns_ips"].(*schema.Set).List() {
customerDnsIps = append(customerDnsIps, aws.String(id.(string)))
}
connectSettings = &directoryservice.DirectoryConnectSettings{
CustomerDnsIps: customerDnsIps,
CustomerUserName: aws.String(s["customer_username"].(string)),
SubnetIds: subnetIds,
VpcId: aws.String(s["vpc_id"].(string)),
}
}
connectSettings = &directoryservice.DirectoryConnectSettings{
CustomerDnsIps: customerDnsIps,
CustomerUserName: aws.String(s["customer_username"].(string)),
SubnetIds: subnetIds,
VpcId: aws.String(s["vpc_id"].(string)),
}
return connectSettings, nil
}
func createDirectoryConnector(dsconn *directoryservice.DirectoryService, d *schema.ResourceData) (directoryId string, err error) {
if _, ok := d.GetOk("size"); !ok {
return "", fmt.Errorf("size is required for type = ADConnector")
}
input := directoryservice.ConnectDirectoryInput{
Name: aws.String(d.Get("name").(string)),
Password: aws.String(d.Get("password").(string)),
Size: aws.String(d.Get("size").(string)),
}
if v, ok := d.GetOk("description"); ok {
input.Description = aws.String(v.(string))
}
if v, ok := d.GetOk("size"); ok {
input.Size = aws.String(v.(string))
} else {
// Matching previous behavior of Default: "Large" for Size attribute
input.Size = aws.String(directoryservice.DirectorySizeLarge)
}
if v, ok := d.GetOk("short_name"); ok {
input.ShortName = aws.String(v.(string))
}
@ -251,19 +246,20 @@ func createDirectoryConnector(dsconn *directoryservice.DirectoryService, d *sche
}
func createSimpleDirectoryService(dsconn *directoryservice.DirectoryService, d *schema.ResourceData) (directoryId string, err error) {
if _, ok := d.GetOk("size"); !ok {
return "", fmt.Errorf("size is required for type = SimpleAD")
}
input := directoryservice.CreateDirectoryInput{
Name: aws.String(d.Get("name").(string)),
Password: aws.String(d.Get("password").(string)),
Size: aws.String(d.Get("size").(string)),
}
if v, ok := d.GetOk("description"); ok {
input.Description = aws.String(v.(string))
}
if v, ok := d.GetOk("size"); ok {
input.Size = aws.String(v.(string))
} else {
// Matching previous behavior of Default: "Large" for Size attribute
input.Size = aws.String(directoryservice.DirectorySizeLarge)
}
if v, ok := d.GetOk("short_name"); ok {
input.ShortName = aws.String(v.(string))
}
@ -295,6 +291,9 @@ func createActiveDirectoryService(dsconn *directoryservice.DirectoryService, d *
if v, ok := d.GetOk("short_name"); ok {
input.ShortName = aws.String(v.(string))
}
if v, ok := d.GetOk("edition"); ok {
input.Edition = aws.String(v.(string))
}
input.VpcSettings, err = buildVpcSettings(d)
if err != nil {
@ -314,13 +313,18 @@ func createActiveDirectoryService(dsconn *directoryservice.DirectoryService, d *
func resourceAwsDirectoryServiceDirectoryCreate(d *schema.ResourceData, meta interface{}) error {
dsconn := meta.(*AWSClient).dsconn
creationFunc, ok := directoryCreationFuncs[d.Get("type").(string)]
if !ok {
// Shouldn't happen as this is validated above
return fmt.Errorf("Unsupported directory type: %s", d.Get("type"))
var directoryId string
var err error
directoryType := d.Get("type").(string)
if directoryType == directoryservice.DirectoryTypeAdconnector {
directoryId, err = createDirectoryConnector(dsconn, d)
} else if directoryType == directoryservice.DirectoryTypeMicrosoftAd {
directoryId, err = createActiveDirectoryService(dsconn, d)
} else if directoryType == directoryservice.DirectoryTypeSimpleAd {
directoryId, err = createSimpleDirectoryService(dsconn, d)
}
directoryId, err := creationFunc(dsconn, d)
if err != nil {
return err
}
@ -330,8 +334,12 @@ func resourceAwsDirectoryServiceDirectoryCreate(d *schema.ResourceData, meta int
// Wait for creation
log.Printf("[DEBUG] Waiting for DS (%q) to become available", d.Id())
stateConf := &resource.StateChangeConf{
Pending: []string{"Requested", "Creating", "Created"},
Target: []string{"Active"},
Pending: []string{
directoryservice.DirectoryStageRequested,
directoryservice.DirectoryStageCreating,
directoryservice.DirectoryStageCreated,
},
Target: []string{directoryservice.DirectoryStageActive},
Refresh: func() (interface{}, string, error) {
resp, err := dsconn.DescribeDirectories(&directoryservice.DescribeDirectoriesInput{
DirectoryIds: []*string{aws.String(d.Id())},
@ -427,28 +435,23 @@ func resourceAwsDirectoryServiceDirectoryRead(d *schema.ResourceData, meta inter
dir := out.DirectoryDescriptions[0]
log.Printf("[DEBUG] Received DS directory: %s", dir)
d.Set("access_url", *dir.AccessUrl)
d.Set("alias", *dir.Alias)
if dir.Description != nil {
d.Set("description", *dir.Description)
}
d.Set("access_url", dir.AccessUrl)
d.Set("alias", dir.Alias)
d.Set("description", dir.Description)
if *dir.Type == "ADConnector" {
if *dir.Type == directoryservice.DirectoryTypeAdconnector {
d.Set("dns_ip_addresses", schema.NewSet(schema.HashString, flattenStringList(dir.ConnectSettings.ConnectIps)))
} else {
d.Set("dns_ip_addresses", schema.NewSet(schema.HashString, flattenStringList(dir.DnsIpAddrs)))
}
d.Set("name", *dir.Name)
if dir.ShortName != nil {
d.Set("short_name", *dir.ShortName)
}
if dir.Size != nil {
d.Set("size", *dir.Size)
}
d.Set("type", *dir.Type)
d.Set("name", dir.Name)
d.Set("short_name", dir.ShortName)
d.Set("size", dir.Size)
d.Set("edition", dir.Edition)
d.Set("type", dir.Type)
d.Set("vpc_settings", flattenDSVpcSettings(dir.VpcSettings))
d.Set("connect_settings", flattenDSConnectSettings(dir.DnsIpAddrs, dir.ConnectSettings))
d.Set("enable_sso", *dir.SsoEnabled)
d.Set("enable_sso", dir.SsoEnabled)
if dir.VpcSettings != nil {
d.Set("security_group_id", *dir.VpcSettings.SecurityGroupId)
@ -481,21 +484,24 @@ func resourceAwsDirectoryServiceDirectoryDelete(d *schema.ResourceData, meta int
// Wait for deletion
log.Printf("[DEBUG] Waiting for DS (%q) to be deleted", d.Id())
stateConf := &resource.StateChangeConf{
Pending: []string{"Deleting"},
Target: []string{"Deleted"},
Pending: []string{
directoryservice.DirectoryStageActive,
directoryservice.DirectoryStageDeleting,
},
Target: []string{directoryservice.DirectoryStageDeleted},
Refresh: func() (interface{}, string, error) {
resp, err := dsconn.DescribeDirectories(&directoryservice.DescribeDirectoriesInput{
DirectoryIds: []*string{aws.String(d.Id())},
})
if err != nil {
if dserr, ok := err.(awserr.Error); ok && dserr.Code() == "EntityDoesNotExistException" {
return 42, "Deleted", nil
if isAWSErr(err, directoryservice.ErrCodeEntityDoesNotExistException, "") {
return 42, directoryservice.DirectoryStageDeleted, nil
}
return nil, "error", err
}
if len(resp.DirectoryDescriptions) == 0 {
return 42, "Deleted", nil
return 42, directoryservice.DirectoryStageDeleted, nil
}
ds := resp.DirectoryDescriptions[0]

View File

@ -2,9 +2,11 @@ package aws
import (
"fmt"
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/service/directconnect"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
@ -14,25 +16,34 @@ func resourceAwsDxConnection() *schema.Resource {
return &schema.Resource{
Create: resourceAwsDxConnectionCreate,
Read: resourceAwsDxConnectionRead,
Update: resourceAwsDxConnectionUpdate,
Delete: resourceAwsDxConnectionDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
"arn": {
Type: schema.TypeString,
Computed: true,
},
"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"bandwidth": &schema.Schema{
"bandwidth": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateDxConnectionBandWidth,
},
"location": &schema.Schema{
"location": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"tags": tagsSchema(),
},
}
}
@ -40,53 +51,105 @@ func resourceAwsDxConnection() *schema.Resource {
func resourceAwsDxConnectionCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).dxconn
input := &directconnect.CreateConnectionInput{
req := &directconnect.CreateConnectionInput{
Bandwidth: aws.String(d.Get("bandwidth").(string)),
ConnectionName: aws.String(d.Get("name").(string)),
Location: aws.String(d.Get("location").(string)),
}
resp, err := conn.CreateConnection(input)
log.Printf("[DEBUG] Creating Direct Connect connection: %#v", req)
resp, err := conn.CreateConnection(req)
if err != nil {
return err
}
d.SetId(*resp.ConnectionId)
return resourceAwsDxConnectionRead(d, meta)
d.SetId(aws.StringValue(resp.ConnectionId))
return resourceAwsDxConnectionUpdate(d, meta)
}
func resourceAwsDxConnectionRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).dxconn
connectionId := d.Id()
input := &directconnect.DescribeConnectionsInput{
ConnectionId: aws.String(connectionId),
}
resp, err := conn.DescribeConnections(input)
resp, err := conn.DescribeConnections(&directconnect.DescribeConnectionsInput{
ConnectionId: aws.String(d.Id()),
})
if err != nil {
if isNoSuchDxConnectionErr(err) {
log.Printf("[WARN] Direct Connect connection (%s) not found, removing from state", d.Id())
d.SetId("")
return nil
}
return err
}
if len(resp.Connections) < 1 {
log.Printf("[WARN] Direct Connect connection (%s) not found, removing from state", d.Id())
d.SetId("")
return nil
}
if len(resp.Connections) != 1 {
return fmt.Errorf("[ERROR] Number of DX Connection (%s) isn't one, got %d", connectionId, len(resp.Connections))
return fmt.Errorf("[ERROR] Number of Direct Connect connections (%s) isn't one, got %d", d.Id(), len(resp.Connections))
}
if d.Id() != *resp.Connections[0].ConnectionId {
return fmt.Errorf("[ERROR] DX Connection (%s) not found", connectionId)
connection := resp.Connections[0]
if d.Id() != aws.StringValue(connection.ConnectionId) {
return fmt.Errorf("[ERROR] Direct Connect connection (%s) not found", d.Id())
}
if aws.StringValue(connection.ConnectionState) == directconnect.ConnectionStateDeleted {
log.Printf("[WARN] Direct Connect connection (%s) not found, removing from state", d.Id())
d.SetId("")
return nil
}
arn := arn.ARN{
Partition: meta.(*AWSClient).partition,
Region: meta.(*AWSClient).region,
Service: "directconnect",
AccountID: meta.(*AWSClient).accountid,
Resource: fmt.Sprintf("dxcon/%s", d.Id()),
}.String()
d.Set("arn", arn)
d.Set("name", connection.ConnectionName)
d.Set("bandwidth", connection.Bandwidth)
d.Set("location", connection.Location)
if err := getTagsDX(conn, d, arn); err != nil {
return err
}
return nil
}
func resourceAwsDxConnectionUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).dxconn
arn := arn.ARN{
Partition: meta.(*AWSClient).partition,
Region: meta.(*AWSClient).region,
Service: "directconnect",
AccountID: meta.(*AWSClient).accountid,
Resource: fmt.Sprintf("dxcon/%s", d.Id()),
}.String()
if err := setTagsDX(conn, d, arn); err != nil {
return err
}
return resourceAwsDxConnectionRead(d, meta)
}
func resourceAwsDxConnectionDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).dxconn
input := &directconnect.DeleteConnectionInput{
log.Printf("[DEBUG] Deleting Direct Connect connection: %s", d.Id())
_, err := conn.DeleteConnection(&directconnect.DeleteConnectionInput{
ConnectionId: aws.String(d.Id()),
}
_, err := conn.DeleteConnection(input)
})
if err != nil {
if isNoSuchDxConnectionErr(err) {
return nil
}
return err
}
deleteStateConf := &resource.StateChangeConf{
Pending: []string{directconnect.ConnectionStatePending, directconnect.ConnectionStateOrdering, directconnect.ConnectionStateAvailable, directconnect.ConnectionStateRequested, directconnect.ConnectionStateDeleting},
Target: []string{directconnect.ConnectionStateDeleted},
@ -97,9 +160,9 @@ func resourceAwsDxConnectionDelete(d *schema.ResourceData, meta interface{}) err
}
_, err = deleteStateConf.WaitForState()
if err != nil {
return fmt.Errorf("Error waiting for Dx Connection (%s) to be deleted: %s", d.Id(), err)
return fmt.Errorf("Error waiting for Direct Connect connection (%s) to be deleted: %s", d.Id(), err)
}
d.SetId("")
return nil
}
@ -112,6 +175,13 @@ func dxConnectionRefreshStateFunc(conn *directconnect.DirectConnect, connId stri
if err != nil {
return nil, "failed", err
}
if len(resp.Connections) < 1 {
return resp, directconnect.ConnectionStateDeleted, nil
}
return resp, *resp.Connections[0].ConnectionState, nil
}
}
func isNoSuchDxConnectionErr(err error) bool {
return isAWSErr(err, "DirectConnectClientException", "Could not find Connection with ID")
}

View File

@ -2,9 +2,11 @@ package aws
import (
"fmt"
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/service/directconnect"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
@ -16,33 +18,44 @@ func resourceAwsDxLag() *schema.Resource {
Read: resourceAwsDxLagRead,
Update: resourceAwsDxLagUpdate,
Delete: resourceAwsDxLagDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
"arn": {
Type: schema.TypeString,
Computed: true,
},
"name": {
Type: schema.TypeString,
Required: true,
},
"connections_bandwidth": &schema.Schema{
"connections_bandwidth": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateDxConnectionBandWidth,
},
"location": &schema.Schema{
"location": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"number_of_connections": &schema.Schema{
"number_of_connections": {
Type: schema.TypeInt,
Required: true,
Optional: true,
Computed: true,
ForceNew: true,
Deprecated: "Use aws_dx_connection and aws_dx_connection_association resources instead. " +
"Default connections will be removed as part of LAG creation automatically in future versions.",
},
"force_destroy": &schema.Schema{
"force_destroy": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"tags": tagsSchema(),
},
}
}
@ -50,89 +63,164 @@ func resourceAwsDxLag() *schema.Resource {
func resourceAwsDxLagCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).dxconn
input := &directconnect.CreateLagInput{
var noOfConnections int
if v, ok := d.GetOk("number_of_connections"); ok {
noOfConnections = v.(int)
} else {
noOfConnections = 1
}
req := &directconnect.CreateLagInput{
ConnectionsBandwidth: aws.String(d.Get("connections_bandwidth").(string)),
LagName: aws.String(d.Get("name").(string)),
Location: aws.String(d.Get("location").(string)),
NumberOfConnections: aws.Int64(int64(d.Get("number_of_connections").(int))),
NumberOfConnections: aws.Int64(int64(noOfConnections)),
}
resp, err := conn.CreateLag(input)
log.Printf("[DEBUG] Creating Direct Connect LAG: %#v", req)
resp, err := conn.CreateLag(req)
if err != nil {
return err
}
d.SetId(*resp.LagId)
return resourceAwsDxLagRead(d, meta)
// TODO: Remove default connection(s) automatically provisioned by AWS
// per NumberOfConnections
d.SetId(aws.StringValue(resp.LagId))
return resourceAwsDxLagUpdate(d, meta)
}
func resourceAwsDxLagRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).dxconn
lagId := d.Id()
input := &directconnect.DescribeLagsInput{
LagId: aws.String(lagId),
}
resp, err := conn.DescribeLags(input)
resp, err := conn.DescribeLags(&directconnect.DescribeLagsInput{
LagId: aws.String(d.Id()),
})
if err != nil {
if isNoSuchDxLagErr(err) {
log.Printf("[WARN] Direct Connect LAG (%s) not found, removing from state", d.Id())
d.SetId("")
return nil
}
return err
}
if len(resp.Lags) < 1 {
log.Printf("[WARN] Direct Connect LAG (%s) not found, removing from state", d.Id())
d.SetId("")
return nil
}
if len(resp.Lags) != 1 {
return fmt.Errorf("[ERROR] Number of DX Lag (%s) isn't one, got %d", lagId, len(resp.Lags))
return fmt.Errorf("[ERROR] Number of Direct Connect LAGs (%s) isn't one, got %d", d.Id(), len(resp.Lags))
}
if d.Id() != *resp.Lags[0].LagId {
return fmt.Errorf("[ERROR] DX Lag (%s) not found", lagId)
lag := resp.Lags[0]
if d.Id() != aws.StringValue(lag.LagId) {
return fmt.Errorf("[ERROR] Direct Connect LAG (%s) not found", d.Id())
}
if aws.StringValue(lag.LagState) == directconnect.LagStateDeleted {
log.Printf("[WARN] Direct Connect LAG (%s) not found, removing from state", d.Id())
d.SetId("")
return nil
}
arn := arn.ARN{
Partition: meta.(*AWSClient).partition,
Region: meta.(*AWSClient).region,
Service: "directconnect",
AccountID: meta.(*AWSClient).accountid,
Resource: fmt.Sprintf("dxlag/%s", d.Id()),
}.String()
d.Set("arn", arn)
d.Set("name", lag.LagName)
d.Set("connections_bandwidth", lag.ConnectionsBandwidth)
d.Set("location", lag.Location)
if err := getTagsDX(conn, d, arn); err != nil {
return err
}
return nil
}
func resourceAwsDxLagUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).dxconn
input := &directconnect.UpdateLagInput{
LagId: aws.String(d.Id()),
}
d.Partial(true)
if d.HasChange("name") {
input.LagName = aws.String(d.Get("name").(string))
req := &directconnect.UpdateLagInput{
LagId: aws.String(d.Id()),
LagName: aws.String(d.Get("name").(string)),
}
log.Printf("[DEBUG] Updating Direct Connect LAG: %#v", req)
_, err := conn.UpdateLag(req)
if err != nil {
return err
} else {
d.SetPartial("name")
}
}
_, err := conn.UpdateLag(input)
if err != nil {
arn := arn.ARN{
Partition: meta.(*AWSClient).partition,
Region: meta.(*AWSClient).region,
Service: "directconnect",
AccountID: meta.(*AWSClient).accountid,
Resource: fmt.Sprintf("dxlag/%s", d.Id()),
}.String()
if err := setTagsDX(conn, d, arn); err != nil {
return err
} else {
d.SetPartial("tags")
}
return nil
d.Partial(false)
return resourceAwsDxLagRead(d, meta)
}
func resourceAwsDxLagDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).dxconn
if d.Get("force_destroy").(bool) {
input := &directconnect.DescribeLagsInput{
resp, err := conn.DescribeLags(&directconnect.DescribeLagsInput{
LagId: aws.String(d.Id()),
}
resp, err := conn.DescribeLags(input)
})
if err != nil {
if isNoSuchDxLagErr(err) {
return nil
}
return err
}
if len(resp.Lags) < 1 {
return nil
}
lag := resp.Lags[0]
for _, v := range lag.Connections {
dcinput := &directconnect.DeleteConnectionInput{
log.Printf("[DEBUG] Deleting Direct Connect connection: %s", aws.StringValue(v.ConnectionId))
_, err := conn.DeleteConnection(&directconnect.DeleteConnectionInput{
ConnectionId: v.ConnectionId,
}
if _, err := conn.DeleteConnection(dcinput); err != nil {
})
if err != nil && !isNoSuchDxConnectionErr(err) {
return err
}
}
}
input := &directconnect.DeleteLagInput{
log.Printf("[DEBUG] Deleting Direct Connect LAG: %s", d.Id())
_, err := conn.DeleteLag(&directconnect.DeleteLagInput{
LagId: aws.String(d.Id()),
}
_, err := conn.DeleteLag(input)
})
if err != nil {
if isNoSuchDxLagErr(err) {
return nil
}
return err
}
deleteStateConf := &resource.StateChangeConf{
Pending: []string{directconnect.LagStateAvailable, directconnect.LagStateRequested, directconnect.LagStatePending, directconnect.LagStateDeleting},
Target: []string{directconnect.LagStateDeleted},
@ -143,9 +231,9 @@ func resourceAwsDxLagDelete(d *schema.ResourceData, meta interface{}) error {
}
_, err = deleteStateConf.WaitForState()
if err != nil {
return fmt.Errorf("Error waiting for Dx Lag (%s) to be deleted: %s", d.Id(), err)
return fmt.Errorf("Error waiting for Direct Connect LAG (%s) to be deleted: %s", d.Id(), err)
}
d.SetId("")
return nil
}
@ -158,6 +246,13 @@ func dxLagRefreshStateFunc(conn *directconnect.DirectConnect, lagId string) reso
if err != nil {
return nil, "failed", err
}
if len(resp.Lags) < 1 {
return resp, directconnect.LagStateDeleted, nil
}
return resp, *resp.Lags[0].LagState, nil
}
}
func isNoSuchDxLagErr(err error) bool {
return isAWSErr(err, "DirectConnectClientException", "Could not find Lag with ID")
}

View File

@ -440,9 +440,7 @@ func resourceAwsDynamoDbTableDelete(d *schema.ResourceData, meta interface{}) er
log.Printf("[DEBUG] DynamoDB delete table: %s", d.Id())
_, err := conn.DeleteTable(&dynamodb.DeleteTableInput{
TableName: aws.String(d.Id()),
})
err := deleteAwsDynamoDbTable(d.Id(), conn)
if err != nil {
if isAWSErr(err, dynamodb.ErrCodeResourceNotFoundException, "Requested resource not found: Table: ") {
return nil
@ -477,6 +475,35 @@ func resourceAwsDynamoDbTableDelete(d *schema.ResourceData, meta interface{}) er
return err
}
func deleteAwsDynamoDbTable(tableName string, conn *dynamodb.DynamoDB) error {
input := &dynamodb.DeleteTableInput{
TableName: aws.String(tableName),
}
return resource.Retry(1*time.Minute, func() *resource.RetryError {
_, err := conn.DeleteTable(input)
if err != nil {
// Subscriber limit exceeded: Only 10 tables can be created, updated, or deleted simultaneously
if isAWSErr(err, dynamodb.ErrCodeLimitExceededException, "simultaneously") {
return resource.RetryableError(err)
}
// This handles multiple scenarios in the DynamoDB API:
// 1. Updating a table immediately before deletion may return:
// ResourceInUseException: Attempt to change a resource which is still in use: Table is being updated:
// 2. Removing a table from a DynamoDB global table may return:
// ResourceInUseException: Attempt to change a resource which is still in use: Table is being deleted:
if isAWSErr(err, dynamodb.ErrCodeResourceInUseException, "") {
return resource.RetryableError(err)
}
if isAWSErr(err, dynamodb.ErrCodeResourceNotFoundException, "Requested resource not found: Table: ") {
return resource.NonRetryableError(err)
}
return resource.NonRetryableError(err)
}
return nil
})
}
func updateDynamoDbTimeToLive(d *schema.ResourceData, conn *dynamodb.DynamoDB) error {
toBeEnabled := false
attributeName := ""

View File

@ -356,6 +356,16 @@ func resourceAwsEcsServiceRead(d *schema.ResourceData, meta interface{}) error {
}
return resource.NonRetryableError(err)
}
if d.IsNewResource() && len(out.Services) < 1 {
return resource.RetryableError(fmt.Errorf("No ECS service found: %q", d.Id()))
}
service := out.Services[0]
if d.IsNewResource() && *service.Status == "INACTIVE" {
return resource.RetryableError(fmt.Errorf("ECS service currently INACTIVE: %q", d.Id()))
}
return nil
})
if err != nil {
@ -363,7 +373,7 @@ func resourceAwsEcsServiceRead(d *schema.ResourceData, meta interface{}) error {
}
if len(out.Services) < 1 {
log.Printf("[DEBUG] Removing ECS service %s (%s) because it's gone", d.Get("name").(string), d.Id())
log.Printf("[WARN] Removing ECS service %s (%s) because it's gone", d.Get("name").(string), d.Id())
d.SetId("")
return nil
}
@ -372,7 +382,7 @@ func resourceAwsEcsServiceRead(d *schema.ResourceData, meta interface{}) error {
// Status==INACTIVE means deleted service
if *service.Status == "INACTIVE" {
log.Printf("[DEBUG] Removing ECS service %q because it's INACTIVE", *service.ServiceArn)
log.Printf("[WARN] Removing ECS service %q because it's INACTIVE", *service.ServiceArn)
d.SetId("")
return nil
}
@ -418,7 +428,7 @@ func resourceAwsEcsServiceRead(d *schema.ResourceData, meta interface{}) error {
}
if service.LoadBalancers != nil {
d.Set("load_balancers", flattenEcsLoadBalancers(service.LoadBalancers))
d.Set("load_balancer", flattenEcsLoadBalancers(service.LoadBalancers))
}
if err := d.Set("placement_strategy", flattenPlacementStrategy(service.PlacementStrategy)); err != nil {

View File

@ -405,6 +405,9 @@ func resourceAwsElasticSearchDomainCreate(d *schema.ResourceData, meta interface
if isAWSErr(err, "ValidationException", "enable a service-linked role to give Amazon ES permissions") {
return resource.RetryableError(err)
}
if isAWSErr(err, "ValidationException", "Domain is still being deleted") {
return resource.RetryableError(err)
}
return resource.NonRetryableError(err)
}

View File

@ -15,6 +15,8 @@ import (
"github.com/aws/aws-sdk-go/service/emr"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/structure"
"github.com/hashicorp/terraform/helper/validation"
)
func resourceAwsEMRCluster() *schema.Resource {
@ -173,6 +175,16 @@ func resourceAwsEMRCluster() *schema.Resource {
Optional: true,
Default: 0,
},
"autoscaling_policy": {
Type: schema.TypeString,
Optional: true,
DiffSuppressFunc: suppressEquivalentJsonDiffs,
ValidateFunc: validateJsonString,
StateFunc: func(v interface{}) string {
jsonString, _ := structure.NormalizeJsonString(v)
return jsonString
},
},
"instance_role": {
Type: schema.TypeString,
Required: true,
@ -225,6 +237,16 @@ func resourceAwsEMRCluster() *schema.Resource {
ForceNew: true,
Required: true,
},
"scale_down_behavior": {
Type: schema.TypeString,
ForceNew: true,
Optional: true,
Computed: true,
ValidateFunc: validation.StringInSlice([]string{
emr.ScaleDownBehaviorTerminateAtInstanceHour,
emr.ScaleDownBehaviorTerminateAtTaskCompletion,
}, false),
},
"security_configuration": {
Type: schema.TypeString,
ForceNew: true,
@ -352,10 +374,15 @@ func resourceAwsEMRClusterCreate(d *schema.ResourceData, meta interface{}) error
if v, ok := d.GetOk("log_uri"); ok {
params.LogUri = aws.String(v.(string))
}
if v, ok := d.GetOk("autoscaling_role"); ok {
params.AutoScalingRole = aws.String(v.(string))
}
if v, ok := d.GetOk("scale_down_behavior"); ok {
params.ScaleDownBehavior = aws.String(v.(string))
}
if v, ok := d.GetOk("security_configuration"); ok {
params.SecurityConfiguration = aws.String(v.(string))
}
@ -475,6 +502,7 @@ func resourceAwsEMRClusterRead(d *schema.ResourceData, meta interface{}) error {
}
d.Set("name", cluster.Name)
d.Set("service_role", cluster.ServiceRole)
d.Set("security_configuration", cluster.SecurityConfiguration)
d.Set("autoscaling_role", cluster.AutoScalingRole)
@ -484,6 +512,7 @@ func resourceAwsEMRClusterRead(d *schema.ResourceData, meta interface{}) error {
d.Set("visible_to_all_users", cluster.VisibleToAllUsers)
d.Set("tags", tagsToMapEMR(cluster.Tags))
d.Set("ebs_root_volume_size", cluster.EbsRootVolumeSize)
d.Set("scale_down_behavior", cluster.ScaleDownBehavior)
if cluster.CustomAmiId != nil {
d.Set("custom_ami_id", cluster.CustomAmiId)
@ -750,6 +779,13 @@ func flattenInstanceGroups(igs []*emr.InstanceGroup) []map[string]interface{} {
attrs["instance_count"] = *ig.RequestedInstanceCount
attrs["instance_role"] = *ig.InstanceGroupType
attrs["instance_type"] = *ig.InstanceType
if ig.AutoScalingPolicy != nil {
attrs["autoscaling_policy"] = *ig.AutoScalingPolicy
} else {
attrs["autoscaling_policy"] = ""
}
attrs["name"] = *ig.Name
result = append(result, attrs)
}
@ -897,7 +933,7 @@ func expandBootstrapActions(bootstrapActions []interface{}) []*emr.BootstrapActi
}
func expandInstanceGroupConfigs(instanceGroupConfigs []interface{}) []*emr.InstanceGroupConfig {
configsOut := []*emr.InstanceGroupConfig{}
instanceGroupConfig := []*emr.InstanceGroupConfig{}
for _, raw := range instanceGroupConfigs {
configAttributes := raw.(map[string]interface{})
@ -912,42 +948,68 @@ func expandInstanceGroupConfigs(instanceGroupConfigs []interface{}) []*emr.Insta
InstanceCount: aws.Int64(int64(configInstanceCount)),
}
if bidPrice, ok := configAttributes["bid_price"]; ok {
if bidPrice != "" {
config.BidPrice = aws.String(bidPrice.(string))
config.Market = aws.String("SPOT")
} else {
config.Market = aws.String("ON_DEMAND")
}
}
applyBidPrice(config, configAttributes)
applyEbsConfig(configAttributes, config)
applyAutoScalingPolicy(configAttributes, config)
if rawEbsConfigs, ok := configAttributes["ebs_config"]; ok {
ebsConfig := &emr.EbsConfiguration{}
ebsBlockDeviceConfigs := make([]*emr.EbsBlockDeviceConfig, 0)
for _, rawEbsConfig := range rawEbsConfigs.(*schema.Set).List() {
rawEbsConfig := rawEbsConfig.(map[string]interface{})
ebsBlockDeviceConfig := &emr.EbsBlockDeviceConfig{
VolumesPerInstance: aws.Int64(int64(rawEbsConfig["volumes_per_instance"].(int))),
VolumeSpecification: &emr.VolumeSpecification{
SizeInGB: aws.Int64(int64(rawEbsConfig["size"].(int))),
VolumeType: aws.String(rawEbsConfig["type"].(string)),
},
}
if v, ok := rawEbsConfig["iops"].(int); ok && v != 0 {
ebsBlockDeviceConfig.VolumeSpecification.Iops = aws.Int64(int64(v))
}
ebsBlockDeviceConfigs = append(ebsBlockDeviceConfigs, ebsBlockDeviceConfig)
}
ebsConfig.EbsBlockDeviceConfigs = ebsBlockDeviceConfigs
config.EbsConfiguration = ebsConfig
}
configsOut = append(configsOut, config)
instanceGroupConfig = append(instanceGroupConfig, config)
}
return configsOut
return instanceGroupConfig
}
func applyBidPrice(config *emr.InstanceGroupConfig, configAttributes map[string]interface{}) {
if bidPrice, ok := configAttributes["bid_price"]; ok {
if bidPrice != "" {
config.BidPrice = aws.String(bidPrice.(string))
config.Market = aws.String("SPOT")
} else {
config.Market = aws.String("ON_DEMAND")
}
}
}
func applyEbsConfig(configAttributes map[string]interface{}, config *emr.InstanceGroupConfig) {
if rawEbsConfigs, ok := configAttributes["ebs_config"]; ok {
ebsConfig := &emr.EbsConfiguration{}
ebsBlockDeviceConfigs := make([]*emr.EbsBlockDeviceConfig, 0)
for _, rawEbsConfig := range rawEbsConfigs.(*schema.Set).List() {
rawEbsConfig := rawEbsConfig.(map[string]interface{})
ebsBlockDeviceConfig := &emr.EbsBlockDeviceConfig{
VolumesPerInstance: aws.Int64(int64(rawEbsConfig["volumes_per_instance"].(int))),
VolumeSpecification: &emr.VolumeSpecification{
SizeInGB: aws.Int64(int64(rawEbsConfig["size"].(int))),
VolumeType: aws.String(rawEbsConfig["type"].(string)),
},
}
if v, ok := rawEbsConfig["iops"].(int); ok && v != 0 {
ebsBlockDeviceConfig.VolumeSpecification.Iops = aws.Int64(int64(v))
}
ebsBlockDeviceConfigs = append(ebsBlockDeviceConfigs, ebsBlockDeviceConfig)
}
ebsConfig.EbsBlockDeviceConfigs = ebsBlockDeviceConfigs
config.EbsConfiguration = ebsConfig
}
}
func applyAutoScalingPolicy(configAttributes map[string]interface{}, config *emr.InstanceGroupConfig) {
if rawAutoScalingPolicy, ok := configAttributes["autoscaling_policy"]; ok {
autoScalingConfig, _ := expandAutoScalingPolicy(rawAutoScalingPolicy.(string))
config.AutoScalingPolicy = autoScalingConfig
}
}
func expandAutoScalingPolicy(rawDefinitions string) (*emr.AutoScalingPolicy, error) {
var policy *emr.AutoScalingPolicy
err := json.Unmarshal([]byte(rawDefinitions), &policy)
if err != nil {
return nil, fmt.Errorf("Error decoding JSON: %s", err)
}
return policy, nil
}
func expandConfigures(input string) []*emr.Configuration {

View File

@ -0,0 +1,181 @@
package aws
import (
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/gamelift"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/validation"
)
// resourceAwsGameliftAlias returns the schema for the aws_gamelift_alias
// resource. An alias has a required routing_strategy block whose type must
// be SIMPLE or TERMINAL (enforced by the StringInSlice validation below);
// the alias ARN is computed by AWS.
func resourceAwsGameliftAlias() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsGameliftAliasCreate,
		Read:   resourceAwsGameliftAliasRead,
		Update: resourceAwsGameliftAliasUpdate,
		Delete: resourceAwsGameliftAliasDelete,

		// Plain pass-through import: the import ID is the alias ID.
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},

		Schema: map[string]*schema.Schema{
			"name": {
				Type:         schema.TypeString,
				Required:     true,
				ValidateFunc: validation.StringLenBetween(1, 1024),
			},
			"description": {
				Type:         schema.TypeString,
				Optional:     true,
				ValidateFunc: validation.StringLenBetween(1, 1024),
			},
			// Exactly one routing strategy block is required.
			"routing_strategy": {
				Type:     schema.TypeList,
				Required: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"fleet_id": {
							Type:     schema.TypeString,
							Optional: true,
						},
						"message": {
							Type:     schema.TypeString,
							Optional: true,
						},
						"type": {
							Type:     schema.TypeString,
							Required: true,
							ValidateFunc: validation.StringInSlice([]string{
								gamelift.RoutingStrategyTypeSimple,
								gamelift.RoutingStrategyTypeTerminal,
							}, false),
						},
					},
				},
			},
			// Assigned by AWS; read back in resourceAwsGameliftAliasRead.
			"arn": {
				Type:     schema.TypeString,
				Computed: true,
			},
		},
	}
}
// resourceAwsGameliftAliasCreate creates the alias via the Gamelift API,
// stores the returned alias ID, and delegates to Read to populate state.
func resourceAwsGameliftAliasCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).gameliftconn

	input := gamelift.CreateAliasInput{
		Name:            aws.String(d.Get("name").(string)),
		RoutingStrategy: expandGameliftRoutingStrategy(d.Get("routing_strategy").([]interface{})),
	}
	if desc, ok := d.GetOk("description"); ok {
		input.Description = aws.String(desc.(string))
	}
	log.Printf("[INFO] Creating Gamelift Alias: %s", input)
	out, err := conn.CreateAlias(&input)
	if err != nil {
		return err
	}
	d.SetId(*out.Alias.AliasId)

	return resourceAwsGameliftAliasRead(d, meta)
}
// resourceAwsGameliftAliasRead refreshes state from DescribeAlias. A
// NotFound error removes the alias from state and returns nil so Terraform
// plans a re-create instead of failing.
//
// Fix: the WARN log previously ran after d.SetId(""), so d.Id() was already
// empty and the message never contained the alias ID. Log first, then
// clear the ID.
func resourceAwsGameliftAliasRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).gameliftconn

	log.Printf("[INFO] Describing Gamelift Alias: %s", d.Id())
	out, err := conn.DescribeAlias(&gamelift.DescribeAliasInput{
		AliasId: aws.String(d.Id()),
	})
	if err != nil {
		if isAWSErr(err, gamelift.ErrCodeNotFoundException, "") {
			log.Printf("[WARN] Gamelift Alias (%s) not found, removing from state", d.Id())
			d.SetId("")
			return nil
		}
		return err
	}
	a := out.Alias

	d.Set("arn", a.AliasArn)
	d.Set("description", a.Description)
	d.Set("name", a.Name)
	d.Set("routing_strategy", flattenGameliftRoutingStrategy(a.RoutingStrategy))

	return nil
}
// resourceAwsGameliftAliasUpdate pushes the full current configuration
// (name, description, routing strategy) to UpdateAlias, then re-reads state.
func resourceAwsGameliftAliasUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).gameliftconn

	log.Printf("[INFO] Updating Gamelift Alias: %s", d.Id())
	input := gamelift.UpdateAliasInput{
		AliasId:         aws.String(d.Id()),
		Name:            aws.String(d.Get("name").(string)),
		Description:     aws.String(d.Get("description").(string)),
		RoutingStrategy: expandGameliftRoutingStrategy(d.Get("routing_strategy").([]interface{})),
	}
	if _, err := conn.UpdateAlias(&input); err != nil {
		return err
	}

	return resourceAwsGameliftAliasRead(d, meta)
}
// resourceAwsGameliftAliasDelete deletes the alias; any API error is
// returned to the caller unmodified.
func resourceAwsGameliftAliasDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).gameliftconn

	log.Printf("[INFO] Deleting Gamelift Alias: %s", d.Id())
	if _, err := conn.DeleteAlias(&gamelift.DeleteAliasInput{
		AliasId: aws.String(d.Id()),
	}); err != nil {
		return err
	}
	return nil
}
// expandGameliftRoutingStrategy converts the single-element routing_strategy
// config list into an API RoutingStrategy; returns nil for an empty list.
// fleet_id and message are attached only when non-empty.
func expandGameliftRoutingStrategy(cfg []interface{}) *gamelift.RoutingStrategy {
	if len(cfg) == 0 {
		return nil
	}
	attrs := cfg[0].(map[string]interface{})
	out := &gamelift.RoutingStrategy{
		Type: aws.String(attrs["type"].(string)),
	}
	if fleetID, ok := attrs["fleet_id"].(string); ok && fleetID != "" {
		out.FleetId = aws.String(fleetID)
	}
	if msg, ok := attrs["message"].(string); ok && msg != "" {
		out.Message = aws.String(msg)
	}
	return out
}
// flattenGameliftRoutingStrategy converts an API RoutingStrategy back into
// the single-element schema list form; a nil strategy yields an empty list.
func flattenGameliftRoutingStrategy(rs *gamelift.RoutingStrategy) []interface{} {
	if rs == nil {
		return []interface{}{}
	}
	attrs := map[string]interface{}{
		"type": *rs.Type,
	}
	if rs.FleetId != nil {
		attrs["fleet_id"] = *rs.FleetId
	}
	if rs.Message != nil {
		attrs["message"] = *rs.Message
	}
	return []interface{}{attrs}
}

View File

@ -0,0 +1,604 @@
package aws
import (
"fmt"
"log"
"reflect"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/gamelift"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/validation"
)
// resourceAwsGameliftFleet returns the schema for the aws_gamelift_fleet
// resource. build_id and ec2_instance_type force re-creation on change.
// Fleet activation is slow, hence the 70 minute create timeout consumed by
// the state waiter in resourceAwsGameliftFleetCreate.
func resourceAwsGameliftFleet() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsGameliftFleetCreate,
		Read:   resourceAwsGameliftFleetRead,
		Update: resourceAwsGameliftFleetUpdate,
		Delete: resourceAwsGameliftFleetDelete,

		Timeouts: &schema.ResourceTimeout{
			Create: schema.DefaultTimeout(70 * time.Minute),
			Delete: schema.DefaultTimeout(5 * time.Minute),
		},

		Schema: map[string]*schema.Schema{
			// Assigned by AWS once the fleet exists.
			"arn": {
				Type:     schema.TypeString,
				Computed: true,
			},
			// Changing the build requires a new fleet.
			"build_id": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			// Changing the instance type requires a new fleet.
			"ec2_instance_type": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"name": {
				Type:         schema.TypeString,
				Required:     true,
				ValidateFunc: validation.StringLenBetween(1, 1024),
			},
			"description": {
				Type:         schema.TypeString,
				Optional:     true,
				ValidateFunc: validation.StringLenBetween(1, 1024),
			},
			// Inbound port ranges; diffed in resourceAwsGameliftFleetUpdate
			// via UpdateFleetPortSettings.
			"ec2_inbound_permission": {
				Type:     schema.TypeList,
				Optional: true,
				MaxItems: 50,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"from_port": {
							Type:         schema.TypeInt,
							Required:     true,
							ValidateFunc: validation.IntBetween(1, 60000),
						},
						"ip_range": {
							Type:         schema.TypeString,
							Required:     true,
							ValidateFunc: validateCIDRNetworkAddress,
						},
						"protocol": {
							Type:     schema.TypeString,
							Required: true,
							ValidateFunc: validation.StringInSlice([]string{
								gamelift.IpProtocolTcp,
								gamelift.IpProtocolUdp,
							}, false),
						},
						"to_port": {
							Type:         schema.TypeInt,
							Required:     true,
							ValidateFunc: validation.IntBetween(1, 60000),
						},
					},
				},
			},
			// Reported by AWS; not configurable.
			"log_paths": {
				Type:     schema.TypeList,
				Computed: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},
			// Optional+Computed: AWS supplies a default group when unset.
			"metric_groups": {
				Type:     schema.TypeList,
				Optional: true,
				Computed: true,
				Elem: &schema.Schema{
					Type:         schema.TypeString,
					ValidateFunc: validation.StringLenBetween(1, 255),
				},
			},
			"new_game_session_protection_policy": {
				Type:     schema.TypeString,
				Optional: true,
				Default:  gamelift.ProtectionPolicyNoProtection,
				ValidateFunc: validation.StringInSlice([]string{
					gamelift.ProtectionPolicyNoProtection,
					gamelift.ProtectionPolicyFullProtection,
				}, false),
			},
			// Derived from the build; read-only.
			"operating_system": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"resource_creation_limit_policy": {
				Type:     schema.TypeList,
				MaxItems: 1,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"new_game_sessions_per_creator": {
							Type:         schema.TypeInt,
							Optional:     true,
							ValidateFunc: validation.IntAtLeast(0),
						},
						"policy_period_in_minutes": {
							Type:         schema.TypeInt,
							Optional:     true,
							ValidateFunc: validation.IntAtLeast(0),
						},
					},
				},
			},
			// Server process launch configuration; updated in place via
			// UpdateRuntimeConfiguration.
			"runtime_configuration": {
				Type:     schema.TypeList,
				MaxItems: 1,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"game_session_activation_timeout_seconds": {
							Type:         schema.TypeInt,
							Optional:     true,
							ValidateFunc: validation.IntBetween(1, 600),
						},
						"max_concurrent_game_session_activations": {
							Type:         schema.TypeInt,
							Optional:     true,
							ValidateFunc: validation.IntBetween(1, 2147483647),
						},
						"server_process": {
							Type:     schema.TypeList,
							Optional: true,
							MaxItems: 50,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"concurrent_executions": {
										Type:         schema.TypeInt,
										Required:     true,
										ValidateFunc: validation.IntAtLeast(1),
									},
									"launch_path": {
										Type:         schema.TypeString,
										Required:     true,
										ValidateFunc: validation.StringLenBetween(1, 1024),
									},
									"parameters": {
										Type:         schema.TypeString,
										Optional:     true,
										ValidateFunc: validation.StringLenBetween(1, 1024),
									},
								},
							},
						},
					},
				},
			},
		},
	}
}
// resourceAwsGameliftFleetCreate creates the fleet, then blocks until it
// leaves its provisioning states and reaches ACTIVE (or the create timeout
// expires). When the wait fails, recent fleet failure events are appended
// to the returned error to aid debugging.
func resourceAwsGameliftFleetCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).gameliftconn

	input := gamelift.CreateFleetInput{
		BuildId:         aws.String(d.Get("build_id").(string)),
		EC2InstanceType: aws.String(d.Get("ec2_instance_type").(string)),
		Name:            aws.String(d.Get("name").(string)),
	}
	// Optional attributes are only attached when present in config.
	if v, ok := d.GetOk("description"); ok {
		input.Description = aws.String(v.(string))
	}
	if v, ok := d.GetOk("ec2_inbound_permission"); ok {
		input.EC2InboundPermissions = expandGameliftIpPermissions(v.([]interface{}))
	}
	if v, ok := d.GetOk("metric_groups"); ok {
		input.MetricGroups = expandStringList(v.([]interface{}))
	}
	if v, ok := d.GetOk("new_game_session_protection_policy"); ok {
		input.NewGameSessionProtectionPolicy = aws.String(v.(string))
	}
	if v, ok := d.GetOk("resource_creation_limit_policy"); ok {
		input.ResourceCreationLimitPolicy = expandGameliftResourceCreationLimitPolicy(v.([]interface{}))
	}
	if v, ok := d.GetOk("runtime_configuration"); ok {
		input.RuntimeConfiguration = expandGameliftRuntimeConfiguration(v.([]interface{}))
	}
	log.Printf("[INFO] Creating Gamelift Fleet: %s", input)
	out, err := conn.CreateFleet(&input)
	if err != nil {
		return err
	}
	// ID is set before waiting so a timeout still leaves the fleet tracked
	// in state.
	d.SetId(*out.FleetAttributes.FleetId)
	stateConf := &resource.StateChangeConf{
		Pending: []string{
			gamelift.FleetStatusActivating,
			gamelift.FleetStatusBuilding,
			gamelift.FleetStatusDownloading,
			gamelift.FleetStatusNew,
			gamelift.FleetStatusValidating,
		},
		Target:  []string{gamelift.FleetStatusActive},
		Timeout: d.Timeout(schema.TimeoutCreate),
		Refresh: func() (interface{}, string, error) {
			out, err := conn.DescribeFleetAttributes(&gamelift.DescribeFleetAttributesInput{
				FleetIds: aws.StringSlice([]string{d.Id()}),
			})
			if err != nil {
				// 42 is a throwaway non-nil result; the error carries the signal.
				return 42, "", err
			}
			attributes := out.FleetAttributes
			if len(attributes) < 1 {
				// Fleet not visible yet; nil result means "not found, keep polling".
				return nil, "", nil
			}
			if len(attributes) != 1 {
				return 42, "", fmt.Errorf("Expected exactly 1 Gamelift fleet, found %d under %q",
					len(attributes), d.Id())
			}
			fleet := attributes[0]
			return fleet, *fleet.Status, nil
		},
	}
	_, err = stateConf.WaitForState()
	if err != nil {
		// Enrich the timeout/state error with recent failure events.
		events, fErr := getGameliftFleetFailures(conn, d.Id())
		if fErr != nil {
			log.Printf("[WARN] Failed to poll fleet failures: %s", fErr)
		}
		if len(events) > 0 {
			return fmt.Errorf("%s Recent failures:\n%+v", err, events)
		}
		return err
	}
	return resourceAwsGameliftFleetRead(d, meta)
}
// resourceAwsGameliftFleetRead refreshes state from DescribeFleetAttributes.
// An empty result removes the fleet from state; more than one match for the
// ID is treated as an error.
func resourceAwsGameliftFleetRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).gameliftconn

	log.Printf("[INFO] Describing Gamelift Fleet: %s", d.Id())
	out, err := conn.DescribeFleetAttributes(&gamelift.DescribeFleetAttributesInput{
		FleetIds: aws.StringSlice([]string{d.Id()}),
	})
	if err != nil {
		return err
	}

	attributes := out.FleetAttributes
	switch {
	case len(attributes) < 1:
		log.Printf("[WARN] Gamelift Fleet (%s) not found, removing from state", d.Id())
		d.SetId("")
		return nil
	case len(attributes) > 1:
		return fmt.Errorf("Expected exactly 1 Gamelift fleet, found %d under %q",
			len(attributes), d.Id())
	}
	fleet := attributes[0]

	d.Set("build_id", fleet.BuildId)
	d.Set("description", fleet.Description)
	d.Set("arn", fleet.FleetArn)
	d.Set("log_paths", aws.StringValueSlice(fleet.LogPaths))
	d.Set("metric_groups", flattenStringList(fleet.MetricGroups))
	d.Set("name", fleet.Name)
	d.Set("new_game_session_protection_policy", fleet.NewGameSessionProtectionPolicy)
	d.Set("operating_system", fleet.OperatingSystem)
	d.Set("resource_creation_limit_policy", flattenGameliftResourceCreationLimitPolicy(fleet.ResourceCreationLimitPolicy))

	return nil
}
// resourceAwsGameliftFleetUpdate pushes changes through the three separate
// update APIs: fleet attributes, port settings (diffed against old config),
// and runtime configuration — each only when the relevant keys changed.
func resourceAwsGameliftFleetUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).gameliftconn

	log.Printf("[INFO] Updating Gamelift Fleet: %s", d.Id())

	attrsChanged := d.HasChange("description") || d.HasChange("metric_groups") || d.HasChange("name") ||
		d.HasChange("new_game_session_protection_policy") || d.HasChange("resource_creation_limit_policy")
	if attrsChanged {
		input := gamelift.UpdateFleetAttributesInput{
			Description:                    aws.String(d.Get("description").(string)),
			FleetId:                        aws.String(d.Id()),
			MetricGroups:                   expandStringList(d.Get("metric_groups").([]interface{})),
			Name:                           aws.String(d.Get("name").(string)),
			NewGameSessionProtectionPolicy: aws.String(d.Get("new_game_session_protection_policy").(string)),
			ResourceCreationLimitPolicy:    expandGameliftResourceCreationLimitPolicy(d.Get("resource_creation_limit_policy").([]interface{})),
		}
		if _, err := conn.UpdateFleetAttributes(&input); err != nil {
			return err
		}
	}

	if d.HasChange("ec2_inbound_permission") {
		oldPerms, newPerms := d.GetChange("ec2_inbound_permission")
		// Only authorize additions and revoke removals.
		authorizations, revocations := diffGameliftPortSettings(oldPerms.([]interface{}), newPerms.([]interface{}))
		input := gamelift.UpdateFleetPortSettingsInput{
			FleetId:                         aws.String(d.Id()),
			InboundPermissionAuthorizations: authorizations,
			InboundPermissionRevocations:    revocations,
		}
		if _, err := conn.UpdateFleetPortSettings(&input); err != nil {
			return err
		}
	}

	if d.HasChange("runtime_configuration") {
		input := gamelift.UpdateRuntimeConfigurationInput{
			FleetId:              aws.String(d.Id()),
			RuntimeConfiguration: expandGameliftRuntimeConfiguration(d.Get("runtime_configuration").([]interface{})),
		}
		if _, err := conn.UpdateRuntimeConfiguration(&input); err != nil {
			return err
		}
	}

	return resourceAwsGameliftFleetRead(d, meta)
}
// resourceAwsGameliftFleetDelete issues DeleteFleet, retrying while the
// fleet is still in a non-deletable status, then waits for the fleet to
// disappear from DescribeFleetAttributes.
func resourceAwsGameliftFleetDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).gameliftconn

	log.Printf("[INFO] Deleting Gamelift Fleet: %s", d.Id())
	// It can take ~ 1 hr as Gamelift will keep retrying on errors like
	// invalid launch path and remain in state when it can't be deleted :/
	input := gamelift.DeleteFleetInput{
		FleetId: aws.String(d.Id()),
	}
	inStatusMsg := fmt.Sprintf("Cannot delete fleet %s that is in status of ", d.Id())
	err := resource.Retry(60*time.Minute, func() *resource.RetryError {
		_, err := conn.DeleteFleet(&input)
		if err == nil {
			return nil
		}
		if isAWSErr(err, gamelift.ErrCodeInvalidRequestException, inStatusMsg) {
			return resource.RetryableError(err)
		}
		return resource.NonRetryableError(err)
	})
	if err != nil {
		return err
	}
	return waitForGameliftFleetToBeDeleted(conn, d.Id(), d.Timeout(schema.TimeoutDelete))
}
// waitForGameliftFleetToBeDeleted polls DescribeFleetAttributes until the
// fleet no longer appears (the empty Target list means "gone" — the refresh
// func returns a nil result once no attributes are found). On failure the
// error is enriched with recent fleet failure events.
func waitForGameliftFleetToBeDeleted(conn *gamelift.GameLift, id string, timeout time.Duration) error {
	stateConf := resource.StateChangeConf{
		Pending: []string{
			gamelift.FleetStatusActive,
			gamelift.FleetStatusDeleting,
			gamelift.FleetStatusError,
			gamelift.FleetStatusTerminated,
		},
		// Empty target: success is the resource disappearing entirely.
		Target:  []string{},
		Timeout: timeout,
		Refresh: func() (interface{}, string, error) {
			out, err := conn.DescribeFleetAttributes(&gamelift.DescribeFleetAttributesInput{
				FleetIds: aws.StringSlice([]string{id}),
			})
			if err != nil {
				// 42 is a throwaway non-nil result; the error carries the signal.
				return 42, "", err
			}
			attributes := out.FleetAttributes
			if len(attributes) < 1 {
				// No attributes left: the fleet is deleted.
				return nil, "", nil
			}
			if len(attributes) != 1 {
				return 42, "", fmt.Errorf("Expected exactly 1 Gamelift fleet, found %d under %q",
					len(attributes), id)
			}
			fleet := attributes[0]
			return fleet, *fleet.Status, nil
		},
	}
	_, err := stateConf.WaitForState()
	if err != nil {
		// Best-effort enrichment; polling failures are only logged.
		events, fErr := getGameliftFleetFailures(conn, id)
		if fErr != nil {
			log.Printf("[WARN] Failed to poll fleet failures: %s", fErr)
		}
		if len(events) > 0 {
			return fmt.Errorf("%s Recent failures:\n%+v", err, events)
		}
	}
	return err
}
// expandGameliftIpPermissions converts the raw ec2_inbound_permission config
// list into API IpPermission values; an empty input yields an empty slice.
func expandGameliftIpPermissions(cfgs []interface{}) []*gamelift.IpPermission {
	if len(cfgs) == 0 {
		return []*gamelift.IpPermission{}
	}
	perms := make([]*gamelift.IpPermission, 0, len(cfgs))
	for _, rawCfg := range cfgs {
		perms = append(perms, expandGameliftIpPermission(rawCfg.(map[string]interface{})))
	}
	return perms
}
// expandGameliftIpPermission converts a single inbound permission config map
// into an API IpPermission.
func expandGameliftIpPermission(cfg map[string]interface{}) *gamelift.IpPermission {
	perm := &gamelift.IpPermission{}
	perm.FromPort = aws.Int64(int64(cfg["from_port"].(int)))
	perm.ToPort = aws.Int64(int64(cfg["to_port"].(int)))
	perm.IpRange = aws.String(cfg["ip_range"].(string))
	perm.Protocol = aws.String(cfg["protocol"].(string))
	return perm
}
// flattenGameliftIpPermissions converts API IpPermission values back into
// the schema's list-of-maps representation.
func flattenGameliftIpPermissions(ipps []*gamelift.IpPermission) []interface{} {
	perms := make([]interface{}, 0, len(ipps))
	for _, ipp := range ipps {
		perms = append(perms, map[string]interface{}{
			"from_port": *ipp.FromPort,
			"ip_range":  *ipp.IpRange,
			"protocol":  *ipp.Protocol,
			"to_port":   *ipp.ToPort,
		})
	}
	return perms
}
// expandGameliftResourceCreationLimitPolicy converts the single-element
// config list into an API ResourceCreationLimitPolicy; nil for empty input.
func expandGameliftResourceCreationLimitPolicy(cfg []interface{}) *gamelift.ResourceCreationLimitPolicy {
	if len(cfg) == 0 {
		return nil
	}
	attrs := cfg[0].(map[string]interface{})
	policy := &gamelift.ResourceCreationLimitPolicy{}
	if v, ok := attrs["new_game_sessions_per_creator"]; ok {
		policy.NewGameSessionsPerCreator = aws.Int64(int64(v.(int)))
	}
	if v, ok := attrs["policy_period_in_minutes"]; ok {
		policy.PolicyPeriodInMinutes = aws.Int64(int64(v.(int)))
	}
	return policy
}
// flattenGameliftResourceCreationLimitPolicy converts an API
// ResourceCreationLimitPolicy into the schema's single-element list form;
// a nil policy yields an empty list.
//
// Fix: both fields were dereferenced unconditionally, which panics when the
// API returns a policy with either pointer unset. Each field is now only
// flattened when present.
func flattenGameliftResourceCreationLimitPolicy(policy *gamelift.ResourceCreationLimitPolicy) []interface{} {
	if policy == nil {
		return []interface{}{}
	}
	m := make(map[string]interface{})
	if policy.NewGameSessionsPerCreator != nil {
		m["new_game_sessions_per_creator"] = *policy.NewGameSessionsPerCreator
	}
	if policy.PolicyPeriodInMinutes != nil {
		m["policy_period_in_minutes"] = *policy.PolicyPeriodInMinutes
	}
	return []interface{}{m}
}
// expandGameliftRuntimeConfiguration converts the single-element
// runtime_configuration list into an API RuntimeConfiguration; nil for an
// empty list. Timeout/activation limits are attached only when positive.
func expandGameliftRuntimeConfiguration(cfg []interface{}) *gamelift.RuntimeConfiguration {
	if len(cfg) == 0 {
		return nil
	}
	attrs := cfg[0].(map[string]interface{})
	out := &gamelift.RuntimeConfiguration{}
	if timeout, ok := attrs["game_session_activation_timeout_seconds"].(int); ok && timeout > 0 {
		out.GameSessionActivationTimeoutSeconds = aws.Int64(int64(timeout))
	}
	if maxActivations, ok := attrs["max_concurrent_game_session_activations"].(int); ok && maxActivations > 0 {
		out.MaxConcurrentGameSessionActivations = aws.Int64(int64(maxActivations))
	}
	if v, ok := attrs["server_process"]; ok {
		out.ServerProcesses = expandGameliftServerProcesses(v.([]interface{}))
	}
	return out
}
// expandGameliftServerProcesses converts the server_process config list into
// API ServerProcess values; optional parameters are attached only when
// non-empty.
func expandGameliftServerProcesses(cfgs []interface{}) []*gamelift.ServerProcess {
	if len(cfgs) == 0 {
		return []*gamelift.ServerProcess{}
	}
	processes := make([]*gamelift.ServerProcess, 0, len(cfgs))
	for _, rawCfg := range cfgs {
		attrs := rawCfg.(map[string]interface{})
		process := &gamelift.ServerProcess{
			ConcurrentExecutions: aws.Int64(int64(attrs["concurrent_executions"].(int))),
			LaunchPath:           aws.String(attrs["launch_path"].(string)),
		}
		if params, ok := attrs["parameters"].(string); ok && params != "" {
			process.Parameters = aws.String(params)
		}
		processes = append(processes, process)
	}
	return processes
}
// getGameliftFleetFailures collects all failure events for the given fleet,
// following pagination via the recursive helper.
func getGameliftFleetFailures(conn *gamelift.GameLift, id string) ([]*gamelift.Event, error) {
	var events []*gamelift.Event
	if err := _getGameliftFleetFailures(conn, id, nil, &events); err != nil {
		return events, err
	}
	return events, nil
}
// _getGameliftFleetFailures fetches one page of fleet events, appends the
// failure events to *events, and recurses for the next page.
//
// Fix: the recursion previously passed the incoming `nextToken` back down
// instead of the freshly returned `eOut.NextToken`, so pagination never
// advanced — with more than one page of events this re-fetched the first
// page forever. The recursive call now uses the token from the response.
func _getGameliftFleetFailures(conn *gamelift.GameLift, id string, nextToken *string, events *[]*gamelift.Event) error {
	eOut, err := conn.DescribeFleetEvents(&gamelift.DescribeFleetEventsInput{
		FleetId:   aws.String(id),
		NextToken: nextToken,
	})
	if err != nil {
		return err
	}

	for _, e := range eOut.Events {
		if isGameliftEventFailure(e) {
			*events = append(*events, e)
		}
	}

	if eOut.NextToken != nil {
		// Advance with the token returned for THIS page.
		err := _getGameliftFleetFailures(conn, id, eOut.NextToken, events)
		if err != nil {
			return err
		}
	}
	return nil
}
// isGameliftEventFailure reports whether the event's code is one of the
// Gamelift failure event codes (fleet activation/validation, server process
// crashes and timeouts, VPC peering failures, etc.).
func isGameliftEventFailure(event *gamelift.Event) bool {
	switch *event.EventCode {
	case gamelift.EventCodeFleetActivationFailed,
		gamelift.EventCodeFleetActivationFailedNoInstances,
		gamelift.EventCodeFleetBinaryDownloadFailed,
		gamelift.EventCodeFleetInitializationFailed,
		gamelift.EventCodeFleetStateError,
		gamelift.EventCodeFleetValidationExecutableRuntimeFailure,
		gamelift.EventCodeFleetValidationLaunchPathNotFound,
		gamelift.EventCodeFleetValidationTimedOut,
		gamelift.EventCodeFleetVpcPeeringFailed,
		gamelift.EventCodeGameSessionActivationTimeout,
		gamelift.EventCodeServerProcessCrashed,
		gamelift.EventCodeServerProcessForceTerminated,
		gamelift.EventCodeServerProcessInvalidPath,
		gamelift.EventCodeServerProcessProcessExitTimeout,
		gamelift.EventCodeServerProcessProcessReadyTimeout,
		gamelift.EventCodeServerProcessSdkInitializationTimeout,
		gamelift.EventCodeServerProcessTerminatedUnhealthy:
		return true
	}
	return false
}
// diffGameliftPortSettings compares old and new inbound permission configs
// and returns the permissions to authorize (a: only in new) and to revoke
// (r: only in old). Unchanged permissions are skipped so we're not wasting
// API calls for removal & subsequent re-addition of the same ones.
//
// Fix: the previous implementation removed matched elements from the very
// slices it was ranging over; `range` keeps the original slice header, so
// the in-place append-removal shifted elements under the loop — some old
// permissions were skipped (never revoked) while stale tail duplicates were
// re-read (revoked twice). Matches are now tracked in a side table and the
// inputs are never mutated.
func diffGameliftPortSettings(oldPerms, newPerms []interface{}) (a []*gamelift.IpPermission, r []*gamelift.IpPermission) {
	// matched[j] marks newPerms[j] as identical to some old permission.
	matched := make([]bool, len(newPerms))
OUTER:
	for _, op := range oldPerms {
		oldPerm := op.(map[string]interface{})
		for j, np := range newPerms {
			if matched[j] {
				continue
			}
			if reflect.DeepEqual(oldPerm, np.(map[string]interface{})) {
				matched[j] = true
				continue OUTER
			}
		}
		// Present only in the old set: revoke.
		r = append(r, expandGameliftIpPermission(oldPerm))
	}
	for j, np := range newPerms {
		if matched[j] {
			continue
		}
		// Present only in the new set: authorize.
		a = append(a, expandGameliftIpPermission(np.(map[string]interface{})))
	}
	return
}

View File

@ -68,10 +68,9 @@ func resourceAwsIamUserSshKeyCreate(d *schema.ResourceData, meta interface{}) er
return fmt.Errorf("Error creating IAM User SSH Key %s: %s", username, err)
}
d.Set("ssh_public_key_id", createResp.SSHPublicKey.SSHPublicKeyId)
d.SetId(*createResp.SSHPublicKey.SSHPublicKeyId)
return resourceAwsIamUserSshKeyRead(d, meta)
return resourceAwsIamUserSshKeyUpdate(d, meta)
}
func resourceAwsIamUserSshKeyRead(d *schema.ResourceData, meta interface{}) error {
@ -95,7 +94,7 @@ func resourceAwsIamUserSshKeyRead(d *schema.ResourceData, meta interface{}) erro
d.Set("fingerprint", getResp.SSHPublicKey.Fingerprint)
d.Set("status", getResp.SSHPublicKey.Status)
d.Set("ssh_public_key_id", getResp.SSHPublicKey.SSHPublicKeyId)
return nil
}
@ -119,9 +118,8 @@ func resourceAwsIamUserSshKeyUpdate(d *schema.ResourceData, meta interface{}) er
}
return fmt.Errorf("Error updating IAM User SSH Key %s: %s", d.Id(), err)
}
return resourceAwsIamUserRead(d, meta)
}
return nil
return resourceAwsIamUserSshKeyRead(d, meta)
}
func resourceAwsIamUserSshKeyDelete(d *schema.ResourceData, meta interface{}) error {

View File

@ -35,7 +35,7 @@ func resourceAwsInstance() *schema.Resource {
Timeouts: &schema.ResourceTimeout{
Create: schema.DefaultTimeout(10 * time.Minute),
Update: schema.DefaultTimeout(10 * time.Minute),
Delete: schema.DefaultTimeout(10 * time.Minute),
Delete: schema.DefaultTimeout(20 * time.Minute),
},
Schema: map[string]*schema.Schema{
@ -552,7 +552,7 @@ func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error {
stateConf := &resource.StateChangeConf{
Pending: []string{"pending"},
Target: []string{"running"},
Refresh: InstanceStateRefreshFunc(conn, *instance.InstanceId, "terminated"),
Refresh: InstanceStateRefreshFunc(conn, *instance.InstanceId, []string{"terminated", "shutting-down"}),
Timeout: d.Timeout(schema.TimeoutCreate),
Delay: 10 * time.Second,
MinTimeout: 3 * time.Second,
@ -914,7 +914,7 @@ func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
}
}
if d.HasChange("vpc_security_group_ids") {
if d.HasChange("vpc_security_group_ids") && !d.IsNewResource() {
var groups []*string
if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 {
for _, v := range v.List() {
@ -965,7 +965,7 @@ func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
stateConf := &resource.StateChangeConf{
Pending: []string{"pending", "running", "shutting-down", "stopped", "stopping"},
Target: []string{"stopped"},
Refresh: InstanceStateRefreshFunc(conn, d.Id(), ""),
Refresh: InstanceStateRefreshFunc(conn, d.Id(), []string{}),
Timeout: d.Timeout(schema.TimeoutUpdate),
Delay: 10 * time.Second,
MinTimeout: 3 * time.Second,
@ -996,7 +996,7 @@ func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
stateConf = &resource.StateChangeConf{
Pending: []string{"pending", "stopped"},
Target: []string{"running"},
Refresh: InstanceStateRefreshFunc(conn, d.Id(), "terminated"),
Refresh: InstanceStateRefreshFunc(conn, d.Id(), []string{"terminated"}),
Timeout: d.Timeout(schema.TimeoutUpdate),
Delay: 10 * time.Second,
MinTimeout: 3 * time.Second,
@ -1074,7 +1074,7 @@ func resourceAwsInstanceDelete(d *schema.ResourceData, meta interface{}) error {
// InstanceStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch
// an EC2 instance.
func InstanceStateRefreshFunc(conn *ec2.EC2, instanceID, failState string) resource.StateRefreshFunc {
func InstanceStateRefreshFunc(conn *ec2.EC2, instanceID string, failStates []string) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
resp, err := conn.DescribeInstances(&ec2.DescribeInstancesInput{
InstanceIds: []*string{aws.String(instanceID)},
@ -1098,10 +1098,11 @@ func InstanceStateRefreshFunc(conn *ec2.EC2, instanceID, failState string) resou
i := resp.Reservations[0].Instances[0]
state := *i.State.Name
if state == failState {
return i, state, fmt.Errorf("Failed to reach target state. Reason: %s",
stringifyStateReason(i.StateReason))
for _, failState := range failStates {
if state == failState {
return i, state, fmt.Errorf("Failed to reach target state. Reason: %s",
stringifyStateReason(i.StateReason))
}
}
return i, state, nil
@ -1716,7 +1717,7 @@ func awsTerminateInstance(conn *ec2.EC2, id string, d *schema.ResourceData) erro
stateConf := &resource.StateChangeConf{
Pending: []string{"pending", "running", "shutting-down", "stopped", "stopping"},
Target: []string{"terminated"},
Refresh: InstanceStateRefreshFunc(conn, id, ""),
Refresh: InstanceStateRefreshFunc(conn, id, []string{}),
Timeout: d.Timeout(schema.TimeoutDelete),
Delay: 10 * time.Second,
MinTimeout: 3 * time.Second,

View File

@ -0,0 +1,202 @@
package aws
import (
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/iot"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
// resourceAwsIotThingType returns the schema for the aws_iot_thing_type
// resource. name and all properties force re-creation; only the
// "deprecated" flag is updatable in place (see resourceAwsIotThingTypeUpdate).
// https://docs.aws.amazon.com/iot/latest/apireference/API_CreateThingType.html
func resourceAwsIotThingType() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsIotThingTypeCreate,
		Read:   resourceAwsIotThingTypeRead,
		Update: resourceAwsIotThingTypeUpdate,
		Delete: resourceAwsIotThingTypeDelete,

		// The import ID is the thing type name.
		Importer: &schema.ResourceImporter{
			State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
				d.Set("name", d.Id())
				return []*schema.ResourceData{d}, nil
			},
		},

		Schema: map[string]*schema.Schema{
			"name": {
				Type:         schema.TypeString,
				Required:     true,
				ForceNew:     true,
				ValidateFunc: validateIotThingTypeName,
			},
			"properties": {
				Type:     schema.TypeList,
				Optional: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"description": {
							Type:         schema.TypeString,
							Optional:     true,
							ForceNew:     true,
							ValidateFunc: validateIotThingTypeDescription,
						},
						"searchable_attributes": {
							Type:     schema.TypeSet,
							Optional: true,
							Computed: true,
							ForceNew: true,
							MaxItems: 3,
							Elem: &schema.Schema{
								Type:         schema.TypeString,
								ValidateFunc: validateIotThingTypeSearchableAttribute,
							},
						},
					},
				},
			},
			// Toggled in place via DeprecateThingType.
			"deprecated": {
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
			},
			// Assigned by AWS.
			"arn": {
				Type:     schema.TypeString,
				Computed: true,
			},
		},
	}
}
// resourceAwsIotThingTypeCreate creates the thing type (with optional
// properties), immediately deprecates it when "deprecated" is set, and then
// delegates to Read.
func resourceAwsIotThingTypeCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).iotconn

	params := &iot.CreateThingTypeInput{
		ThingTypeName: aws.String(d.Get("name").(string)),
	}
	if v, ok := d.GetOk("properties"); ok {
		if config, ok := v.([]interface{})[0].(map[string]interface{}); ok && config != nil {
			params.ThingTypeProperties = expandIotThingTypeProperties(config)
		}
	}
	log.Printf("[DEBUG] Creating IoT Thing Type: %s", params)
	out, err := conn.CreateThingType(params)
	if err != nil {
		return err
	}
	d.SetId(*out.ThingTypeName)

	if d.Get("deprecated").(bool) {
		deprecateParams := &iot.DeprecateThingTypeInput{
			ThingTypeName: aws.String(d.Id()),
			UndoDeprecate: aws.Bool(false),
		}
		log.Printf("[DEBUG] Deprecating IoT Thing Type: %s", deprecateParams)
		if _, err := conn.DeprecateThingType(deprecateParams); err != nil {
			return err
		}
	}

	return resourceAwsIotThingTypeRead(d, meta)
}
// resourceAwsIotThingTypeRead refreshes state from DescribeThingType. A
// ResourceNotFound error removes the thing type from state.
//
// Fix: on ResourceNotFound the code cleared the ID but then fell through to
// `return err`, so the (expected) removal surfaced as an apply/refresh
// error. We now return nil after removing the resource from state, matching
// the convention used by the other resources in this file.
func resourceAwsIotThingTypeRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).iotconn

	params := &iot.DescribeThingTypeInput{
		ThingTypeName: aws.String(d.Id()),
	}
	log.Printf("[DEBUG] Reading IoT Thing Type: %s", params)
	out, err := conn.DescribeThingType(params)

	if err != nil {
		if isAWSErr(err, iot.ErrCodeResourceNotFoundException, "") {
			log.Printf("[WARN] IoT Thing Type %q not found, removing from state", d.Id())
			d.SetId("")
			return nil
		}
		return err
	}

	if out.ThingTypeMetadata != nil {
		d.Set("deprecated", out.ThingTypeMetadata.Deprecated)
	}

	d.Set("arn", out.ThingTypeArn)
	d.Set("properties", flattenIotThingTypeProperties(out.ThingTypeProperties))

	return nil
}
// resourceAwsIotThingTypeUpdate toggles the deprecation flag — the only
// attribute updatable in place — then re-reads state.
func resourceAwsIotThingTypeUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).iotconn

	if !d.HasChange("deprecated") {
		return resourceAwsIotThingTypeRead(d, meta)
	}

	params := &iot.DeprecateThingTypeInput{
		ThingTypeName: aws.String(d.Id()),
		UndoDeprecate: aws.Bool(!d.Get("deprecated").(bool)),
	}
	log.Printf("[DEBUG] Updating IoT Thing Type: %s", params)
	if _, err := conn.DeprecateThingType(params); err != nil {
		return err
	}

	return resourceAwsIotThingTypeRead(d, meta)
}
// resourceAwsIotThingTypeDelete deprecates the thing type, then retries
// DeleteThingType until AWS accepts it (the API requires ~5 minutes after
// deprecation before a delete succeeds).
func resourceAwsIotThingTypeDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).iotconn

	// In order to delete an IoT Thing Type, you must deprecate it first and wait
	// at least 5 minutes.
	deprecateParams := &iot.DeprecateThingTypeInput{
		ThingTypeName: aws.String(d.Id()),
	}
	log.Printf("[DEBUG] Deprecating IoT Thing Type: %s", deprecateParams)
	if _, err := conn.DeprecateThingType(deprecateParams); err != nil {
		return err
	}

	deleteParams := &iot.DeleteThingTypeInput{
		ThingTypeName: aws.String(d.Id()),
	}
	log.Printf("[DEBUG] Deleting IoT Thing Type: %s", deleteParams)

	return resource.Retry(6*time.Minute, func() *resource.RetryError {
		_, err := conn.DeleteThingType(deleteParams)
		if err == nil {
			return nil
		}
		if isAWSErr(err, iot.ErrCodeInvalidRequestException, "Please wait for 5 minutes after deprecation and then retry") {
			return resource.RetryableError(err)
		}
		// As the delay post-deprecation is about 5 minutes, it may have been
		// deleted in between, thus getting a Not Found Exception.
		if isAWSErr(err, iot.ErrCodeResourceNotFoundException, "") {
			return nil
		}
		return resource.NonRetryableError(err)
	})
}

View File

@ -9,7 +9,6 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/firehose"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/resource"
@ -1320,13 +1319,15 @@ func resourceAwsKinesisFirehoseDeliveryStreamCreate(d *schema.ResourceData, meta
log.Printf("[DEBUG] Error creating Firehose Delivery Stream: %s", err)
lastError = err
if awsErr, ok := err.(awserr.Error); ok {
// IAM roles can take ~10 seconds to propagate in AWS:
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console
if awsErr.Code() == "InvalidArgumentException" && strings.Contains(awsErr.Message(), "Firehose is unable to assume role") {
log.Printf("[DEBUG] Firehose could not assume role referenced, retrying...")
return resource.RetryableError(awsErr)
}
// Retry for IAM eventual consistency
if isAWSErr(err, firehose.ErrCodeInvalidArgumentException, "is not authorized to perform") {
return resource.RetryableError(err)
}
// IAM roles can take ~10 seconds to propagate in AWS:
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console
if isAWSErr(err, firehose.ErrCodeInvalidArgumentException, "Firehose is unable to assume role") {
log.Printf("[DEBUG] Firehose could not assume role referenced, retrying...")
return resource.RetryableError(err)
}
// Not retryable
return resource.NonRetryableError(err)
@ -1335,10 +1336,7 @@ func resourceAwsKinesisFirehoseDeliveryStreamCreate(d *schema.ResourceData, meta
return nil
})
if err != nil {
if awsErr, ok := lastError.(awserr.Error); ok {
return fmt.Errorf("[WARN] Error creating Kinesis Firehose Delivery Stream: %s", awsErr.Error())
}
return err
return fmt.Errorf("error creating Kinesis Firehose Delivery Stream: %s", err)
}
stateConf := &resource.StateChangeConf{
@ -1459,14 +1457,12 @@ func resourceAwsKinesisFirehoseDeliveryStreamRead(d *schema.ResourceData, meta i
})
if err != nil {
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == "ResourceNotFoundException" {
d.SetId("")
return nil
}
return fmt.Errorf("[WARN] Error reading Kinesis Firehose Delivery Stream: %s", awsErr.Error())
if isAWSErr(err, firehose.ErrCodeResourceNotFoundException, "") {
log.Printf("[WARN] Kinesis Firehose Delivery Stream (%s) not found, removing from state", d.Get("name").(string))
d.SetId("")
return nil
}
return err
return fmt.Errorf("error reading Kinesis Firehose Delivery Stream: %s", err)
}
s := resp.DeliveryStreamDescription
@ -1517,11 +1513,8 @@ func firehoseStreamStateRefreshFunc(conn *firehose.Firehose, sn string) resource
}
resp, err := conn.DescribeDeliveryStream(describeOpts)
if err != nil {
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == "ResourceNotFoundException" {
return 42, "DESTROYED", nil
}
return nil, awsErr.Code(), err
if isAWSErr(err, firehose.ErrCodeResourceNotFoundException, "") {
return 42, "DESTROYED", nil
}
return nil, "failed", err
}

View File

@ -200,9 +200,23 @@ func resourceAwsLambdaFunction() *schema.Resource {
"tags": tagsSchema(),
},
CustomizeDiff: updateComputedAttributesOnPublish,
}
}
// updateComputedAttributesOnPublish is a CustomizeDiff hook: when the Lambda
// function's code package is changing, the computed last_modified attribute
// will change too, and — if publish is set — a new version will be created,
// so version and qualified_arn must also be marked as newly computed.
func updateComputedAttributesOnPublish(d *schema.ResourceDiff, meta interface{}) error {
	if needsFunctionCodeUpdate(d) {
		// A code update always bumps the function's LastModified timestamp.
		d.SetNewComputed("last_modified")
		publish := d.Get("publish").(bool)
		if publish {
			// Publishing on update creates a new version, which also changes
			// the qualified (version-suffixed) ARN.
			d.SetNewComputed("version")
			d.SetNewComputed("qualified_arn")
		}
	}
	return nil
}
// resourceAwsLambdaFunction maps to:
// CreateFunction in the API / SDK
func resourceAwsLambdaFunctionCreate(d *schema.ResourceData, meta interface{}) error {
@ -525,6 +539,14 @@ func resourceAwsLambdaFunctionDelete(d *schema.ResourceData, meta interface{}) e
return nil
}
// resourceDiffer is the minimal change-detection interface shared by
// *schema.ResourceData and *schema.ResourceDiff, kept small so the code
// update check is unit-testable.
type resourceDiffer interface {
	HasChange(string) bool
}

// needsFunctionCodeUpdate reports whether any attribute that feeds the
// Lambda function's code package (local file or S3 location) has changed.
func needsFunctionCodeUpdate(d resourceDiffer) bool {
	for _, key := range []string{"filename", "source_code_hash", "s3_bucket", "s3_key", "s3_object_version"} {
		if d.HasChange(key) {
			return true
		}
	}
	return false
}
// resourceAwsLambdaFunctionUpdate maps to:
// UpdateFunctionCode in the API / SDK
func resourceAwsLambdaFunctionUpdate(d *schema.ResourceData, meta interface{}) error {
@ -672,7 +694,7 @@ func resourceAwsLambdaFunctionUpdate(d *schema.ResourceData, meta interface{}) e
d.SetPartial("timeout")
}
if d.HasChange("filename") || d.HasChange("source_code_hash") || d.HasChange("s3_bucket") || d.HasChange("s3_key") || d.HasChange("s3_object_version") {
if needsFunctionCodeUpdate(d) {
codeReq := &lambda.UpdateFunctionCodeInput{
FunctionName: aws.String(d.Id()),
Publish: aws.Bool(d.Get("publish").(bool)),

View File

@ -0,0 +1,157 @@
package aws
import (
"errors"
"fmt"
"log"
"time"
"github.com/hashicorp/terraform/helper/resource"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/elbv2"
"github.com/hashicorp/terraform/helper/schema"
)
// resourceAwsLbListenerCertificate returns the schema for the
// aws_lb_listener_certificate resource, which attaches an additional
// certificate to an ELBv2 listener. Both attributes force a new resource:
// the association can only be created and deleted, never updated in place.
func resourceAwsLbListenerCertificate() *schema.Resource {
	attributes := map[string]*schema.Schema{
		"listener_arn": {
			Type:     schema.TypeString,
			Required: true,
			ForceNew: true,
		},
		"certificate_arn": {
			Type:     schema.TypeString,
			Required: true,
			ForceNew: true,
		},
	}

	return &schema.Resource{
		Create: resourceAwsLbListenerCertificateCreate,
		Read:   resourceAwsLbListenerCertificateRead,
		Delete: resourceAwsLbListenerCertificateDelete,
		Schema: attributes,
	}
}
// resourceAwsLbListenerCertificateCreate attaches the configured certificate
// to the listener and derives the resource ID from both ARNs.
func resourceAwsLbListenerCertificateCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).elbv2conn

	listenerArn := d.Get("listener_arn").(string)
	certificateArn := d.Get("certificate_arn").(string)

	log.Printf("[DEBUG] Adding certificate: %s of listener: %s", certificateArn, listenerArn)
	resp, err := conn.AddListenerCertificates(&elbv2.AddListenerCertificatesInput{
		ListenerArn: aws.String(listenerArn),
		Certificates: []*elbv2.Certificate{
			{
				CertificateArn: aws.String(certificateArn),
			},
		},
	})
	if err != nil {
		return fmt.Errorf("Error creating LB Listener Certificate: %s", err)
	}

	if len(resp.Certificates) == 0 {
		return errors.New("Error creating LB Listener Certificate: no certificates returned in response")
	}

	// Composite ID: <listener ARN>_<certificate ARN>.
	d.SetId(listenerArn + "_" + certificateArn)

	return resourceAwsLbListenerCertificateRead(d, meta)
}
// resourceAwsLbListenerCertificateRead verifies the certificate is still
// attached to the listener by searching DescribeListenerCertificates output.
// Because the attachment may not be visible immediately after creation, a
// not-found result is retried for up to one minute while the resource is
// new; otherwise a missing attachment removes the resource from state.
func resourceAwsLbListenerCertificateRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).elbv2conn

	certificateArn := d.Get("certificate_arn").(string)
	listenerArn := d.Get("listener_arn").(string)

	log.Printf("[DEBUG] Reading certificate: %s of listener: %s", certificateArn, listenerArn)

	var certificate *elbv2.Certificate
	err := resource.Retry(1*time.Minute, func() *resource.RetryError {
		var err error
		// skipDefault=true: the listener's default certificate is not managed
		// by this resource.
		certificate, err = findAwsLbListenerCertificate(certificateArn, listenerArn, true, nil, conn)
		if err != nil {
			return resource.NonRetryableError(err)
		}

		if certificate == nil {
			err = fmt.Errorf("certificate not found: %s", certificateArn)
			if d.IsNewResource() {
				// Eventual consistency: keep polling for a just-created attachment.
				return resource.RetryableError(err)
			}
			return resource.NonRetryableError(err)
		}

		return nil
	})
	if err != nil {
		if certificate == nil {
			// The attachment is gone; drop it from state so it can be recreated.
			log.Printf("[WARN] %s - removing from state", err)
			d.SetId("")
			return nil
		}
		return err
	}

	return nil
}
// resourceAwsLbListenerCertificateDelete detaches the certificate from the
// listener. A certificate or listener that no longer exists is treated as
// already deleted.
func resourceAwsLbListenerCertificateDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).elbv2conn

	certificateArn := d.Get("certificate_arn").(string)
	listenerArn := d.Get("listener_arn").(string)

	log.Printf("[DEBUG] Deleting certificate: %s of listener: %s", certificateArn, listenerArn)

	_, err := conn.RemoveListenerCertificates(&elbv2.RemoveListenerCertificatesInput{
		ListenerArn: aws.String(listenerArn),
		Certificates: []*elbv2.Certificate{
			{
				CertificateArn: aws.String(certificateArn),
			},
		},
	})
	switch {
	case err == nil:
		return nil
	case isAWSErr(err, elbv2.ErrCodeCertificateNotFoundException, ""):
		// Already detached or certificate deleted out-of-band.
		return nil
	case isAWSErr(err, elbv2.ErrCodeListenerNotFoundException, ""):
		// Listener deleted out-of-band; nothing left to detach.
		return nil
	default:
		return fmt.Errorf("Error removing LB Listener Certificate: %s", err)
	}
}
// findAwsLbListenerCertificate pages through the listener's certificates and
// returns the one matching certificateArn, or (nil, nil) when it is not
// attached. When skipDefault is true the listener's default certificate is
// ignored. nextMarker selects the starting page (nil starts from the first).
func findAwsLbListenerCertificate(certificateArn, listenerArn string, skipDefault bool, nextMarker *string, conn *elbv2.ELBV2) (*elbv2.Certificate, error) {
	marker := nextMarker
	for {
		params := &elbv2.DescribeListenerCertificatesInput{
			ListenerArn: aws.String(listenerArn),
			PageSize:    aws.Int64(400),
		}
		if marker != nil {
			params.Marker = marker
		}

		resp, err := conn.DescribeListenerCertificates(params)
		if err != nil {
			return nil, err
		}

		for _, cert := range resp.Certificates {
			if skipDefault && *cert.IsDefault {
				continue
			}
			if *cert.CertificateArn == certificateArn {
				return cert, nil
			}
		}

		if resp.NextMarker == nil {
			return nil, nil
		}
		marker = resp.NextMarker
	}
}

View File

@ -5,12 +5,15 @@ import (
"fmt"
"log"
"regexp"
"sort"
"strconv"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/elbv2"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
@ -36,7 +39,8 @@ func resourceAwsLbbListenerRule() *schema.Resource {
},
"priority": {
Type: schema.TypeInt,
Required: true,
Optional: true,
Computed: true,
ForceNew: true,
ValidateFunc: validateAwsLbListenerRulePriority,
},
@ -82,10 +86,10 @@ func resourceAwsLbbListenerRule() *schema.Resource {
func resourceAwsLbListenerRuleCreate(d *schema.ResourceData, meta interface{}) error {
elbconn := meta.(*AWSClient).elbv2conn
listenerArn := d.Get("listener_arn").(string)
params := &elbv2.CreateRuleInput{
ListenerArn: aws.String(d.Get("listener_arn").(string)),
Priority: aws.Int64(int64(d.Get("priority").(int))),
ListenerArn: aws.String(listenerArn),
}
actions := d.Get("action").([]interface{})
@ -112,9 +116,34 @@ func resourceAwsLbListenerRuleCreate(d *schema.ResourceData, meta interface{}) e
}
}
resp, err := elbconn.CreateRule(params)
if err != nil {
return errwrap.Wrapf("Error creating LB Listener Rule: {{err}}", err)
var resp *elbv2.CreateRuleOutput
if v, ok := d.GetOk("priority"); ok {
var err error
params.Priority = aws.Int64(int64(v.(int)))
resp, err = elbconn.CreateRule(params)
if err != nil {
return fmt.Errorf("Error creating LB Listener Rule: %v", err)
}
} else {
err := resource.Retry(5*time.Minute, func() *resource.RetryError {
var err error
priority, err := highestListenerRulePriority(elbconn, listenerArn)
if err != nil {
return resource.NonRetryableError(err)
}
params.Priority = aws.Int64(priority + 1)
resp, err = elbconn.CreateRule(params)
if err != nil {
if isAWSErr(err, elbv2.ErrCodePriorityInUseException, "") {
return resource.RetryableError(err)
}
return resource.NonRetryableError(err)
}
return nil
})
if err != nil {
return fmt.Errorf("Error creating LB Listener Rule: %v", err)
}
}
if len(resp.Rules) == 0 {
@ -157,7 +186,7 @@ func resourceAwsLbListenerRuleRead(d *schema.ResourceData, meta interface{}) err
d.Set("priority", 99999)
} else {
if priority, err := strconv.Atoi(*rule.Priority); err != nil {
return errwrap.Wrapf("Cannot convert rule priority %q to int: {{err}}", err)
return fmt.Errorf("Cannot convert rule priority %q to int: {{err}}", err)
} else {
d.Set("priority", priority)
}
@ -278,8 +307,8 @@ func resourceAwsLbListenerRuleDelete(d *schema.ResourceData, meta interface{}) e
func validateAwsLbListenerRulePriority(v interface{}, k string) (ws []string, errors []error) {
value := v.(int)
if value < 1 || value > 99999 {
errors = append(errors, fmt.Errorf("%q must be in the range 1-99999", k))
if value < 1 || (value > 50000 && value != 99999) {
errors = append(errors, fmt.Errorf("%q must be in the range 1-50000 for normal rule or 99999 for default rule", k))
}
return
}
@ -312,3 +341,39 @@ func isRuleNotFound(err error) bool {
elberr, ok := err.(awserr.Error)
return ok && elberr.Code() == "RuleNotFound"
}
// highestListenerRulePriority returns the highest numeric priority currently
// assigned to any non-default rule on the given listener, or 0 when the
// listener has no non-default rules. All pages of DescribeRules are scanned.
//
// Unlike collecting and sorting every priority, the maximum is tracked
// directly (O(n), no intermediate slice), and a priority string that fails
// to parse is now reported instead of being silently treated as zero.
func highestListenerRulePriority(conn *elbv2.ELBV2, arn string) (priority int64, err error) {
	var nextMarker *string
	for {
		out, aerr := conn.DescribeRules(&elbv2.DescribeRulesInput{
			ListenerArn: aws.String(arn),
			Marker:      nextMarker,
		})
		if aerr != nil {
			err = aerr
			return
		}
		for _, rule := range out.Rules {
			// The default rule reports the literal priority "default"; every
			// other rule's priority is a numeric string.
			if *rule.Priority == "default" {
				continue
			}
			p, perr := strconv.ParseInt(*rule.Priority, 10, 64)
			if perr != nil {
				err = fmt.Errorf("Cannot convert rule priority %q to int: %s", *rule.Priority, perr)
				return
			}
			if p > priority {
				priority = p
			}
		}
		if out.NextMarker == nil {
			return
		}
		nextMarker = out.NextMarker
	}
}

View File

@ -330,7 +330,11 @@ func resourceAwsLbTargetGroupUpdate(d *schema.ResourceData, meta interface{}) er
})
}
if d.HasChange("stickiness") {
// In CustomizeDiff we allow LB stickiness to be declared for TCP target
// groups, so long as it's not enabled. This allows for better support for
// modules, but also means we need to completely skip sending the data to the
// API if it's defined on a TCP target group.
if d.HasChange("stickiness") && d.Get("protocol") != "TCP" {
stickinessBlocks := d.Get("stickiness").([]interface{})
if len(stickinessBlocks) == 1 {
stickiness := stickinessBlocks[0].(map[string]interface{})
@ -541,8 +545,45 @@ func flattenAwsLbTargetGroupResource(d *schema.ResourceData, meta interface{}, t
return errwrap.Wrapf("Error retrieving Target Group Attributes: {{err}}", err)
}
// We only read in the stickiness attributes if the target group is not
// TCP-based. This ensures we don't end up causing a spurious diff if someone
// has defined the stickiness block on a TCP target group (albeit with
// false), for which this update would clobber the state coming from config
// for.
//
// This is a workaround to support module design where the module needs to
// support HTTP and TCP target groups.
switch {
case *targetGroup.Protocol != "TCP":
if err = flattenAwsLbTargetGroupStickiness(d, attrResp.Attributes); err != nil {
return err
}
case *targetGroup.Protocol == "TCP" && len(d.Get("stickiness").([]interface{})) < 1:
if err = d.Set("stickiness", []interface{}{}); err != nil {
return err
}
}
tagsResp, err := elbconn.DescribeTags(&elbv2.DescribeTagsInput{
ResourceArns: []*string{aws.String(d.Id())},
})
if err != nil {
return errwrap.Wrapf("Error retrieving Target Group Tags: {{err}}", err)
}
for _, t := range tagsResp.TagDescriptions {
if *t.ResourceArn == d.Id() {
if err := d.Set("tags", tagsToMapELBv2(t.Tags)); err != nil {
return err
}
}
}
return nil
}
func flattenAwsLbTargetGroupStickiness(d *schema.ResourceData, attributes []*elbv2.TargetGroupAttribute) error {
stickinessMap := map[string]interface{}{}
for _, attr := range attrResp.Attributes {
for _, attr := range attributes {
switch *attr.Key {
case "stickiness.enabled":
enabled, err := strconv.ParseBool(*attr.Value)
@ -574,21 +615,6 @@ func flattenAwsLbTargetGroupResource(d *schema.ResourceData, meta interface{}, t
if err := d.Set("stickiness", setStickyMap); err != nil {
return err
}
tagsResp, err := elbconn.DescribeTags(&elbv2.DescribeTagsInput{
ResourceArns: []*string{aws.String(d.Id())},
})
if err != nil {
return errwrap.Wrapf("Error retrieving Target Group Tags: {{err}}", err)
}
for _, t := range tagsResp.TagDescriptions {
if *t.ResourceArn == d.Id() {
if err := d.Set("tags", tagsToMapELBv2(t.Tags)); err != nil {
return err
}
}
}
return nil
}
@ -596,9 +622,11 @@ func resourceAwsLbTargetGroupCustomizeDiff(diff *schema.ResourceDiff, v interfac
protocol := diff.Get("protocol").(string)
if protocol == "TCP" {
// TCP load balancers do not support stickiness
stickinessBlocks := diff.Get("stickiness").([]interface{})
if len(stickinessBlocks) != 0 {
return fmt.Errorf("Network Load Balancers do not support Stickiness")
if stickinessBlocks := diff.Get("stickiness").([]interface{}); len(stickinessBlocks) == 1 {
stickiness := stickinessBlocks[0].(map[string]interface{})
if val := stickiness["enabled"].(bool); val {
return fmt.Errorf("Network Load Balancers do not support Stickiness")
}
}
}

View File

@ -95,6 +95,11 @@ func resourceAwsRDSCluster() *schema.Resource {
Computed: true,
},
"hosted_zone_id": {
Type: schema.TypeString,
Computed: true,
},
"engine": {
Type: schema.TypeString,
Optional: true,
@ -249,6 +254,12 @@ func resourceAwsRDSCluster() *schema.Resource {
Computed: true,
},
"source_region": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"tags": tagsSchema(),
},
}
@ -408,6 +419,10 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error
createOpts.KmsKeyId = aws.String(attr.(string))
}
if attr, ok := d.GetOk("source_region"); ok {
createOpts.SourceRegion = aws.String(attr.(string))
}
log.Printf("[DEBUG] Create RDS Cluster as read replica: %s", createOpts)
resp, err := conn.CreateDBCluster(createOpts)
if err != nil {
@ -590,6 +605,7 @@ func flattenAwsRdsClusterResource(d *schema.ResourceData, meta interface{}, dbc
d.Set("reader_endpoint", dbc.ReaderEndpoint)
d.Set("replication_source_identifier", dbc.ReplicationSourceIdentifier)
d.Set("iam_database_authentication_enabled", dbc.IAMDatabaseAuthenticationEnabled)
d.Set("hosted_zone_id", dbc.HostedZoneId)
var vpcg []string
for _, g := range dbc.VpcSecurityGroups {

View File

@ -176,6 +176,8 @@ func resourceAwsRDSClusterInstance() *schema.Resource {
"availability_zone": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Computed: true,
},
@ -211,6 +213,10 @@ func resourceAwsRDSClusterInstanceCreate(d *schema.ResourceData, meta interface{
Tags: tags,
}
if attr, ok := d.GetOk("availability_zone"); ok {
createOpts.AvailabilityZone = aws.String(attr.(string))
}
if attr, ok := d.GetOk("db_parameter_group_name"); ok {
createOpts.DBParameterGroupName = aws.String(attr.(string))
}

View File

@ -8,7 +8,6 @@ import (
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/redshift"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
@ -484,7 +483,7 @@ func resourceAwsRedshiftClusterCreate(d *schema.ResourceData, meta interface{})
stateConf := &resource.StateChangeConf{
Pending: []string{"creating", "backing-up", "modifying", "restoring"},
Target: []string{"available"},
Refresh: resourceAwsRedshiftClusterStateRefreshFunc(d, meta),
Refresh: resourceAwsRedshiftClusterStateRefreshFunc(d.Id(), conn),
Timeout: 75 * time.Minute,
MinTimeout: 10 * time.Second,
}
@ -523,12 +522,10 @@ func resourceAwsRedshiftClusterRead(d *schema.ResourceData, meta interface{}) er
})
if err != nil {
if awsErr, ok := err.(awserr.Error); ok {
if "ClusterNotFound" == awsErr.Code() {
d.SetId("")
log.Printf("[DEBUG] Redshift Cluster (%s) not found", d.Id())
return nil
}
if isAWSErr(err, redshift.ErrCodeClusterNotFoundFault, "") {
d.SetId("")
log.Printf("[DEBUG] Redshift Cluster (%s) not found", d.Id())
return nil
}
log.Printf("[DEBUG] Error describing Redshift Cluster (%s)", d.Id())
return err
@ -766,7 +763,7 @@ func resourceAwsRedshiftClusterUpdate(d *schema.ResourceData, meta interface{})
stateConf := &resource.StateChangeConf{
Pending: []string{"creating", "deleting", "rebooting", "resizing", "renaming", "modifying"},
Target: []string{"available"},
Refresh: resourceAwsRedshiftClusterStateRefreshFunc(d, meta),
Refresh: resourceAwsRedshiftClusterStateRefreshFunc(d.Id(), conn),
Timeout: 40 * time.Minute,
MinTimeout: 10 * time.Second,
}
@ -901,33 +898,10 @@ func resourceAwsRedshiftClusterDelete(d *schema.ResourceData, meta interface{})
}
}
log.Printf("[DEBUG] Redshift Cluster delete options: %s", deleteOpts)
err := resource.Retry(15*time.Minute, func() *resource.RetryError {
_, err := conn.DeleteCluster(&deleteOpts)
awsErr, ok := err.(awserr.Error)
if ok && awsErr.Code() == "InvalidClusterState" {
return resource.RetryableError(err)
}
return resource.NonRetryableError(err)
})
log.Printf("[DEBUG] Deleting Redshift Cluster: %s", deleteOpts)
_, err := deleteAwsRedshiftCluster(&deleteOpts, conn)
if err != nil {
return fmt.Errorf("[ERROR] Error deleting Redshift Cluster (%s): %s", d.Id(), err)
}
stateConf := &resource.StateChangeConf{
Pending: []string{"available", "creating", "deleting", "rebooting", "resizing", "renaming", "final-snapshot"},
Target: []string{"destroyed"},
Refresh: resourceAwsRedshiftClusterStateRefreshFunc(d, meta),
Timeout: 40 * time.Minute,
MinTimeout: 5 * time.Second,
}
// Wait, catching any errors
_, err = stateConf.WaitForState()
if err != nil {
return fmt.Errorf("[ERROR] Error deleting Redshift Cluster (%s): %s", d.Id(), err)
return err
}
log.Printf("[INFO] Redshift Cluster %s successfully deleted", d.Id())
@ -935,29 +909,52 @@ func resourceAwsRedshiftClusterDelete(d *schema.ResourceData, meta interface{})
return nil
}
func resourceAwsRedshiftClusterStateRefreshFunc(d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
conn := meta.(*AWSClient).redshiftconn
func deleteAwsRedshiftCluster(opts *redshift.DeleteClusterInput, conn *redshift.Redshift) (interface{}, error) {
id := *opts.ClusterIdentifier
log.Printf("[INFO] Deleting Redshift Cluster %q", id)
err := resource.Retry(15*time.Minute, func() *resource.RetryError {
_, err := conn.DeleteCluster(opts)
if isAWSErr(err, redshift.ErrCodeInvalidClusterStateFault, "") {
return resource.RetryableError(err)
}
log.Printf("[INFO] Reading Redshift Cluster Information: %s", d.Id())
return resource.NonRetryableError(err)
})
if err != nil {
return nil, fmt.Errorf("[ERROR] Error deleting Redshift Cluster (%s): %s",
id, err)
}
stateConf := &resource.StateChangeConf{
Pending: []string{"available", "creating", "deleting", "rebooting", "resizing", "renaming", "final-snapshot"},
Target: []string{"destroyed"},
Refresh: resourceAwsRedshiftClusterStateRefreshFunc(id, conn),
Timeout: 40 * time.Minute,
MinTimeout: 5 * time.Second,
}
return stateConf.WaitForState()
}
func resourceAwsRedshiftClusterStateRefreshFunc(id string, conn *redshift.Redshift) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
log.Printf("[INFO] Reading Redshift Cluster Information: %s", id)
resp, err := conn.DescribeClusters(&redshift.DescribeClustersInput{
ClusterIdentifier: aws.String(d.Id()),
ClusterIdentifier: aws.String(id),
})
if err != nil {
if awsErr, ok := err.(awserr.Error); ok {
if "ClusterNotFound" == awsErr.Code() {
return 42, "destroyed", nil
}
if isAWSErr(err, redshift.ErrCodeClusterNotFoundFault, "") {
return 42, "destroyed", nil
}
log.Printf("[WARN] Error on retrieving Redshift Cluster (%s) when waiting: %s", d.Id(), err)
log.Printf("[WARN] Error on retrieving Redshift Cluster (%s) when waiting: %s", id, err)
return nil, "", err
}
var rsc *redshift.Cluster
for _, c := range resp.Clusters {
if *c.ClusterIdentifier == d.Id() {
if *c.ClusterIdentifier == id {
rsc = c
}
}
@ -967,7 +964,7 @@ func resourceAwsRedshiftClusterStateRefreshFunc(d *schema.ResourceData, meta int
}
if rsc.ClusterStatus != nil {
log.Printf("[DEBUG] Redshift Cluster status (%s): %s", d.Id(), *rsc.ClusterStatus)
log.Printf("[DEBUG] Redshift Cluster status (%s): %s", id, *rsc.ClusterStatus)
}
return rsc, *rsc.ClusterStatus, nil

View File

@ -235,6 +235,12 @@ func resourceAwsRoute53Record() *schema.Resource {
Optional: true,
Set: schema.HashString,
},
"allow_overwrite": {
Type: schema.TypeBool,
Optional: true,
Default: true,
},
},
}
}
@ -382,6 +388,16 @@ func resourceAwsRoute53RecordCreate(d *schema.ResourceData, meta interface{}) er
return err
}
// Protect existing DNS records which might be managed in another way
// Use UPSERT only if the overwrite flag is true or if the current action is an update
// Else CREATE is used and fail if the same record exists
var action string
if d.Get("allow_overwrite").(bool) || !d.IsNewResource() {
action = "UPSERT"
} else {
action = "CREATE"
}
// Create the new records. We abuse StateChangeConf for this to
// retry for us since Route53 sometimes returns errors about another
// operation happening at the same time.
@ -389,7 +405,7 @@ func resourceAwsRoute53RecordCreate(d *schema.ResourceData, meta interface{}) er
Comment: aws.String("Managed by Terraform"),
Changes: []*route53.Change{
{
Action: aws.String("UPSERT"),
Action: aws.String(action),
ResourceRecordSet: rec,
},
},
@ -922,5 +938,6 @@ func parseRecordId(id string) [4]string {
}
}
}
recName = strings.TrimSuffix(recName, ".")
return [4]string{recZone, recName, recType, recSet}
}

View File

@ -373,6 +373,36 @@ func resourceAwsS3Bucket() *schema.Resource {
Optional: true,
ValidateFunc: validateS3BucketReplicationDestinationStorageClass,
},
"replica_kms_key_id": {
Type: schema.TypeString,
Optional: true,
},
},
},
},
"source_selection_criteria": {
Type: schema.TypeSet,
Optional: true,
MinItems: 1,
MaxItems: 1,
Set: sourceSelectionCriteriaHash,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sse_kms_encrypted_objects": {
Type: schema.TypeSet,
Optional: true,
MinItems: 1,
MaxItems: 1,
Set: sourceSseKmsObjectsHash,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"enabled": {
Type: schema.TypeBool,
Required: true,
},
},
},
},
},
},
},
@ -970,19 +1000,17 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error {
Bucket: aws.String(d.Id()),
})
})
replication := replicationResponse.(*s3.GetBucketReplicationOutput)
if err != nil {
if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() != 404 {
return err
}
}
replication := replicationResponse.(*s3.GetBucketReplicationOutput)
log.Printf("[DEBUG] S3 Bucket: %s, read replication configuration: %v", d.Id(), replication)
if r := replication.ReplicationConfiguration; r != nil {
if err := d.Set("replication_configuration", flattenAwsS3BucketReplicationConfiguration(replication.ReplicationConfiguration)); err != nil {
log.Printf("[DEBUG] Error setting replication configuration: %s", err)
return err
}
if err := d.Set("replication_configuration", flattenAwsS3BucketReplicationConfiguration(replication.ReplicationConfiguration)); err != nil {
log.Printf("[DEBUG] Error setting replication configuration: %s", err)
return err
}
// Read the bucket server side encryption configuration
@ -1019,10 +1047,10 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error {
},
)
})
location := locationResponse.(*s3.GetBucketLocationOutput)
if err != nil {
return err
}
location := locationResponse.(*s3.GetBucketLocationOutput)
var region string
if location.LocationConstraint != nil {
region = *location.LocationConstraint
@ -1373,10 +1401,10 @@ func websiteEndpoint(s3conn *s3.S3, d *schema.ResourceData) (*S3Website, error)
},
)
})
location := locationResponse.(*s3.GetBucketLocationOutput)
if err != nil {
return nil, err
}
location := locationResponse.(*s3.GetBucketLocationOutput)
var region string
if location.LocationConstraint != nil {
region = *location.LocationConstraint
@ -1682,17 +1710,37 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(s3conn *s3.S3, d *schema.
}
ruleDestination := &s3.Destination{}
if destination, ok := rr["destination"]; ok {
dest := destination.(*schema.Set).List()
bd := dest[0].(map[string]interface{})
if dest, ok := rr["destination"].(*schema.Set); ok && dest.Len() > 0 {
bd := dest.List()[0].(map[string]interface{})
ruleDestination.Bucket = aws.String(bd["bucket"].(string))
if storageClass, ok := bd["storage_class"]; ok && storageClass != "" {
ruleDestination.StorageClass = aws.String(storageClass.(string))
}
if replicaKmsKeyId, ok := bd["replica_kms_key_id"]; ok && replicaKmsKeyId != "" {
ruleDestination.EncryptionConfiguration = &s3.EncryptionConfiguration{
ReplicaKmsKeyID: aws.String(replicaKmsKeyId.(string)),
}
}
}
rcRule.Destination = ruleDestination
if ssc, ok := rr["source_selection_criteria"].(*schema.Set); ok && ssc.Len() > 0 {
sscValues := ssc.List()[0].(map[string]interface{})
ruleSsc := &s3.SourceSelectionCriteria{}
if sseKms, ok := sscValues["sse_kms_encrypted_objects"].(*schema.Set); ok && sseKms.Len() > 0 {
sseKmsValues := sseKms.List()[0].(map[string]interface{})
sseKmsEncryptedObjects := &s3.SseKmsEncryptedObjects{}
if sseKmsValues["enabled"].(bool) {
sseKmsEncryptedObjects.Status = aws.String(s3.SseKmsEncryptedObjectsStatusEnabled)
} else {
sseKmsEncryptedObjects.Status = aws.String(s3.SseKmsEncryptedObjectsStatusDisabled)
}
ruleSsc.SseKmsEncryptedObjects = sseKmsEncryptedObjects
}
rcRule.SourceSelectionCriteria = ruleSsc
}
rules = append(rules, rcRule)
}
@ -1893,6 +1941,11 @@ func flattenAwsS3ServerSideEncryptionConfiguration(c *s3.ServerSideEncryptionCon
func flattenAwsS3BucketReplicationConfiguration(r *s3.ReplicationConfiguration) []map[string]interface{} {
replication_configuration := make([]map[string]interface{}, 0, 1)
if r == nil {
return replication_configuration
}
m := make(map[string]interface{})
if r.Role != nil && *r.Role != "" {
@ -1910,6 +1963,11 @@ func flattenAwsS3BucketReplicationConfiguration(r *s3.ReplicationConfiguration)
if v.Destination.StorageClass != nil {
rd["storage_class"] = *v.Destination.StorageClass
}
if v.Destination.EncryptionConfiguration != nil {
if v.Destination.EncryptionConfiguration.ReplicaKmsKeyID != nil {
rd["replica_kms_key_id"] = *v.Destination.EncryptionConfiguration.ReplicaKmsKeyID
}
}
t["destination"] = schema.NewSet(destinationHash, []interface{}{rd})
}
@ -1922,6 +1980,19 @@ func flattenAwsS3BucketReplicationConfiguration(r *s3.ReplicationConfiguration)
if v.Status != nil {
t["status"] = *v.Status
}
if vssc := v.SourceSelectionCriteria; vssc != nil {
tssc := make(map[string]interface{})
if vssc.SseKmsEncryptedObjects != nil {
tSseKms := make(map[string]interface{})
if *vssc.SseKmsEncryptedObjects.Status == s3.SseKmsEncryptedObjectsStatusEnabled {
tSseKms["enabled"] = true
} else if *vssc.SseKmsEncryptedObjects.Status == s3.SseKmsEncryptedObjectsStatusDisabled {
tSseKms["enabled"] = false
}
tssc["sse_kms_encrypted_objects"] = schema.NewSet(sourceSseKmsObjectsHash, []interface{}{tSseKms})
}
t["source_selection_criteria"] = schema.NewSet(sourceSelectionCriteriaHash, []interface{}{tssc})
}
rules = append(rules, t)
}
m["rules"] = schema.NewSet(rulesHash, rules)
@ -2083,6 +2154,12 @@ func rulesHash(v interface{}) int {
if v, ok := m["status"]; ok {
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
}
if v, ok := m["destination"].(*schema.Set); ok && v.Len() > 0 {
buf.WriteString(fmt.Sprintf("%d-", destinationHash(v.List()[0])))
}
if v, ok := m["source_selection_criteria"].(*schema.Set); ok && v.Len() > 0 && v.List()[0] != nil {
buf.WriteString(fmt.Sprintf("%d-", sourceSelectionCriteriaHash(v.List()[0])))
}
return hashcode.String(buf.String())
}
@ -2096,6 +2173,33 @@ func destinationHash(v interface{}) int {
if v, ok := m["storage_class"]; ok {
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
}
if v, ok := m["replica_kms_key_id"]; ok {
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
}
return hashcode.String(buf.String())
}
// sourceSelectionCriteriaHash computes the schema.Set hash key for a
// source_selection_criteria block of an S3 replication rule, folding in the
// nested sse_kms_encrypted_objects set when present.
func sourceSelectionCriteriaHash(v interface{}) int {
	// v is nil if empty source_selection_criteria is given.
	if v == nil {
		return 0
	}

	var buf bytes.Buffer
	m := v.(map[string]interface{})

	if v, ok := m["sse_kms_encrypted_objects"].(*schema.Set); ok && v.Len() > 0 {
		buf.WriteString(fmt.Sprintf("%d-", sourceSseKmsObjectsHash(v.List()[0])))
	}
	return hashcode.String(buf.String())
}
// sourceSseKmsObjectsHash computes the schema.Set hash key for an
// sse_kms_encrypted_objects block, keyed solely on its "enabled" flag.
func sourceSseKmsObjectsHash(v interface{}) int {
	attrs := v.(map[string]interface{})

	var key bytes.Buffer
	if enabled, ok := attrs["enabled"]; ok {
		key.WriteString(fmt.Sprintf("%t-", enabled.(bool)))
	}

	return hashcode.String(key.String())
}

View File

@ -0,0 +1,218 @@
package aws
import (
"fmt"
"log"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
// resourceAwsS3BucketMetric returns the schema for the aws_s3_bucket_metric
// resource, which manages a request-metrics configuration on an S3 bucket.
// bucket and name identify the configuration and force a new resource when
// changed; the optional filter block limits metrics to a prefix and/or tags.
func resourceAwsS3BucketMetric() *schema.Resource {
	filterSchema := &schema.Schema{
		Type:     schema.TypeList,
		Optional: true,
		MaxItems: 1,
		Elem: &schema.Resource{
			Schema: map[string]*schema.Schema{
				"prefix": {
					Type:     schema.TypeString,
					Optional: true,
				},
				"tags": tagsSchema(),
			},
		},
	}

	return &schema.Resource{
		Create: resourceAwsS3BucketMetricPut,
		Read:   resourceAwsS3BucketMetricRead,
		Update: resourceAwsS3BucketMetricPut,
		Delete: resourceAwsS3BucketMetricDelete,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},
		Schema: map[string]*schema.Schema{
			"bucket": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"filter": filterSchema,
			"name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
		},
	}
}
// resourceAwsS3BucketMetricPut creates or updates the bucket metrics
// configuration; the S3 API uses the same PUT call for both operations.
func resourceAwsS3BucketMetricPut(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).s3conn

	bucket := d.Get("bucket").(string)
	name := d.Get("name").(string)

	cfg := &s3.MetricsConfiguration{
		Id: aws.String(name),
	}
	if v, ok := d.GetOk("filter"); ok {
		// The schema allows at most one filter block.
		cfg.Filter = expandS3MetricsFilter(v.([]interface{})[0].(map[string]interface{}))
	}

	input := &s3.PutBucketMetricsConfigurationInput{
		Bucket:               aws.String(bucket),
		Id:                   aws.String(name),
		MetricsConfiguration: cfg,
	}

	log.Printf("[DEBUG] Putting metric configuration: %s", input)
	// Retry briefly: a freshly created bucket may not be visible yet.
	err := resource.Retry(1*time.Minute, func() *resource.RetryError {
		if _, err := conn.PutBucketMetricsConfiguration(input); err != nil {
			if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") {
				return resource.RetryableError(err)
			}
			return resource.NonRetryableError(err)
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("Error putting S3 metric configuration: %s", err)
	}

	// Composite ID: <bucket>:<configuration name>.
	d.SetId(fmt.Sprintf("%s:%s", bucket, name))

	return resourceAwsS3BucketMetricRead(d, meta)
}
// resourceAwsS3BucketMetricDelete removes the metrics configuration; a
// missing bucket or configuration is treated as already deleted.
func resourceAwsS3BucketMetricDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).s3conn

	bucket, name, err := resourceAwsS3BucketMetricParseID(d.Id())
	if err != nil {
		return err
	}

	input := &s3.DeleteBucketMetricsConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(name),
	}

	log.Printf("[DEBUG] Deleting S3 bucket metric configuration: %s", input)

	if _, err = conn.DeleteBucketMetricsConfiguration(input); err != nil {
		notFound := isAWSErr(err, s3.ErrCodeNoSuchBucket, "") ||
			isAWSErr(err, "NoSuchConfiguration", "The specified configuration does not exist.")
		if !notFound {
			return fmt.Errorf("Error deleting S3 metric configuration: %s", err)
		}
		log.Printf("[WARN] %s S3 bucket metrics configuration not found, removing from state.", d.Id())
	}

	d.SetId("")
	return nil
}
// resourceAwsS3BucketMetricRead refreshes the state of a bucket metrics
// configuration from the S3 API. A missing bucket or configuration removes
// the resource from state instead of failing the read.
func resourceAwsS3BucketMetricRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).s3conn
	bucket, name, err := resourceAwsS3BucketMetricParseID(d.Id())
	if err != nil {
		return err
	}

	d.Set("bucket", bucket)
	d.Set("name", name)

	input := &s3.GetBucketMetricsConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(name),
	}

	log.Printf("[DEBUG] Reading S3 bucket metrics configuration: %s", input)
	output, err := conn.GetBucketMetricsConfiguration(input)
	if err != nil {
		if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") || isAWSErr(err, "NoSuchConfiguration", "The specified configuration does not exist.") {
			log.Printf("[WARN] %s S3 bucket metrics configuration not found, removing from state.", d.Id())
			d.SetId("")
			return nil
		}
		return err
	}

	// Only a non-nil filter is written back; an absent filter leaves the
	// "filter" attribute untouched.
	if output.MetricsConfiguration.Filter != nil {
		if err := d.Set("filter", []interface{}{flattenS3MetricsFilter(output.MetricsConfiguration.Filter)}); err != nil {
			return err
		}
	}

	return nil
}
// expandS3MetricsFilter builds the S3 MetricsFilter API value from the
// resource's "filter" block. Exactly one of And, Tag or Prefix is set,
// chosen from the combination of configured prefix and tags:
//   - prefix plus at least one tag, or more than one tag -> And
//   - exactly one tag without a prefix                   -> Tag
//   - otherwise                                          -> Prefix (may be "")
func expandS3MetricsFilter(m map[string]interface{}) *s3.MetricsFilter {
	var prefix string
	if v, ok := m["prefix"]; ok {
		prefix = v.(string)
	}

	var tags []*s3.Tag
	if v, ok := m["tags"]; ok {
		tags = tagsFromMapS3(v.(map[string]interface{}))
	}

	metricsFilter := &s3.MetricsFilter{}
	if prefix != "" && len(tags) > 0 {
		metricsFilter.And = &s3.MetricsAndOperator{
			Prefix: aws.String(prefix),
			Tags:   tags,
		}
	} else if len(tags) > 1 {
		metricsFilter.And = &s3.MetricsAndOperator{
			Tags: tags,
		}
	} else if len(tags) == 1 {
		metricsFilter.Tag = tags[0]
	} else {
		metricsFilter.Prefix = aws.String(prefix)
	}
	return metricsFilter
}
// flattenS3MetricsFilter converts an S3 MetricsFilter API value back into the
// map representation used by the resource's "filter" schema block.
func flattenS3MetricsFilter(metricsFilter *s3.MetricsFilter) map[string]interface{} {
	result := make(map[string]interface{})

	switch {
	case metricsFilter.And != nil:
		andOp := metricsFilter.And
		if andOp.Prefix != nil {
			result["prefix"] = *andOp.Prefix
		}
		if andOp.Tags != nil {
			result["tags"] = tagsToMapS3(andOp.Tags)
		}
	case metricsFilter.Prefix != nil:
		result["prefix"] = *metricsFilter.Prefix
	case metricsFilter.Tag != nil:
		result["tags"] = tagsToMapS3([]*s3.Tag{metricsFilter.Tag})
	}

	return result
}
// resourceAwsS3BucketMetricParseID splits a bucket metric resource ID of the
// form "BUCKET:NAME" into its bucket and configuration-name components.
// It returns an error when the ID does not contain exactly one colon.
func resourceAwsS3BucketMetricParseID(id string) (string, string, error) {
	idParts := strings.Split(id, ":")
	if len(idParts) != 2 {
		// Fixed: the error message previously had an unbalanced parenthesis.
		return "", "", fmt.Errorf("please make sure the ID is in the form BUCKET:NAME (i.e. my-bucket:EntireBucket)")
	}
	bucket := idParts[0]
	name := idParts[1]
	return bucket, name, nil
}

View File

@ -52,14 +52,29 @@ func resourceAwsServiceDiscoveryService() *schema.Resource {
Required: true,
},
"type": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateServiceDiscoveryServiceDnsRecordsType,
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateStringIn(
servicediscovery.RecordTypeSrv,
servicediscovery.RecordTypeA,
servicediscovery.RecordTypeAaaa,
servicediscovery.RecordTypeCname,
),
},
},
},
},
"routing_policy": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Default: servicediscovery.RoutingPolicyMultivalue,
ValidateFunc: validateStringIn(
servicediscovery.RoutingPolicyMultivalue,
servicediscovery.RoutingPolicyWeighted,
),
},
},
},
},
@ -78,10 +93,14 @@ func resourceAwsServiceDiscoveryService() *schema.Resource {
Optional: true,
},
"type": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validateServiceDiscoveryServiceHealthCheckConfigType,
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validateStringIn(
servicediscovery.HealthCheckTypeHttp,
servicediscovery.HealthCheckTypeHttps,
servicediscovery.HealthCheckTypeTcp,
),
},
},
},
@ -220,6 +239,9 @@ func expandServiceDiscoveryDnsConfig(configured map[string]interface{}) *service
drs[i] = dr
}
result.DnsRecords = drs
if v, ok := configured["routing_policy"]; ok && v != "" {
result.RoutingPolicy = aws.String(v.(string))
}
return result
}
@ -228,6 +250,7 @@ func flattenServiceDiscoveryDnsConfig(config *servicediscovery.DnsConfig) []map[
result := map[string]interface{}{}
result["namespace_id"] = *config.NamespaceId
result["routing_policy"] = *config.RoutingPolicy
drs := make([]map[string]interface{}, 0)
for _, v := range config.DnsRecords {
dr := map[string]interface{}{}

View File

@ -0,0 +1,36 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/service/servicediscovery"
"github.com/hashicorp/terraform/terraform"
)
// resourceAwsServiceDiscoveryServiceMigrateState dispatches state migration
// for the service discovery service resource based on the recorded schema
// version. Only the v0 -> v1 upgrade is supported.
func resourceAwsServiceDiscoveryServiceMigrateState(
	v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
	if v == 0 {
		log.Println("[INFO] Found AWS ServiceDiscovery Service State v0; migrating to v1")
		return migrateServiceDiscoveryServiceStateV0toV1(is)
	}
	return is, fmt.Errorf("Unexpected schema version: %d", v)
}
// migrateServiceDiscoveryServiceStateV0toV1 backfills the routing policy
// attribute introduced in schema v1 with its default value.
func migrateServiceDiscoveryServiceStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) {
	if is.Empty() {
		log.Println("[DEBUG] Empty InstanceState; nothing to migrate.")
		return is, nil
	}
	log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes)
	// Default the routing policy when it is missing OR empty. The previous
	// condition used "&&", which made the emptiness check unreachable (a
	// missing key already yields "") and skipped present-but-empty values.
	if v, ok := is.Attributes["dns_config.0.routing_policy"]; !ok || v == "" {
		is.Attributes["dns_config.0.routing_policy"] = servicediscovery.RoutingPolicyMultivalue
	}
	log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes)
	return is, nil
}

View File

@ -0,0 +1,110 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ses"
"github.com/hashicorp/terraform/helper/schema"
)
// resourceAwsSesDomainMailFrom defines the resource managing the custom
// MAIL FROM domain of an SES identity. Create and Update share the same Set
// function, since both are implemented via the same SES call.
func resourceAwsSesDomainMailFrom() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsSesDomainMailFromSet,
		Read:   resourceAwsSesDomainMailFromRead,
		Update: resourceAwsSesDomainMailFromSet,
		Delete: resourceAwsSesDomainMailFromDelete,

		// The resource ID is the SES domain itself, so a plain passthrough
		// import works.
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},

		Schema: map[string]*schema.Schema{
			// SES identity (domain) the MAIL FROM setting is attached to.
			"domain": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			// Subdomain to use as the MAIL FROM domain.
			"mail_from_domain": {
				Type:     schema.TypeString,
				Required: true,
			},
			// SES BehaviorOnMXFailure setting; defaults to falling back to
			// the default MAIL FROM value.
			"behavior_on_mx_failure": {
				Type:     schema.TypeString,
				Optional: true,
				Default:  ses.BehaviorOnMXFailureUseDefaultValue,
			},
		},
	}
}
// resourceAwsSesDomainMailFromSet creates or updates the MAIL FROM domain of
// the SES identity named by "domain"; it serves as both Create and Update.
func resourceAwsSesDomainMailFromSet(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).sesConn

	behaviorOnMxFailure := d.Get("behavior_on_mx_failure").(string)
	domainName := d.Get("domain").(string)
	mailFromDomain := d.Get("mail_from_domain").(string)

	input := &ses.SetIdentityMailFromDomainInput{
		BehaviorOnMXFailure: aws.String(behaviorOnMxFailure),
		Identity:            aws.String(domainName),
		MailFromDomain:      aws.String(mailFromDomain),
	}

	_, err := conn.SetIdentityMailFromDomain(input)
	if err != nil {
		return fmt.Errorf("Error setting MAIL FROM domain: %s", err)
	}

	// The identity domain doubles as the resource ID.
	d.SetId(domainName)

	return resourceAwsSesDomainMailFromRead(d, meta)
}
// resourceAwsSesDomainMailFromRead refreshes the MAIL FROM attributes of the
// SES identity whose domain is the resource ID. When SES no longer reports
// MAIL FROM attributes for the domain, the resource is removed from state.
func resourceAwsSesDomainMailFromRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).sesConn

	domainName := d.Id()

	readOpts := &ses.GetIdentityMailFromDomainAttributesInput{
		Identities: []*string{
			aws.String(domainName),
		},
	}

	out, err := conn.GetIdentityMailFromDomainAttributes(readOpts)
	if err != nil {
		log.Printf("error fetching MAIL FROM domain attributes for %s: %s", domainName, err)
		return err
	}

	d.Set("domain", domainName)

	if v, ok := out.MailFromDomainAttributes[domainName]; ok {
		d.Set("behavior_on_mx_failure", v.BehaviorOnMXFailure)
		d.Set("mail_from_domain", v.MailFromDomain)
	} else {
		// Bug fix: the previous else branch dereferenced v, which is the nil
		// zero value on a map miss, and panicked. No attributes means no
		// MAIL FROM is configured, so drop the resource from state.
		log.Printf("[WARN] MAIL FROM domain attributes not found for %s, removing from state", domainName)
		d.SetId("")
	}

	return nil
}
// resourceAwsSesDomainMailFromDelete clears the MAIL FROM domain of the SES
// identity by calling SetIdentityMailFromDomain with a nil MailFromDomain.
// The identity itself is left untouched.
func resourceAwsSesDomainMailFromDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).sesConn

	domainName := d.Id()

	deleteOpts := &ses.SetIdentityMailFromDomainInput{
		Identity:       aws.String(domainName),
		MailFromDomain: nil,
	}

	_, err := conn.SetIdentityMailFromDomain(deleteOpts)
	if err != nil {
		// Fixed message: this call removes the MAIL FROM setting, not the
		// domain identity.
		return fmt.Errorf("Error deleting SES domain MAIL FROM: %s", err)
	}

	return nil
}

View File

@ -6,9 +6,7 @@ import (
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/sns"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/structure"
@ -16,22 +14,23 @@ import (
// Mutable attributes
var SNSAttributeMap = map[string]string{
"arn": "TopicArn",
"display_name": "DisplayName",
"policy": "Policy",
"delivery_policy": "DeliveryPolicy",
"application_failure_feedback_role_arn": "ApplicationFailureFeedbackRoleArn",
"application_success_feedback_role_arn": "ApplicationSuccessFeedbackRoleArn",
"application_success_feedback_sample_rate": "ApplicationSuccessFeedbackSampleRate",
"application_failure_feedback_role_arn": "ApplicationFailureFeedbackRoleArn",
"http_success_feedback_role_arn": "HTTPSuccessFeedbackRoleArn",
"http_success_feedback_sample_rate": "HTTPSuccessFeedbackSampleRate",
"http_failure_feedback_role_arn": "HTTPFailureFeedbackRoleArn",
"lambda_success_feedback_role_arn": "LambdaSuccessFeedbackRoleArn",
"lambda_success_feedback_sample_rate": "LambdaSuccessFeedbackSampleRate",
"lambda_failure_feedback_role_arn": "LambdaFailureFeedbackRoleArn",
"sqs_success_feedback_role_arn": "SQSSuccessFeedbackRoleArn",
"sqs_success_feedback_sample_rate": "SQSSuccessFeedbackSampleRate",
"sqs_failure_feedback_role_arn": "SQSFailureFeedbackRoleArn"}
"arn": "TopicArn",
"delivery_policy": "DeliveryPolicy",
"display_name": "DisplayName",
"http_failure_feedback_role_arn": "HTTPFailureFeedbackRoleArn",
"http_success_feedback_role_arn": "HTTPSuccessFeedbackRoleArn",
"http_success_feedback_sample_rate": "HTTPSuccessFeedbackSampleRate",
"lambda_failure_feedback_role_arn": "LambdaFailureFeedbackRoleArn",
"lambda_success_feedback_role_arn": "LambdaSuccessFeedbackRoleArn",
"lambda_success_feedback_sample_rate": "LambdaSuccessFeedbackSampleRate",
"policy": "Policy",
"sqs_failure_feedback_role_arn": "SQSFailureFeedbackRoleArn",
"sqs_success_feedback_role_arn": "SQSSuccessFeedbackRoleArn",
"sqs_success_feedback_sample_rate": "SQSSuccessFeedbackSampleRate",
}
func resourceAwsSnsTopic() *schema.Resource {
return &schema.Resource{
@ -44,24 +43,23 @@ func resourceAwsSnsTopic() *schema.Resource {
},
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
"name": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
ConflictsWith: []string{"name_prefix"},
},
"name_prefix": &schema.Schema{
"name_prefix": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"display_name": &schema.Schema{
"display_name": {
Type: schema.TypeString,
Optional: true,
ForceNew: false,
},
"policy": &schema.Schema{
"policy": {
Type: schema.TypeString,
Optional: true,
Computed: true,
@ -72,7 +70,7 @@ func resourceAwsSnsTopic() *schema.Resource {
return json
},
},
"delivery_policy": &schema.Schema{
"delivery_policy": {
Type: schema.TypeString,
Optional: true,
ForceNew: false,
@ -135,7 +133,7 @@ func resourceAwsSnsTopic() *schema.Resource {
Type: schema.TypeString,
Optional: true,
},
"arn": &schema.Schema{
"arn": {
Type: schema.TypeString,
Computed: true,
},
@ -168,37 +166,18 @@ func resourceAwsSnsTopicCreate(d *schema.ResourceData, meta interface{}) error {
d.SetId(*output.TopicArn)
// Write the ARN to the 'arn' field for export
d.Set("arn", *output.TopicArn)
return resourceAwsSnsTopicUpdate(d, meta)
}
func resourceAwsSnsTopicUpdate(d *schema.ResourceData, meta interface{}) error {
r := *resourceAwsSnsTopic()
conn := meta.(*AWSClient).snsconn
for k, _ := range r.Schema {
if attrKey, ok := SNSAttributeMap[k]; ok {
if d.HasChange(k) {
log.Printf("[DEBUG] Updating %s", attrKey)
_, n := d.GetChange(k)
// Ignore an empty policy
if !(k == "policy" && n == "") {
// Make API call to update attributes
req := sns.SetTopicAttributesInput{
TopicArn: aws.String(d.Id()),
AttributeName: aws.String(attrKey),
AttributeValue: aws.String(fmt.Sprintf("%v", n)),
}
conn := meta.(*AWSClient).snsconn
// Retry the update in the event of an eventually consistent style of
// error, where say an IAM resource is successfully created but not
// actually available. See https://github.com/hashicorp/terraform/issues/3660
_, err := retryOnAwsCode("InvalidParameter", func() (interface{}, error) {
return conn.SetTopicAttributes(&req)
})
return err
}
for terraformAttrName, snsAttrName := range SNSAttributeMap {
if d.HasChange(terraformAttrName) {
_, terraformAttrValue := d.GetChange(terraformAttrName)
err := updateAwsSnsTopicAttribute(d.Id(), snsAttrName, terraformAttrValue, conn)
if err != nil {
return err
}
}
}
@ -209,11 +188,12 @@ func resourceAwsSnsTopicUpdate(d *schema.ResourceData, meta interface{}) error {
func resourceAwsSnsTopicRead(d *schema.ResourceData, meta interface{}) error {
snsconn := meta.(*AWSClient).snsconn
log.Printf("[DEBUG] Reading SNS Topic Attributes for %s", d.Id())
attributeOutput, err := snsconn.GetTopicAttributes(&sns.GetTopicAttributesInput{
TopicArn: aws.String(d.Id()),
})
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFound" {
if isAWSErr(err, sns.ErrCodeNotFoundException, "") {
log.Printf("[WARN] SNS Topic (%s) not found, error code (404)", d.Id())
d.SetId("")
return nil
@ -224,28 +204,12 @@ func resourceAwsSnsTopicRead(d *schema.ResourceData, meta interface{}) error {
if attributeOutput.Attributes != nil && len(attributeOutput.Attributes) > 0 {
attrmap := attributeOutput.Attributes
resource := *resourceAwsSnsTopic()
// iKey = internal struct key, oKey = AWS Attribute Map key
for iKey, oKey := range SNSAttributeMap {
log.Printf("[DEBUG] Reading %s => %s", iKey, oKey)
if attrmap[oKey] != nil {
// Some of the fetched attributes are stateful properties such as
// the number of subscriptions, the owner, etc. skip those
if resource.Schema[iKey] != nil {
var value string
if iKey == "policy" {
value, err = structure.NormalizeJsonString(*attrmap[oKey])
if err != nil {
return errwrap.Wrapf("policy contains an invalid JSON: {{err}}", err)
}
} else {
value = *attrmap[oKey]
}
log.Printf("[DEBUG] Reading %s => %s -> %s", iKey, oKey, value)
d.Set(iKey, value)
}
}
for terraformAttrName, snsAttrName := range SNSAttributeMap {
d.Set(terraformAttrName, attrmap[snsAttrName])
}
} else {
for terraformAttrName, _ := range SNSAttributeMap {
d.Set(terraformAttrName, "")
}
}
@ -275,3 +239,29 @@ func resourceAwsSnsTopicDelete(d *schema.ResourceData, meta interface{}) error {
}
return nil
}
// updateAwsSnsTopicAttribute sets a single mutable attribute on the SNS topic
// identified by topicArn. An empty "Policy" value is skipped, since SNS
// rejects empty policies. The value is stringified with %v before the call.
func updateAwsSnsTopicAttribute(topicArn, name string, value interface{}, conn *sns.SNS) error {
	// Ignore an empty policy
	if name == "Policy" && value == "" {
		return nil
	}

	log.Printf("[DEBUG] Updating SNS Topic Attribute: %s", name)

	// Make API call to update attributes
	req := sns.SetTopicAttributesInput{
		TopicArn:       aws.String(topicArn),
		AttributeName:  aws.String(name),
		AttributeValue: aws.String(fmt.Sprintf("%v", value)),
	}

	// Retry the update in the event of an eventually consistent style of
	// error, where say an IAM resource is successfully created but not
	// actually available. See https://github.com/hashicorp/terraform/issues/3660
	_, err := retryOnAwsCode(sns.ErrCodeInvalidParameterException, func() (interface{}, error) {
		return conn.SetTopicAttributes(&req)
	})
	// Idiom: return the error directly instead of if err != nil { return err };
	// return nil.
	return err
}

View File

@ -31,9 +31,10 @@ func resourceAwsSpotFleetRequest() *schema.Resource {
Schema: map[string]*schema.Schema{
"iam_fleet_role": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateArn,
},
"replace_unhealthy_instances": {
Type: schema.TypeBool,

View File

@ -21,7 +21,7 @@ func resourceAwsSpotInstanceRequest() *schema.Resource {
Timeouts: &schema.ResourceTimeout{
Create: schema.DefaultTimeout(10 * time.Minute),
Delete: schema.DefaultTimeout(10 * time.Minute),
Delete: schema.DefaultTimeout(20 * time.Minute),
},
Schema: func() map[string]*schema.Schema {

View File

@ -53,6 +53,7 @@ func resourceAwsSqsQueue() *schema.Resource {
ForceNew: true,
Computed: true,
ConflictsWith: []string{"name_prefix"},
ValidateFunc: validateSQSQueueName,
},
"name_prefix": {
Type: schema.TypeString,
@ -149,7 +150,7 @@ func resourceAwsSqsQueueCreate(d *schema.ResourceData, meta interface{}) error {
return fmt.Errorf("Error validating the FIFO queue name: %v", errors)
}
} else {
if errors := validateSQSQueueName(name, "name"); len(errors) > 0 {
if errors := validateSQSNonFifoQueueName(name, "name"); len(errors) > 0 {
return fmt.Errorf("Error validating SQS queue name: %v", errors)
}
}
@ -311,13 +312,17 @@ func resourceAwsSqsQueueRead(d *schema.ResourceData, meta interface{}) error {
d.Set("fifo_queue", d.Get("fifo_queue").(bool))
d.Set("content_based_deduplication", d.Get("content_based_deduplication").(bool))
listTagsOutput, err := sqsconn.ListQueueTags(&sqs.ListQueueTagsInput{
QueueUrl: aws.String(d.Id()),
})
if err != nil {
return err
tags := make(map[string]string)
if !meta.(*AWSClient).IsGovCloud() {
listTagsOutput, err := sqsconn.ListQueueTags(&sqs.ListQueueTagsInput{
QueueUrl: aws.String(d.Id()),
})
if err != nil {
return err
}
tags = tagsToMapGeneric(listTagsOutput.Tags)
}
d.Set("tags", tagsToMapGeneric(listTagsOutput.Tags))
d.Set("tags", tags)
return nil
}

View File

@ -73,7 +73,6 @@ func resourceAwsSsmAssociation() *schema.Resource {
"targets": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
Computed: true,
MaxItems: 5,
Elem: &schema.Resource{
@ -213,6 +212,10 @@ func resourceAwsSsmAssocationUpdate(d *schema.ResourceData, meta interface{}) er
associationInput.OutputLocation = expandSSMAssociationOutputLocation(d.Get("output_location").([]interface{}))
}
if d.HasChange("targets") {
associationInput.Targets = expandAwsSsmTargets(d)
}
_, err := ssmconn.UpdateAssociation(associationInput)
if err != nil {
return errwrap.Wrapf("[ERROR] Error updating SSM association: {{err}}", err)

View File

@ -82,7 +82,7 @@ func resourceAwsVolumeAttachmentCreate(d *schema.ResourceData, meta interface{})
stateConf := &resource.StateChangeConf{
Pending: []string{"pending", "stopping"},
Target: []string{"running", "stopped"},
Refresh: InstanceStateRefreshFunc(conn, iID, "terminated"),
Refresh: InstanceStateRefreshFunc(conn, iID, []string{"terminated"}),
Timeout: 10 * time.Minute,
Delay: 10 * time.Second,
MinTimeout: 3 * time.Second,

View File

@ -257,7 +257,7 @@ func resourceAwsVpcEndpointDelete(d *schema.ResourceData, meta interface{}) erro
}
stateConf := &resource.StateChangeConf{
Pending: []string{"available", "deleting"},
Pending: []string{"available", "pending", "deleting"},
Target: []string{"deleted"},
Refresh: vpcEndpointStateRefresh(conn, d.Id()),
Timeout: 10 * time.Minute,

View File

@ -109,7 +109,7 @@ func resourceAwsVPCPeeringRead(d *schema.ResourceData, meta interface{}) error {
pcRaw, status, err := resourceAwsVPCPeeringConnectionStateRefreshFunc(conn, d.Id())()
// Allow a failed VPC Peering Connection to fallthrough,
// to allow rest of the logic below to do its work.
if err != nil && status != "failed" {
if err != nil && status != ec2.VpcPeeringConnectionStateReasonCodeFailed {
return err
}
@ -125,11 +125,11 @@ func resourceAwsVPCPeeringRead(d *schema.ResourceData, meta interface{}) error {
// just "falls off" the console. See GH-2322
if pc.Status != nil {
status := map[string]bool{
"deleted": true,
"deleting": true,
"expired": true,
"failed": true,
"rejected": true,
ec2.VpcPeeringConnectionStateReasonCodeDeleted: true,
ec2.VpcPeeringConnectionStateReasonCodeDeleting: true,
ec2.VpcPeeringConnectionStateReasonCodeExpired: true,
ec2.VpcPeeringConnectionStateReasonCodeFailed: true,
ec2.VpcPeeringConnectionStateReasonCodeRejected: true,
}
if _, ok := status[*pc.Status.Code]; ok {
log.Printf("[DEBUG] VPC Peering Connection (%s) in state (%s), removing.",
@ -249,7 +249,7 @@ func resourceAwsVPCPeeringUpdate(d *schema.ResourceData, meta interface{}) error
pc := pcRaw.(*ec2.VpcPeeringConnection)
if _, ok := d.GetOk("auto_accept"); ok {
if pc.Status != nil && *pc.Status.Code == "pending-acceptance" {
if pc.Status != nil && *pc.Status.Code == ec2.VpcPeeringConnectionStateReasonCodePendingAcceptance {
status, err := resourceVPCPeeringConnectionAccept(conn, d.Id())
if err != nil {
return errwrap.Wrapf("Unable to accept VPC Peering Connection: {{err}}", err)
@ -290,8 +290,14 @@ func resourceAwsVPCPeeringDelete(d *schema.ResourceData, meta interface{}) error
// Wait for the vpc peering connection to become available
log.Printf("[DEBUG] Waiting for VPC Peering Connection (%s) to delete.", d.Id())
stateConf := &resource.StateChangeConf{
Pending: []string{"deleting"},
Target: []string{"rejecting", "deleted"},
Pending: []string{
ec2.VpcPeeringConnectionStateReasonCodePendingAcceptance,
ec2.VpcPeeringConnectionStateReasonCodeDeleting,
},
Target: []string{
ec2.VpcPeeringConnectionStateReasonCodeRejected,
ec2.VpcPeeringConnectionStateReasonCodeDeleted,
},
Refresh: resourceAwsVPCPeeringConnectionStateRefreshFunc(conn, d.Id()),
Timeout: 1 * time.Minute,
}
@ -408,8 +414,14 @@ func checkVpcPeeringConnectionAvailable(conn *ec2.EC2, id string) error {
// Wait for the vpc peering connection to become available
log.Printf("[DEBUG] Waiting for VPC Peering Connection (%s) to become available.", id)
stateConf := &resource.StateChangeConf{
Pending: []string{"initiating-request", "provisioning", "pending"},
Target: []string{"pending-acceptance", "active"},
Pending: []string{
ec2.VpcPeeringConnectionStateReasonCodeInitiatingRequest,
ec2.VpcPeeringConnectionStateReasonCodeProvisioning,
},
Target: []string{
ec2.VpcPeeringConnectionStateReasonCodePendingAcceptance,
ec2.VpcPeeringConnectionStateReasonCodeActive,
},
Refresh: resourceAwsVPCPeeringConnectionStateRefreshFunc(conn, id),
Timeout: 1 * time.Minute,
}

View File

@ -19,6 +19,7 @@ import (
"github.com/aws/aws-sdk-go/service/cognitoidentity"
"github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
"github.com/aws/aws-sdk-go/service/configservice"
"github.com/aws/aws-sdk-go/service/dax"
"github.com/aws/aws-sdk-go/service/directoryservice"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/ec2"
@ -854,6 +855,16 @@ func flattenElastiCacheSecurityGroupIds(securityGroups []*elasticache.SecurityGr
return result
}
// flattenDaxSecurityGroupIds extracts the non-nil security group identifiers
// from a slice of DAX security group memberships.
func flattenDaxSecurityGroupIds(securityGroups []*dax.SecurityGroupMembership) []string {
	ids := make([]string, 0, len(securityGroups))
	for _, membership := range securityGroups {
		if membership.SecurityGroupIdentifier == nil {
			continue
		}
		ids = append(ids, *membership.SecurityGroupIdentifier)
	}
	return ids
}
// Flattens step adjustments into a list of map[string]interface.
func flattenStepAdjustments(adjustments []*autoscaling.StepAdjustment) []map[string]interface{} {
result := make([]map[string]interface{}, 0, len(adjustments))
@ -3667,3 +3678,30 @@ func flattenDynamoDbTableItemAttributes(attrs map[string]*dynamodb.AttributeValu
return rawBuffer.String(), nil
}
// expandIotThingTypeProperties builds the IoT ThingTypeProperties API value
// from the resource configuration map. An empty description is omitted.
func expandIotThingTypeProperties(config map[string]interface{}) *iot.ThingTypeProperties {
	props := &iot.ThingTypeProperties{
		SearchableAttributes: expandStringList(config["searchable_attributes"].(*schema.Set).List()),
	}
	if desc, ok := config["description"]; ok && desc.(string) != "" {
		props.ThingTypeDescription = aws.String(desc.(string))
	}
	return props
}
// flattenIotThingTypeProperties converts IoT ThingTypeProperties into the
// single-element list-of-maps shape used by the resource schema. A nil input
// yields nil.
func flattenIotThingTypeProperties(s *iot.ThingTypeProperties) []map[string]interface{} {
	if s == nil {
		return nil
	}

	attrs := map[string]interface{}{
		"searchable_attributes": flattenStringList(s.SearchableAttributes),
	}
	if s.ThingTypeDescription != nil {
		attrs["description"] = *s.ThingTypeDescription
	}

	return []map[string]interface{}{attrs}
}

View File

@ -0,0 +1,115 @@
package aws
import (
"log"
"regexp"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/dax"
"github.com/hashicorp/terraform/helper/schema"
)
// setTagsDax reconciles the "tags" attribute of a DAX resource against the
// tags currently applied to the resource identified by arn: obsolete or
// changed tags are removed first, then the desired set is created.
func setTagsDax(conn *dax.DAX, d *schema.ResourceData, arn string) error {
	if d.HasChange("tags") {
		oraw, nraw := d.GetChange("tags")
		o := oraw.(map[string]interface{})
		n := nraw.(map[string]interface{})
		create, remove := diffTagsDax(tagsFromMapDax(o), tagsFromMapDax(n))

		// Set tags
		if len(remove) > 0 {
			log.Printf("[DEBUG] Removing tags: %#v", remove)
			// len implies cap; the explicit capacity argument was redundant (S1019).
			k := make([]*string, len(remove))
			for i, t := range remove {
				k[i] = t.Key
			}
			_, err := conn.UntagResource(&dax.UntagResourceInput{
				ResourceName: aws.String(arn),
				TagKeys:      k,
			})
			if err != nil {
				return err
			}
		}
		if len(create) > 0 {
			log.Printf("[DEBUG] Creating tags: %#v", create)
			_, err := conn.TagResource(&dax.TagResourceInput{
				ResourceName: aws.String(arn),
				Tags:         create,
			})
			if err != nil {
				return err
			}
		}
	}

	return nil
}
// diffTagsDax compares the previous and desired DAX tag sets and returns the
// tags that must be created and the tags that must be removed.
func diffTagsDax(oldTags, newTags []*dax.Tag) ([]*dax.Tag, []*dax.Tag) {
	// Every desired tag is a creation candidate.
	desired := make(map[string]interface{})
	for _, tag := range newTags {
		desired[*tag.Key] = *tag.Value
	}

	// Any previous tag that is absent from, or differs in, the desired set
	// must be removed.
	var remove []*dax.Tag
	for _, tag := range oldTags {
		current, exists := desired[*tag.Key]
		if !exists || current != *tag.Value {
			remove = append(remove, tag)
		}
	}

	return tagsFromMapDax(desired), remove
}
// tagsFromMapDax converts a Terraform tags map into DAX Tag values, dropping
// AWS-internal tags.
func tagsFromMapDax(m map[string]interface{}) []*dax.Tag {
	tags := make([]*dax.Tag, 0, len(m))
	for key, raw := range m {
		tag := &dax.Tag{
			Key:   aws.String(key),
			Value: aws.String(raw.(string)),
		}
		if tagIgnoredDax(tag) {
			continue
		}
		tags = append(tags, tag)
	}
	return tags
}
// tagsToMapDax converts DAX Tag values into a plain string map, dropping
// AWS-internal tags.
func tagsToMapDax(ts []*dax.Tag) map[string]string {
	out := make(map[string]string)
	for _, tag := range ts {
		if tagIgnoredDax(tag) {
			continue
		}
		out[*tag.Key] = *tag.Value
	}
	return out
}
// tagIgnoredDax reports whether the tag key matches one of the AWS-internal
// patterns (currently "^aws:") that Terraform must not manage.
func tagIgnoredDax(t *dax.Tag) bool {
	filter := []string{"^aws:"}
	for _, v := range filter {
		log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key)
		// The MatchString error is deliberately ignored: every pattern above
		// is a hard-coded, valid regular expression.
		// Idiom fix: compare the bool directly instead of "== true" (S1002).
		if matched, _ := regexp.MatchString(v, *t.Key); matched {
			log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value)
			return true
		}
	}
	return false
}

View File

@ -0,0 +1,137 @@
package aws
import (
"log"
"regexp"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/directconnect"
"github.com/hashicorp/terraform/helper/schema"
)
// getTagsDX reads the Direct Connect tags attached to the resource with the
// given ARN and stores them in the "tags" attribute of d.
func getTagsDX(conn *directconnect.DirectConnect, d *schema.ResourceData, arn string) error {
	resp, err := conn.DescribeTags(&directconnect.DescribeTagsInput{
		ResourceArns: aws.StringSlice([]string{arn}),
	})
	if err != nil {
		return err
	}

	// DescribeTags was queried for a single ARN, so at most one entry is
	// expected; guard against a response for a different resource.
	var tags []*directconnect.Tag
	if len(resp.ResourceTags) == 1 && aws.StringValue(resp.ResourceTags[0].ResourceArn) == arn {
		tags = resp.ResourceTags[0].Tags
	}

	if err := d.Set("tags", tagsToMapDX(tags)); err != nil {
		return err
	}

	return nil
}
// setTagsDX reconciles the "tags" attribute of a Direct Connect resource
// against the tags currently applied to the resource identified by arn:
// obsolete or changed tags are removed first, then the desired set is created.
func setTagsDX(conn *directconnect.DirectConnect, d *schema.ResourceData, arn string) error {
	if d.HasChange("tags") {
		oraw, nraw := d.GetChange("tags")
		o := oraw.(map[string]interface{})
		n := nraw.(map[string]interface{})
		create, remove := diffTagsDX(tagsFromMapDX(o), tagsFromMapDX(n))

		// Set tags
		if len(remove) > 0 {
			log.Printf("[DEBUG] Removing tags: %#v", remove)
			// len implies cap; the explicit capacity argument was redundant (S1019).
			k := make([]*string, len(remove))
			for i, t := range remove {
				k[i] = t.Key
			}
			_, err := conn.UntagResource(&directconnect.UntagResourceInput{
				ResourceArn: aws.String(arn),
				TagKeys:     k,
			})
			if err != nil {
				return err
			}
		}
		if len(create) > 0 {
			log.Printf("[DEBUG] Creating tags: %#v", create)
			_, err := conn.TagResource(&directconnect.TagResourceInput{
				ResourceArn: aws.String(arn),
				Tags:        create,
			})
			if err != nil {
				return err
			}
		}
	}

	return nil
}
// diffTagsDX compares the previous and desired Direct Connect tag sets and
// returns the tags that must be created and the tags that must be removed.
func diffTagsDX(oldTags, newTags []*directconnect.Tag) ([]*directconnect.Tag, []*directconnect.Tag) {
	// Every desired tag is a creation candidate.
	desired := make(map[string]interface{})
	for _, tag := range newTags {
		desired[aws.StringValue(tag.Key)] = aws.StringValue(tag.Value)
	}

	// Any previous tag that is absent from, or differs in, the desired set
	// must be removed.
	var remove []*directconnect.Tag
	for _, tag := range oldTags {
		current, exists := desired[aws.StringValue(tag.Key)]
		if !exists || current != aws.StringValue(tag.Value) {
			remove = append(remove, tag)
		}
	}

	return tagsFromMapDX(desired), remove
}
// tagsFromMapDX converts a Terraform tags map into Direct Connect Tag values,
// dropping AWS-internal tags.
func tagsFromMapDX(m map[string]interface{}) []*directconnect.Tag {
	tags := make([]*directconnect.Tag, 0, len(m))
	for key, raw := range m {
		tag := &directconnect.Tag{
			Key:   aws.String(key),
			Value: aws.String(raw.(string)),
		}
		if tagIgnoredDX(tag) {
			continue
		}
		tags = append(tags, tag)
	}
	return tags
}
// tagsToMapDX converts Direct Connect Tag values into a plain string map,
// dropping AWS-internal tags.
func tagsToMapDX(ts []*directconnect.Tag) map[string]string {
	out := make(map[string]string)
	for _, tag := range ts {
		if tagIgnoredDX(tag) {
			continue
		}
		out[aws.StringValue(tag.Key)] = aws.StringValue(tag.Value)
	}
	return out
}
// tagIgnoredDX reports whether the tag key matches one of the AWS-internal
// patterns (currently "^aws:") that Terraform must not manage.
func tagIgnoredDX(t *directconnect.Tag) bool {
	filter := []string{"^aws:"}
	for _, v := range filter {
		log.Printf("[DEBUG] Matching %v with %v\n", v, aws.StringValue(t.Key))
		// The MatchString error is deliberately ignored: every pattern above
		// is a hard-coded, valid regular expression.
		// Idiom fix: compare the bool directly instead of "== true" (S1002);
		// use nil-safe aws.StringValue for consistency with the rest of this
		// file's accessors.
		if matched, _ := regexp.MatchString(v, aws.StringValue(t.Key)); matched {
			log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
			return true
		}
	}
	return false
}

View File

@ -24,9 +24,10 @@ import (
func validateInstanceUserDataSize(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
length := len(value)
if len(value) > 16384 {
errors = append(errors, fmt.Errorf("%q cannot be longer than 16384 bytes", k))
if length > 16384 {
errors = append(errors, fmt.Errorf("%q is %d bytes, cannot be longer than 16384 bytes", k, length))
}
return
}
@ -80,8 +81,8 @@ func validateRdsEngine(v interface{}, k string) (ws []string, errors []error) {
if _, ok := validTypes[value]; !ok {
errors = append(errors, fmt.Errorf(
"%q contains an invalid engine type %q. Valid types are either %q or %q.",
k, value, "aurora", "aurora-postgresql"))
"%q contains an invalid engine type %q. Valid types are either %q, %q or %q.",
k, value, "aurora", "aurora-mysql", "aurora-postgresql"))
}
return
}
@ -135,9 +136,9 @@ func validateTagFilters(v interface{}, k string) (ws []string, errors []error) {
func validateDbParamGroupName(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if !regexp.MustCompile(`^[0-9a-z-_]+$`).MatchString(value) {
if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"only lowercase alphanumeric characters, underscores and hyphens allowed in %q", k))
"only lowercase alphanumeric characters and hyphens allowed in %q", k))
}
if !regexp.MustCompile(`^[a-z]`).MatchString(value) {
errors = append(errors, fmt.Errorf(
@ -147,10 +148,6 @@ func validateDbParamGroupName(v interface{}, k string) (ws []string, errors []er
errors = append(errors, fmt.Errorf(
"%q cannot contain two consecutive hyphens", k))
}
if regexp.MustCompile(`__`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"%q cannot contain two consecutive underscores", k))
}
if regexp.MustCompile(`-$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"%q cannot end with a hyphen", k))
@ -790,7 +787,19 @@ func validateApiGatewayIntegrationContentHandling(v interface{}, k string) (ws [
return
}
func validateSQSQueueName(v interface{}, k string) (errors []error) {
func validateSQSQueueName(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if len(value) > 80 {
errors = append(errors, fmt.Errorf("%q cannot be longer than 80 characters", k))
}
if !regexp.MustCompile(`^[0-9A-Za-z-_]+(\.fifo)?$`).MatchString(value) {
errors = append(errors, fmt.Errorf("only alphanumeric characters and hyphens allowed in %q", k))
}
return
}
func validateSQSNonFifoQueueName(v interface{}, k string) (errors []error) {
value := v.(string)
if len(value) > 80 {
errors = append(errors, fmt.Errorf("%q cannot be longer than 80 characters", k))
@ -1878,8 +1887,9 @@ func validateSecurityGroupRuleDescription(v interface{}, k string) (ws []string,
"%q cannot be longer than 255 characters: %q", k, value))
}
// https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IpRange.html
pattern := `^[A-Za-z0-9 \.\_\-\:\/\(\)\#\,\@\[\]\+\=\;\{\}\!\$\*]+$`
// https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IpRange.html. Note that
// "" is an allowable description value.
pattern := `^[A-Za-z0-9 \.\_\-\:\/\(\)\#\,\@\[\]\+\=\;\{\}\!\$\*]*$`
if !regexp.MustCompile(pattern).MatchString(value) {
errors = append(errors, fmt.Errorf(
"%q doesn't comply with restrictions (%q): %q",
@ -2147,30 +2157,6 @@ func validateAwsElastiCacheReplicationGroupAuthToken(v interface{}, k string) (w
return
}
// validateServiceDiscoveryServiceDnsRecordsType checks that the value is one
// of the DNS record types supported by Service Discovery: SRV, A or AAAA.
func validateServiceDiscoveryServiceDnsRecordsType(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)
	switch value {
	case "SRV", "A", "AAAA":
		// Accepted record type.
		return
	}
	errors = append(errors, fmt.Errorf("expected %s to be one of %v, got %s", k, []string{"SRV", "A", "AAAA"}, value))
	return
}
// validateServiceDiscoveryServiceHealthCheckConfigType checks that the value
// is one of the health-check types supported by Service Discovery:
// HTTP, HTTPS or TCP.
func validateServiceDiscoveryServiceHealthCheckConfigType(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)
	switch value {
	case "HTTP", "HTTPS", "TCP":
		// Accepted health-check type.
		return
	}
	errors = append(errors, fmt.Errorf("expected %s to be one of %v, got %s", k, []string{"HTTP", "HTTPS", "TCP"}, value))
	return
}
func validateGameliftOperatingSystem(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
operatingSystems := map[string]bool{
@ -2271,3 +2257,38 @@ func validateAmazonSideAsn(v interface{}, k string) (ws []string, errors []error
}
return
}
// validateIotThingTypeName validates an IoT thing type name: one or more
// alphanumeric characters, colons, underscores or hyphens.
func validateIotThingTypeName(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)
	// Anchor the pattern so the WHOLE value must be valid. The previous
	// unanchored pattern accepted any string containing a single legal
	// character (e.g. "bad name!" matched on "bad").
	if !regexp.MustCompile(`^[a-zA-Z0-9:_-]+$`).MatchString(value) {
		errors = append(errors, fmt.Errorf(
			"only alphanumeric characters, colons, underscores and hyphens allowed in %q", k))
	}
	return
}
// validateIotThingTypeDescription validates an IoT thing type description:
// at most 2028 characters of printable characters and spaces.
func validateIotThingTypeDescription(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)
	if len(value) > 2028 {
		errors = append(errors, fmt.Errorf(
			"%q cannot be longer than 2028 characters", k))
	}
	// The previous pattern `[\\p{Graph}\\x20]*` was doubly broken: the
	// doubled backslashes in a raw string made the class literal, and an
	// unanchored `*` pattern matches every string, so the check could
	// never fail. Go's RE2 has no \p{Graph}; POSIX [:print:] (graphical
	// characters plus space) is the equivalent, anchored over the whole
	// value. NOTE(review): [:print:] is ASCII-only in Go — confirm whether
	// non-ASCII descriptions should be accepted.
	if !regexp.MustCompile(`^[[:print:]]*$`).MatchString(value) {
		errors = append(errors, fmt.Errorf(
			"%q must match pattern [\\p{Graph}\\x20]*", k))
	}
	return
}
// validateIotThingTypeSearchableAttribute validates a single searchable
// attribute of an IoT thing type: at most 128 characters drawn from
// alphanumerics, underscores, dots, commas, arobases (@), slashes,
// colons, hashes and hyphens.
func validateIotThingTypeSearchableAttribute(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)
	if len(value) > 128 {
		errors = append(errors, fmt.Errorf(
			"%q cannot be longer than 128 characters", k))
	}
	// Anchor the pattern so the WHOLE value must be valid. The previous
	// unanchored pattern accepted any string containing a single legal
	// character.
	if !regexp.MustCompile(`^[a-zA-Z0-9_.,@/:#-]+$`).MatchString(value) {
		errors = append(errors, fmt.Errorf(
			"only alphanumeric characters, underscores, dots, commas, arobases, slashes, colons, hashes and hyphens allowed in %q", k))
	}
	return
}

804
vendor/vendor.json vendored

File diff suppressed because it is too large Load Diff