commit 313ec1252c
Merge branch 'master' of github.com:hashicorp/terraform
@@ -18,10 +18,12 @@ IMPROVEMENTS
 * provider/aws: Add support for Enhanced monitoring to `aws_rds_cluster_instance` [GH-8038]
 * provider/aws: Add ability to set Requests Payer in `aws_s3_bucket` [GH-8065]
 * provider/aws: Add ability to set canned ACL in `aws_s3_bucket_object` [GH-8091]
+* provider/aws: Allow skipping credentials validation, requesting Account ID and/or metadata API check [GH-7874]
 * provider/azurerm: Adds support for uploading blobs to azure storage from local source [GH-7994]
 * provider/google: allows atomic Cloud DNS record changes [GH-6575]
 * provider/google: Move URLMap hosts to TypeSet from TypeList [GH-7472]
 * provider/google: Support static private IP addresses in `resource_compute_instance` [GH-6310]
+* provider/openstack: Support updating the External Gateway assigned to a Neutron router [GH-8070]
 * provider/vsphere: Improved SCSI controller handling in `vsphere_virtual_machine` [GH-7908]
 * provider/vsphere: Adding disk type of `Thick Lazy` to `vsphere_virtual_disk` and `vsphere_virtual_machine` [GH-7916]
@@ -86,18 +86,18 @@ func parseAccountIdFromArn(arn string) (string, error) {
 // This function is responsible for reading credentials from the
 // environment in the case that they're not explicitly specified
 // in the Terraform configuration.
-func GetCredentials(key, secret, token, profile, credsfile string) *awsCredentials.Credentials {
+func GetCredentials(c *Config) *awsCredentials.Credentials {
 	// build a chain provider, lazy-evaluated by aws-sdk
 	providers := []awsCredentials.Provider{
 		&awsCredentials.StaticProvider{Value: awsCredentials.Value{
-			AccessKeyID:     key,
-			SecretAccessKey: secret,
-			SessionToken:    token,
+			AccessKeyID:     c.AccessKey,
+			SecretAccessKey: c.SecretKey,
+			SessionToken:    c.Token,
 		}},
 		&awsCredentials.EnvProvider{},
 		&awsCredentials.SharedCredentialsProvider{
-			Filename: credsfile,
-			Profile:  profile,
+			Filename: c.CredsFilename,
+			Profile:  c.Profile,
 		},
 	}
@@ -114,19 +114,21 @@ func GetCredentials(key, secret, token, profile, credsfile string) *awsCredentia
 	// Real AWS should reply to a simple metadata request.
 	// We check it actually does to ensure something else didn't just
 	// happen to be listening on the same IP:Port
-	metadataClient := ec2metadata.New(session.New(cfg))
-	if metadataClient.Available() {
-		providers = append(providers, &ec2rolecreds.EC2RoleProvider{
-			Client: metadataClient,
-		})
-		log.Printf("[INFO] AWS EC2 instance detected via default metadata" +
-			" API endpoint, EC2RoleProvider added to the auth chain")
-	} else {
-		if usedEndpoint == "" {
-			usedEndpoint = "default location"
+	if c.SkipMetadataApiCheck == false {
+		metadataClient := ec2metadata.New(session.New(cfg))
+		if metadataClient.Available() {
+			providers = append(providers, &ec2rolecreds.EC2RoleProvider{
+				Client: metadataClient,
+			})
+			log.Printf("[INFO] AWS EC2 instance detected via default metadata" +
+				" API endpoint, EC2RoleProvider added to the auth chain")
+		} else {
+			if usedEndpoint == "" {
+				usedEndpoint = "default location"
+			}
+			log.Printf("[WARN] Ignoring AWS metadata API endpoint at %s "+
+				"as it doesn't return any instance-id", usedEndpoint)
 		}
-		log.Printf("[WARN] Ignoring AWS metadata API endpoint at %s "+
-			"as it doesn't return any instance-id", usedEndpoint)
 	}
 
 	return awsCredentials.NewChainCredentials(providers)
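For orientation, the chain built above resolves credentials in order: static keys, then the process environment, then the shared credentials file, and finally the EC2 role provider unless the metadata probe is skipped. A minimal caller-side sketch, assuming only the Config fields and the GetCredentials signature introduced in this change (the literal values are placeholders):

    cfg := &Config{
        AccessKey:            "anaccesskey", // static keys, if set, win over everything else
        SecretKey:            "asecretkey",
        SkipMetadataApiCheck: true, // never probe the EC2 metadata API for role credentials
    }
    creds := GetCredentials(cfg)
    if _, err := creds.Get(); err != nil {
        // no provider in the chain could supply credentials
        log.Printf("[ERROR] no valid credential sources found: %s", err)
    }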
@@ -218,7 +218,7 @@ func TestAWSGetCredentials_shouldError(t *testing.T) {
 	defer resetEnv()
 	cfg := Config{}
 
-	c := GetCredentials(cfg.AccessKey, cfg.SecretKey, cfg.Token, cfg.Profile, cfg.CredsFilename)
+	c := GetCredentials(&cfg)
 	_, err := c.Get()
 	if awsErr, ok := err.(awserr.Error); ok {
 		if awsErr.Code() != "NoCredentialProviders" {
@@ -251,7 +251,7 @@ func TestAWSGetCredentials_shouldBeStatic(t *testing.T) {
 			Token:     c.Token,
 		}
 
-		creds := GetCredentials(cfg.AccessKey, cfg.SecretKey, cfg.Token, cfg.Profile, cfg.CredsFilename)
+		creds := GetCredentials(&cfg)
 		if creds == nil {
 			t.Fatalf("Expected a static creds provider to be returned")
 		}
@@ -286,7 +286,7 @@ func TestAWSGetCredentials_shouldIAM(t *testing.T) {
 	// An empty config, no key supplied
 	cfg := Config{}
 
-	creds := GetCredentials(cfg.AccessKey, cfg.SecretKey, cfg.Token, cfg.Profile, cfg.CredsFilename)
+	creds := GetCredentials(&cfg)
 	if creds == nil {
 		t.Fatalf("Expected a static creds provider to be returned")
 	}
@@ -335,7 +335,7 @@ func TestAWSGetCredentials_shouldIgnoreIAM(t *testing.T) {
 			Token:     c.Token,
 		}
 
-		creds := GetCredentials(cfg.AccessKey, cfg.SecretKey, cfg.Token, cfg.Profile, cfg.CredsFilename)
+		creds := GetCredentials(&cfg)
 		if creds == nil {
 			t.Fatalf("Expected a static creds provider to be returned")
 		}
@@ -362,7 +362,7 @@ func TestAWSGetCredentials_shouldErrorWithInvalidEndpoint(t *testing.T) {
 	ts := invalidAwsEnv(t)
 	defer ts()
 
-	creds := GetCredentials("", "", "", "", "")
+	creds := GetCredentials(&Config{})
 	v, err := creds.Get()
 	if err == nil {
 		t.Fatal("Expected error returned when getting creds w/ invalid EC2 endpoint")
@@ -380,7 +380,7 @@ func TestAWSGetCredentials_shouldIgnoreInvalidEndpoint(t *testing.T) {
 	ts := invalidAwsEnv(t)
 	defer ts()
 
-	creds := GetCredentials("accessKey", "secretKey", "", "", "")
+	creds := GetCredentials(&Config{AccessKey: "accessKey", SecretKey: "secretKey"})
 	v, err := creds.Get()
 	if err != nil {
 		t.Fatalf("Getting static credentials w/ invalid EC2 endpoint failed: %s", err)
@@ -406,7 +406,7 @@ func TestAWSGetCredentials_shouldCatchEC2RoleProvider(t *testing.T) {
 	ts := awsEnv(t)
 	defer ts()
 
-	creds := GetCredentials("", "", "", "", "")
+	creds := GetCredentials(&Config{})
 	if creds == nil {
 		t.Fatalf("Expected an EC2Role creds provider to be returned")
 	}
@@ -452,7 +452,7 @@ func TestAWSGetCredentials_shouldBeShared(t *testing.T) {
 		t.Fatalf("Error resetting env var AWS_SHARED_CREDENTIALS_FILE: %s", err)
 	}
 
-	creds := GetCredentials("", "", "", "myprofile", file.Name())
+	creds := GetCredentials(&Config{Profile: "myprofile", CredsFilename: file.Name()})
 	if creds == nil {
 		t.Fatalf("Expected a provider chain to be returned")
 	}
@@ -479,7 +479,7 @@ func TestAWSGetCredentials_shouldBeENV(t *testing.T) {
 	defer resetEnv()
 
 	cfg := Config{}
-	creds := GetCredentials(cfg.AccessKey, cfg.SecretKey, cfg.Token, cfg.Profile, cfg.CredsFilename)
+	creds := GetCredentials(&cfg)
 	if creds == nil {
 		t.Fatalf("Expected a static creds provider to be returned")
 	}
@@ -70,12 +70,16 @@ type Config struct {
 	AllowedAccountIds   []interface{}
 	ForbiddenAccountIds []interface{}
 
 	DynamoDBEndpoint string
 	KinesisEndpoint  string
 	Ec2Endpoint      string
 	IamEndpoint      string
 	ElbEndpoint      string
-	Insecure         bool
+	S3Endpoint       string
+	Insecure         bool
+
+	SkipIamCredsValidation bool
+	SkipIamAccountId       bool
+	SkipMetadataApiCheck   bool
 }
 
 type AWSClient struct {
@@ -141,7 +145,7 @@ func (c *Config) Client() (interface{}, error) {
 		client.region = c.Region
 
 		log.Println("[INFO] Building AWS auth structure")
-		creds := GetCredentials(c.AccessKey, c.SecretKey, c.Token, c.Profile, c.CredsFilename)
+		creds := GetCredentials(c)
 		// Call Get to check for credential provider. If nothing found, we'll get an
 		// error, and we can present it nicely to the user
 		cp, err := creds.Get()
@@ -199,19 +203,24 @@ func (c *Config) Client() (interface{}, error) {
 		client.iamconn = iam.New(awsIamSess)
 		client.stsconn = sts.New(sess)
 
-		err = c.ValidateCredentials(client.stsconn)
-		if err != nil {
-			errs = append(errs, err)
-			return nil, &multierror.Error{Errors: errs}
-		}
-		accountId, err := GetAccountId(client.iamconn, client.stsconn, cp.ProviderName)
-		if err == nil {
-			client.accountid = accountId
+		if c.SkipIamCredsValidation == false {
+			err = c.ValidateCredentials(client.stsconn)
+			if err != nil {
+				errs = append(errs, err)
+				return nil, &multierror.Error{Errors: errs}
+			}
 		}
 
-		authErr := c.ValidateAccountId(client.accountid)
-		if authErr != nil {
-			errs = append(errs, authErr)
+		if c.SkipIamAccountId == false {
+			accountId, err := GetAccountId(client.iamconn, client.stsconn, cp.ProviderName)
+			if err == nil {
+				client.accountid = accountId
+			}
+
+			authErr := c.ValidateAccountId(client.accountid)
+			if authErr != nil {
+				errs = append(errs, authErr)
+			}
 		}
 
 		client.apigateway = apigateway.New(sess)
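The three flags above are independent: credential validation, the account-ID lookup, and the metadata probe can each be skipped on their own, which is what the changelog entry about AWS API implementations without IAM or a metadata endpoint refers to. A hedged sketch of a Config that exercises all of them (the values are illustrative, not taken from the change itself):

    c := &Config{
        AccessKey:              "anaccesskey",
        SecretKey:              "asecretkey",
        Region:                 "us-east-1",
        SkipIamCredsValidation: true, // skip the STS-based credential check
        SkipIamAccountId:       true, // don't ask IAM/STS for the account id
        SkipMetadataApiCheck:   true, // don't probe the EC2 metadata API
    }
    client, err := c.Client()
    if err != nil {
        // with all three skips set, a failure here points at connectivity or
        // configuration rather than credential validation
        return err
    }
    _ = client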
@@ -1,28 +0,0 @@
-package aws
-
-import (
-	"testing"
-
-	"github.com/hashicorp/terraform/helper/resource"
-)
-
-func TestAccAWSRole_importBasic(t *testing.T) {
-	resourceName := "aws_iam_role.role"
-
-	resource.Test(t, resource.TestCase{
-		PreCheck:     func() { testAccPreCheck(t) },
-		Providers:    testAccProviders,
-		CheckDestroy: testAccCheckAWSRoleDestroy,
-		Steps: []resource.TestStep{
-			resource.TestStep{
-				Config: testAccAWSRoleConfig,
-			},
-
-			resource.TestStep{
-				ResourceName:      resourceName,
-				ImportState:       true,
-				ImportStateVerify: true,
-			},
-		},
-	})
-}
@@ -100,6 +100,7 @@ func Provider() terraform.ResourceProvider {
 				Default:     "",
 				Description: descriptions["kinesis_endpoint"],
 			},
+
 			"endpoints": endpointsSchema(),
 
 			"insecure": &schema.Schema{
@@ -108,6 +109,27 @@ func Provider() terraform.ResourceProvider {
 				Default:     false,
 				Description: descriptions["insecure"],
 			},
+
+			"skip_iam_creds_validation": &schema.Schema{
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Default:     false,
+				Description: descriptions["skip_iam_creds_validation"],
+			},
+
+			"skip_iam_account_id": &schema.Schema{
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Default:     false,
+				Description: descriptions["skip_iam_account_id"],
+			},
+
+			"skip_metadata_api_check": &schema.Schema{
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Default:     false,
+				Description: descriptions["skip_metadata_api_check"],
+			},
 		},
 
 		DataSourcesMap: map[string]*schema.Resource{
@@ -333,21 +355,33 @@ func init() {
 
 		"insecure": "Explicitly allow the provider to perform \"insecure\" SSL requests. If omitted," +
 			"default value is `false`",
+
+		"skip_iam_creds_validation": "Skip the IAM/STS credentials validation. " +
+			"Used for AWS API implementations that do not use IAM.",
+
+		"skip_iam_account_id": "Skip the request of account id to IAM/STS. " +
+			"Used for AWS API implementations that do not use IAM.",
+
+		"skip_metadata_api_check": "Skip the AWS Metadata API check. " +
+			"Used for AWS API implementations that do not have a metadata api endpoint.",
 	}
 }
 
 func providerConfigure(d *schema.ResourceData) (interface{}, error) {
 	config := Config{
 		AccessKey:        d.Get("access_key").(string),
 		SecretKey:        d.Get("secret_key").(string),
 		Profile:          d.Get("profile").(string),
 		CredsFilename:    d.Get("shared_credentials_file").(string),
 		Token:            d.Get("token").(string),
 		Region:           d.Get("region").(string),
 		MaxRetries:       d.Get("max_retries").(int),
 		DynamoDBEndpoint: d.Get("dynamodb_endpoint").(string),
 		KinesisEndpoint:  d.Get("kinesis_endpoint").(string),
 		Insecure:         d.Get("insecure").(bool),
+		SkipIamCredsValidation: d.Get("skip_iam_creds_validation").(bool),
+		SkipIamAccountId:       d.Get("skip_iam_account_id").(bool),
+		SkipMetadataApiCheck:   d.Get("skip_metadata_api_check").(bool),
 	}
 
 	endpointsSet := d.Get("endpoints").(*schema.Set)
@@ -111,7 +111,16 @@ resource "aws_iam_group" "group" {
 resource "aws_iam_group_policy" "foo" {
 	name = "foo_policy"
 	group = "${aws_iam_group.group.name}"
-	policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"*\",\"Resource\":\"*\"}}"
+	policy = <<EOF
+{
+  "Version": "2012-10-17",
+  "Statement": {
+    "Effect": "Allow",
+    "Action": "*",
+    "Resource": "*"
+  }
+}
+EOF
 }
 `
 
@ -120,8 +120,8 @@ func testAccCheckAWSInstanceProfileExists(n string, res *iam.GetInstanceProfileO
|
||||||
|
|
||||||
const testAccAwsIamInstanceProfileConfig = `
|
const testAccAwsIamInstanceProfileConfig = `
|
||||||
resource "aws_iam_role" "test" {
|
resource "aws_iam_role" "test" {
|
||||||
name = "test"
|
name = "test"
|
||||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ec2.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_iam_instance_profile" "test" {
|
resource "aws_iam_instance_profile" "test" {
|
||||||
|
@ -132,8 +132,8 @@ resource "aws_iam_instance_profile" "test" {
|
||||||
|
|
||||||
const testAccAWSInstanceProfilePrefixNameConfig = `
|
const testAccAWSInstanceProfilePrefixNameConfig = `
|
||||||
resource "aws_iam_role" "test" {
|
resource "aws_iam_role" "test" {
|
||||||
name = "test"
|
name = "test"
|
||||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ec2.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_iam_instance_profile" "test" {
|
resource "aws_iam_instance_profile" "test" {
|
||||||
|
|
|
@ -113,8 +113,22 @@ resource "aws_iam_user" "user" {
|
||||||
name = "test-user"
|
name = "test-user"
|
||||||
}
|
}
|
||||||
resource "aws_iam_role" "role" {
|
resource "aws_iam_role" "role" {
|
||||||
name = "test-role"
|
name = "test-role"
|
||||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
assume_role_policy = <<EOF
|
||||||
|
{
|
||||||
|
"Version": "2012-10-17",
|
||||||
|
"Statement": [
|
||||||
|
{
|
||||||
|
"Action": "sts:AssumeRole",
|
||||||
|
"Principal": {
|
||||||
|
"Service": "ec2.amazonaws.com"
|
||||||
|
},
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Sid": ""
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
EOF
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_iam_group" "group" {
|
resource "aws_iam_group" "group" {
|
||||||
|
@ -160,16 +174,61 @@ resource "aws_iam_user" "user3" {
|
||||||
name = "test-user3"
|
name = "test-user3"
|
||||||
}
|
}
|
||||||
resource "aws_iam_role" "role" {
|
resource "aws_iam_role" "role" {
|
||||||
name = "test-role"
|
name = "test-role"
|
||||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
assume_role_policy = <<EOF
|
||||||
|
{
|
||||||
|
"Version": "2012-10-17",
|
||||||
|
"Statement": [
|
||||||
|
{
|
||||||
|
"Action": "sts:AssumeRole",
|
||||||
|
"Principal": {
|
||||||
|
"Service": "ec2.amazonaws.com"
|
||||||
|
},
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Sid": ""
|
||||||
|
}
|
||||||
|
]
|
||||||
}
|
}
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
resource "aws_iam_role" "role2" {
|
resource "aws_iam_role" "role2" {
|
||||||
name = "test-role2"
|
name = "test-role2"
|
||||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
assume_role_policy = <<EOF
|
||||||
|
{
|
||||||
|
"Version": "2012-10-17",
|
||||||
|
"Statement": [
|
||||||
|
{
|
||||||
|
"Action": "sts:AssumeRole",
|
||||||
|
"Principal": {
|
||||||
|
"Service": "ec2.amazonaws.com"
|
||||||
|
},
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Sid": ""
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
EOF
|
||||||
|
|
||||||
}
|
}
|
||||||
resource "aws_iam_role" "role3" {
|
resource "aws_iam_role" "role3" {
|
||||||
name = "test-role3"
|
name = "test-role3"
|
||||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
assume_role_policy = <<EOF
|
||||||
|
{
|
||||||
|
"Version": "2012-10-17",
|
||||||
|
"Statement": [
|
||||||
|
{
|
||||||
|
"Action": "sts:AssumeRole",
|
||||||
|
"Principal": {
|
||||||
|
"Service": "ec2.amazonaws.com"
|
||||||
|
},
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Sid": ""
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
EOF
|
||||||
|
|
||||||
}
|
}
|
||||||
resource "aws_iam_group" "group" {
|
resource "aws_iam_group" "group" {
|
||||||
name = "test-group"
|
name = "test-group"
|
||||||
|
|
|
@@ -2,7 +2,6 @@ package aws
 
 import (
 	"fmt"
-	"net/url"
 	"regexp"
 	"time"
 
@@ -21,10 +20,6 @@ func resourceAwsIamRole() *schema.Resource {
 		Update: resourceAwsIamRoleUpdate,
 		Delete: resourceAwsIamRoleDelete,
 
-		Importer: &schema.ResourceImporter{
-			State: schema.ImportStatePassthrough,
-		},
-
 		Schema: map[string]*schema.Schema{
 			"arn": &schema.Schema{
 				Type:     schema.TypeString,
@@ -179,10 +174,6 @@ func resourceAwsIamRoleReadResult(d *schema.ResourceData, role *iam.Role) error
 	if err := d.Set("unique_id", role.RoleId); err != nil {
 		return err
 	}
-	policy, _ := url.QueryUnescape(*role.AssumeRolePolicyDocument)
-	if err := d.Set("assume_role_policy", aws.String(policy)); err != nil {
-		return err
-	}
 	return nil
 }
 
@ -90,8 +90,22 @@ func testAccCheckAWSRolePolicyAttachmentAttributes(policies []string, out *iam.L
|
||||||
|
|
||||||
const testAccAWSRolePolicyAttachConfig = `
|
const testAccAWSRolePolicyAttachConfig = `
|
||||||
resource "aws_iam_role" "role" {
|
resource "aws_iam_role" "role" {
|
||||||
name = "test-role"
|
name = "test-role"
|
||||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
assume_role_policy = <<EOF
|
||||||
|
{
|
||||||
|
"Version": "2012-10-17",
|
||||||
|
"Statement": [
|
||||||
|
{
|
||||||
|
"Action": "sts:AssumeRole",
|
||||||
|
"Principal": {
|
||||||
|
"Service": "ec2.amazonaws.com"
|
||||||
|
},
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Sid": ""
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
EOF
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_iam_policy" "policy" {
|
resource "aws_iam_policy" "policy" {
|
||||||
|
@ -121,8 +135,22 @@ resource "aws_iam_role_policy_attachment" "test-attach" {
|
||||||
|
|
||||||
const testAccAWSRolePolicyAttachConfigUpdate = `
|
const testAccAWSRolePolicyAttachConfigUpdate = `
|
||||||
resource "aws_iam_role" "role" {
|
resource "aws_iam_role" "role" {
|
||||||
name = "test-role"
|
name = "test-role"
|
||||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
assume_role_policy = <<EOF
|
||||||
|
{
|
||||||
|
"Version": "2012-10-17",
|
||||||
|
"Statement": [
|
||||||
|
{
|
||||||
|
"Action": "sts:AssumeRole",
|
||||||
|
"Principal": {
|
||||||
|
"Service": "ec2.amazonaws.com"
|
||||||
|
},
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Sid": ""
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
EOF
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_iam_policy" "policy" {
|
resource "aws_iam_policy" "policy" {
|
||||||
|
|
|
@ -113,15 +113,38 @@ func testAccCheckIAMRolePolicy(
|
||||||
func testAccIAMRolePolicyConfig(role, policy1 string) string {
|
func testAccIAMRolePolicyConfig(role, policy1 string) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
resource "aws_iam_role" "role" {
|
resource "aws_iam_role" "role" {
|
||||||
name = "tf_test_role_%s"
|
name = "tf_test_role_%s"
|
||||||
path = "/"
|
path = "/"
|
||||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
assume_role_policy = <<EOF
|
||||||
|
{
|
||||||
|
"Version": "2012-10-17",
|
||||||
|
"Statement": [
|
||||||
|
{
|
||||||
|
"Action": "sts:AssumeRole",
|
||||||
|
"Principal": {
|
||||||
|
"Service": "ec2.amazonaws.com"
|
||||||
|
},
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Sid": ""
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
EOF
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_iam_role_policy" "foo" {
|
resource "aws_iam_role_policy" "foo" {
|
||||||
name = "tf_test_policy_%s"
|
name = "tf_test_policy_%s"
|
||||||
role = "${aws_iam_role.role.name}"
|
role = "${aws_iam_role.role.name}"
|
||||||
policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"*\",\"Resource\":\"*\"}}"
|
policy = <<EOF
|
||||||
|
{
|
||||||
|
"Version": "2012-10-17",
|
||||||
|
"Statement": {
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Action": "*",
|
||||||
|
"Resource": "*"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
EOF
|
||||||
}
|
}
|
||||||
`, role, policy1)
|
`, role, policy1)
|
||||||
}
|
}
|
||||||
|
@ -129,21 +152,53 @@ resource "aws_iam_role_policy" "foo" {
|
||||||
func testAccIAMRolePolicyConfigUpdate(role, policy1, policy2 string) string {
|
func testAccIAMRolePolicyConfigUpdate(role, policy1, policy2 string) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
resource "aws_iam_role" "role" {
|
resource "aws_iam_role" "role" {
|
||||||
name = "tf_test_role_%s"
|
name = "tf_test_role_%s"
|
||||||
path = "/"
|
path = "/"
|
||||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
assume_role_policy = <<EOF
|
||||||
|
{
|
||||||
|
"Version": "2012-10-17",
|
||||||
|
"Statement": [
|
||||||
|
{
|
||||||
|
"Action": "sts:AssumeRole",
|
||||||
|
"Principal": {
|
||||||
|
"Service": "ec2.amazonaws.com"
|
||||||
|
},
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Sid": ""
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
EOF
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_iam_role_policy" "foo" {
|
resource "aws_iam_role_policy" "foo" {
|
||||||
name = "tf_test_policy_%s"
|
name = "tf_test_policy_%s"
|
||||||
role = "${aws_iam_role.role.name}"
|
role = "${aws_iam_role.role.name}"
|
||||||
policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"*\",\"Resource\":\"*\"}}"
|
policy = <<EOF
|
||||||
|
{
|
||||||
|
"Version": "2012-10-17",
|
||||||
|
"Statement": {
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Action": "*",
|
||||||
|
"Resource": "*"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
EOF
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_iam_role_policy" "bar" {
|
resource "aws_iam_role_policy" "bar" {
|
||||||
name = "tf_test_policy_2_%s"
|
name = "tf_test_policy_2_%s"
|
||||||
role = "${aws_iam_role.role.name}"
|
role = "${aws_iam_role.role.name}"
|
||||||
policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"*\",\"Resource\":\"*\"}}"
|
policy = <<EOF
|
||||||
|
{
|
||||||
|
"Version": "2012-10-17",
|
||||||
|
"Statement": {
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Action": "*",
|
||||||
|
"Resource": "*"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
EOF
|
||||||
}
|
}
|
||||||
`, role, policy1, policy2)
|
`, role, policy1, policy2)
|
||||||
}
|
}
|
||||||
|
|
|
@ -165,24 +165,39 @@ func testAccCheckAWSRoleAttributes(role *iam.GetRoleOutput) resource.TestCheckFu
|
||||||
|
|
||||||
const testAccAWSRoleConfig = `
|
const testAccAWSRoleConfig = `
|
||||||
resource "aws_iam_role" "role" {
|
resource "aws_iam_role" "role" {
|
||||||
name = "test-role"
|
name = "test-role"
|
||||||
path = "/"
|
path = "/"
|
||||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ec2.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}"
|
||||||
}
|
}
|
||||||
`
|
`
|
||||||
|
|
||||||
const testAccAWSRolePrefixNameConfig = `
|
const testAccAWSRolePrefixNameConfig = `
|
||||||
resource "aws_iam_role" "role" {
|
resource "aws_iam_role" "role" {
|
||||||
name_prefix = "test-role-"
|
name_prefix = "test-role-"
|
||||||
path = "/"
|
path = "/"
|
||||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ec2.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}"
|
||||||
}
|
}
|
||||||
`
|
`
|
||||||
|
|
||||||
const testAccAWSRolePre = `
|
const testAccAWSRolePre = `
|
||||||
resource "aws_iam_role" "role_update_test" {
|
resource "aws_iam_role" "role_update_test" {
|
||||||
name = "tf_old_name"
|
name = "tf_old_name"
|
||||||
path = "/test/"
|
path = "/test/"
|
||||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
assume_role_policy = <<EOF
|
||||||
|
{
|
||||||
|
"Version": "2012-10-17",
|
||||||
|
"Statement": [
|
||||||
|
{
|
||||||
|
"Action": "sts:AssumeRole",
|
||||||
|
"Principal": {
|
||||||
|
"Service": "ec2.amazonaws.com"
|
||||||
|
},
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Sid": ""
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
EOF
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_iam_role_policy" "role_update_test" {
|
resource "aws_iam_role_policy" "role_update_test" {
|
||||||
|
@ -217,7 +232,21 @@ const testAccAWSRolePost = `
|
||||||
resource "aws_iam_role" "role_update_test" {
|
resource "aws_iam_role" "role_update_test" {
|
||||||
name = "tf_new_name"
|
name = "tf_new_name"
|
||||||
path = "/test/"
|
path = "/test/"
|
||||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
assume_role_policy = <<EOF
|
||||||
|
{
|
||||||
|
"Version": "2012-10-17",
|
||||||
|
"Statement": [
|
||||||
|
{
|
||||||
|
"Action": "sts:AssumeRole",
|
||||||
|
"Principal": {
|
||||||
|
"Service": "ec2.amazonaws.com"
|
||||||
|
},
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Sid": ""
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
EOF
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_iam_role_policy" "role_update_test" {
|
resource "aws_iam_role_policy" "role_update_test" {
|
||||||
|
|
|
@ -178,6 +178,97 @@ func TestAccComputeV2Instance_volumeDetachPostCreation(t *testing.T) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccComputeV2Instance_additionalVolumeDetachPostCreation(t *testing.T) {
|
||||||
|
var instance servers.Server
|
||||||
|
var volume volumes.Volume
|
||||||
|
|
||||||
|
var testAccComputeV2Instance_volumeDetachPostCreationInstanceAndAdditionalVolume = fmt.Sprintf(`
|
||||||
|
|
||||||
|
resource "openstack_blockstorage_volume_v1" "root_volume" {
|
||||||
|
name = "root_volume"
|
||||||
|
size = 1
|
||||||
|
image_id = "%s"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_blockstorage_volume_v1" "additional_volume" {
|
||||||
|
name = "additional_volume"
|
||||||
|
size = 1
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_instance_v2" "foo" {
|
||||||
|
name = "terraform-test"
|
||||||
|
security_groups = ["default"]
|
||||||
|
|
||||||
|
block_device {
|
||||||
|
uuid = "${openstack_blockstorage_volume_v1.root_volume.id}"
|
||||||
|
source_type = "volume"
|
||||||
|
boot_index = 0
|
||||||
|
destination_type = "volume"
|
||||||
|
delete_on_termination = false
|
||||||
|
}
|
||||||
|
|
||||||
|
volume {
|
||||||
|
volume_id = "${openstack_blockstorage_volume_v1.additional_volume.id}"
|
||||||
|
}
|
||||||
|
}`,
|
||||||
|
os.Getenv("OS_IMAGE_ID"))
|
||||||
|
|
||||||
|
var testAccComputeV2Instance_volumeDetachPostCreationInstance = fmt.Sprintf(`
|
||||||
|
|
||||||
|
resource "openstack_blockstorage_volume_v1" "root_volume" {
|
||||||
|
name = "root_volume"
|
||||||
|
size = 1
|
||||||
|
image_id = "%s"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_blockstorage_volume_v1" "additional_volume" {
|
||||||
|
name = "additional_volume"
|
||||||
|
size = 1
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_instance_v2" "foo" {
|
||||||
|
name = "terraform-test"
|
||||||
|
security_groups = ["default"]
|
||||||
|
|
||||||
|
block_device {
|
||||||
|
uuid = "${openstack_blockstorage_volume_v1.root_volume.id}"
|
||||||
|
source_type = "volume"
|
||||||
|
boot_index = 0
|
||||||
|
destination_type = "volume"
|
||||||
|
delete_on_termination = false
|
||||||
|
}
|
||||||
|
}`,
|
||||||
|
os.Getenv("OS_IMAGE_ID"))
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeV2InstanceDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeV2Instance_volumeDetachPostCreationInstanceAndAdditionalVolume,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.root_volume", &volume),
|
||||||
|
testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.additional_volume", &volume),
|
||||||
|
testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.foo", &instance),
|
||||||
|
testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume),
|
||||||
|
testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeV2Instance_volumeDetachPostCreationInstance,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.root_volume", &volume),
|
||||||
|
testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.additional_volume", &volume),
|
||||||
|
testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.foo", &instance),
|
||||||
|
testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume),
|
||||||
|
testAccCheckComputeV2InstanceVolumeDetached(&instance, "openstack_blockstorage_volume_v1.additional_volume"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestAccComputeV2Instance_floatingIPAttachGlobally(t *testing.T) {
|
func TestAccComputeV2Instance_floatingIPAttachGlobally(t *testing.T) {
|
||||||
var instance servers.Server
|
var instance servers.Server
|
||||||
var fip floatingip.FloatingIP
|
var fip floatingip.FloatingIP
|
||||||
|
@ -993,3 +1084,41 @@ func TestAccComputeV2Instance_stop_before_destroy(t *testing.T) {
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeV2InstanceVolumeDetached(instance *servers.Server, volume_id string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
var attachments []volumeattach.VolumeAttachment
|
||||||
|
|
||||||
|
rs, ok := s.RootModule().Resources[volume_id]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Not found: %s", volume_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.ID == "" {
|
||||||
|
return fmt.Errorf("No ID is set")
|
||||||
|
}
|
||||||
|
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
computeClient, err := config.computeV2Client(OS_REGION_NAME)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = volumeattach.List(computeClient, instance.ID).EachPage(func(page pagination.Page) (bool, error) {
|
||||||
|
actual, err := volumeattach.ExtractVolumeAttachments(page)
|
||||||
|
if err != nil {
|
||||||
|
return false, fmt.Errorf("Unable to lookup attachment: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
attachments = actual
|
||||||
|
return true, nil
|
||||||
|
})
|
||||||
|
|
||||||
|
for _, attachment := range attachments {
|
||||||
|
if attachment.VolumeID == rs.Primary.ID {
|
||||||
|
return fmt.Errorf("Volume is still attached.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@@ -213,6 +213,15 @@ func resourceNetworkingRouterV2Update(d *schema.ResourceData, meta interface{})
 		asu := d.Get("admin_state_up").(bool)
 		updateOpts.AdminStateUp = &asu
 	}
+	if d.HasChange("external_gateway") {
+		externalGateway := d.Get("external_gateway").(string)
+		if externalGateway != "" {
+			gatewayInfo := routers.GatewayInfo{
+				NetworkID: externalGateway,
+			}
+			updateOpts.GatewayInfo = &gatewayInfo
+		}
+	}
 
 	log.Printf("[DEBUG] Updating Router %s with options: %+v", d.Id(), updateOpts)
 
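A sketch of how the new GatewayInfo option would flow into the actual update call, assuming the gophercloud layer-3 routers API this resource already relies on (the call site shown here is illustrative, not copied from the change):

    updateOpts := routers.UpdateOpts{}
    if d.HasChange("external_gateway") {
        if nid := d.Get("external_gateway").(string); nid != "" {
            updateOpts.GatewayInfo = &routers.GatewayInfo{NetworkID: nid}
        }
    }
    // Hypothetical call site mirroring the resource's existing update path.
    if _, err := routers.Update(networkingClient, d.Id(), updateOpts).Extract(); err != nil {
        return fmt.Errorf("Error updating OpenStack Neutron router: %s", err)
    }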
@ -2,6 +2,7 @@ package openstack
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
@ -34,6 +35,46 @@ func TestAccNetworkingV2Router_basic(t *testing.T) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccNetworkingV2Router_update_external_gw(t *testing.T) {
|
||||||
|
var router routers.Router
|
||||||
|
externalGateway := os.Getenv("OS_EXTGW_ID")
|
||||||
|
|
||||||
|
var testAccNetworkingV2Router_update_external_gw_1 = fmt.Sprintf(`
|
||||||
|
resource "openstack_networking_router_v2" "foo" {
|
||||||
|
name = "router"
|
||||||
|
admin_state_up = "true"
|
||||||
|
distributed = "false"
|
||||||
|
}`)
|
||||||
|
|
||||||
|
var testAccNetworkingV2Router_update_external_gw_2 = fmt.Sprintf(`
|
||||||
|
resource "openstack_networking_router_v2" "foo" {
|
||||||
|
name = "router"
|
||||||
|
admin_state_up = "true"
|
||||||
|
distributed = "false"
|
||||||
|
external_gateway = "%s"
|
||||||
|
}`, externalGateway)
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckNetworkingV2RouterDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccNetworkingV2Router_update_external_gw_1,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckNetworkingV2RouterExists(t, "openstack_networking_router_v2.foo", &router),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccNetworkingV2Router_update_external_gw_2,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
resource.TestCheckResourceAttr("openstack_networking_router_v2.foo", "external_gateway", externalGateway),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckNetworkingV2RouterDestroy(s *terraform.State) error {
|
func testAccCheckNetworkingV2RouterDestroy(s *terraform.State) error {
|
||||||
config := testAccProvider.Meta().(*Config)
|
config := testAccProvider.Meta().(*Config)
|
||||||
networkingClient, err := config.networkingV2Client(OS_REGION_NAME)
|
networkingClient, err := config.networkingV2Client(OS_REGION_NAME)
|
||||||
|
|
|
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"crypto/md5"
 	"crypto/tls"
+	"crypto/x509"
 	"encoding/base64"
 	"fmt"
 	"io"
@@ -276,9 +277,26 @@ func (c *AtlasClient) http() (*retryablehttp.Client, error) {
 		return nil, err
 	}
 	rc := retryablehttp.NewClient()
+
+	rc.CheckRetry = func(resp *http.Response, err error) (bool, error) {
+		if err != nil {
+			// don't bother retrying if the certs don't match
+			if err, ok := err.(*url.Error); ok {
+				if _, ok := err.Err.(x509.UnknownAuthorityError); ok {
+					return false, nil
+				}
+			}
+			// continue retrying
+			return true, nil
+		}
+		return retryablehttp.DefaultRetryPolicy(resp, err)
+	}
+
 	t := cleanhttp.DefaultTransport()
 	t.TLSClientConfig = tlsConfig
 	rc.HTTPClient.Transport = t
+
+	c.HTTPClient = rc
 	return rc, nil
 }
 
@ -3,8 +3,11 @@ package remote
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"crypto/md5"
|
"crypto/md5"
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/httptest"
|
"net/http/httptest"
|
||||||
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
@ -36,6 +39,53 @@ func TestAtlasClient(t *testing.T) {
|
||||||
testClient(t, client)
|
testClient(t, client)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAtlasClient_noRetryOnBadCerts(t *testing.T) {
|
||||||
|
acctest.RemoteTestPrecheck(t)
|
||||||
|
|
||||||
|
client, err := atlasFactory(map[string]string{
|
||||||
|
"access_token": "NOT_REQUIRED",
|
||||||
|
"name": "hashicorp/test-remote-state",
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("bad: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ac := client.(*AtlasClient)
|
||||||
|
// trigger the AtlasClient to build the http client and assign HTTPClient
|
||||||
|
httpClient, err := ac.http()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// remove the CA certs from the client
|
||||||
|
brokenCfg := &tls.Config{
|
||||||
|
RootCAs: new(x509.CertPool),
|
||||||
|
}
|
||||||
|
httpClient.HTTPClient.Transport.(*http.Transport).TLSClientConfig = brokenCfg
|
||||||
|
|
||||||
|
// Instrument CheckRetry to make sure we didn't retry
|
||||||
|
retries := 0
|
||||||
|
oldCheck := httpClient.CheckRetry
|
||||||
|
httpClient.CheckRetry = func(resp *http.Response, err error) (bool, error) {
|
||||||
|
if retries > 0 {
|
||||||
|
t.Fatal("retried after certificate error")
|
||||||
|
}
|
||||||
|
retries++
|
||||||
|
return oldCheck(resp, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = client.Get()
|
||||||
|
if err != nil {
|
||||||
|
if err, ok := err.(*url.Error); ok {
|
||||||
|
if _, ok := err.Err.(x509.UnknownAuthorityError); ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Fatalf("expected x509.UnknownAuthorityError, got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
func TestAtlasClient_ReportedConflictEqualStates(t *testing.T) {
|
func TestAtlasClient_ReportedConflictEqualStates(t *testing.T) {
|
||||||
fakeAtlas := newFakeAtlas(t, testStateModuleOrderChange)
|
fakeAtlas := newFakeAtlas(t, testStateModuleOrderChange)
|
||||||
srv := fakeAtlas.Server()
|
srv := fakeAtlas.Server()
|
||||||
|
|
|
@@ -60,7 +60,13 @@ func s3Factory(conf map[string]string) (Client, error) {
 	kmsKeyID := conf["kms_key_id"]
 
 	var errs []error
-	creds := terraformAws.GetCredentials(conf["access_key"], conf["secret_key"], conf["token"], conf["profile"], conf["shared_credentials_file"])
+	creds := terraformAws.GetCredentials(&terraformAws.Config{
+		AccessKey:     conf["access_key"],
+		SecretKey:     conf["secret_key"],
+		Token:         conf["token"],
+		Profile:       conf["profile"],
+		CredsFilename: conf["shared_credentials_file"],
+	})
 	// Call Get to check for credential provider. If nothing found, we'll get an
 	// error, and we can present it nicely to the user
 	_, err := creds.Get()
@@ -317,16 +317,18 @@ func (c *Context) Input(mode InputMode) error {
 				}
 			}
 
+			var valueType config.VariableType
+
 			v := m[n]
-			switch v.Type() {
+			switch valueType = v.Type(); valueType {
 			case config.VariableTypeUnknown:
 				continue
 			case config.VariableTypeMap:
-				continue
+				// OK
 			case config.VariableTypeList:
-				continue
+				// OK
 			case config.VariableTypeString:
-				// Good!
+				// OK
 			default:
 				panic(fmt.Sprintf("Unknown variable type: %#v", v.Type()))
 			}
@@ -340,6 +342,12 @@ func (c *Context) Input(mode InputMode) error {
 				}
 			}
 
+			// this should only happen during tests
+			if c.uiInput == nil {
+				log.Println("[WARN] Content.uiInput is nil")
+				continue
+			}
+
 			// Ask the user for a value for this variable
 			var value string
 			retry := 0
@@ -365,17 +373,21 @@ func (c *Context) Input(mode InputMode) error {
 					continue
 				}
 
-				if value == "" {
-					// No value, just exit the loop. With no value, we just
-					// use whatever is currently set in variables.
-					break
-				}
-
 				break
 			}
 
-			if value != "" {
-				c.variables[n] = value
+			// no value provided, so don't set the variable at all
+			if value == "" {
+				continue
+			}
+
+			decoded, err := parseVariableAsHCL(n, value, valueType)
+			if err != nil {
+				return err
+			}
+
+			if decoded != nil {
+				c.variables[n] = decoded
 			}
 		}
 	}
@@ -656,9 +668,20 @@ func (c *Context) walk(
 // the name of the variable. In order to get around the restriction of HCL requiring a
 // top level object, we prepend a sentinel key, decode the user-specified value as its
 // value and pull the value back out of the resulting map.
-func parseVariableAsHCL(name string, input interface{}, targetType config.VariableType) (interface{}, error) {
+func parseVariableAsHCL(name string, input string, targetType config.VariableType) (interface{}, error) {
+	// expecting a string so don't decode anything, just strip quotes
 	if targetType == config.VariableTypeString {
-		return input, nil
+		return strings.Trim(input, `"`), nil
+	}
+
+	// return empty types
+	if strings.TrimSpace(input) == "" {
+		switch targetType {
+		case config.VariableTypeList:
+			return []interface{}{}, nil
+		case config.VariableTypeMap:
+			return make(map[string]interface{}), nil
+		}
 	}
 
 	const sentinelValue = "SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY"
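The sentinel trick described in the comment exists because HCL will only decode a top-level object, not a bare list or map literal. A minimal sketch of the idea, assuming github.com/hashicorp/hcl's Decode and an illustrative helper name (the real implementation differs in its unwrapping details):

    func decodeListOrMap(name, input string) (interface{}, error) {
        const sentinel = "SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY"
        // Wrap the raw value so HCL sees `<sentinel> = <input>` as a top-level key.
        var parsed map[string]interface{}
        if err := hcl.Decode(&parsed, fmt.Sprintf("%s = %s", sentinel, input)); err != nil {
            return nil, fmt.Errorf("cannot parse value for variable %q as a list or map: %s", name, err)
        }
        // Pull the decoded list or map back out from under the sentinel key.
        return parsed[sentinel], nil
    }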
@@ -617,3 +617,44 @@ func TestContext2Input_interpolateVar(t *testing.T) {
 		t.Fatalf("err: %s", err)
 	}
 }
+
+func TestContext2Input_hcl(t *testing.T) {
+	input := new(MockUIInput)
+	m := testModule(t, "input-hcl")
+	p := testProvider("hcl")
+	p.ApplyFn = testApplyFn
+	p.DiffFn = testDiffFn
+	ctx := testContext2(t, &ContextOpts{
+		Module: m,
+		Providers: map[string]ResourceProviderFactory{
+			"hcl": testProviderFuncFixed(p),
+		},
+		Variables: map[string]interface{}{},
+		UIInput:   input,
+	})
+
+	input.InputReturnMap = map[string]string{
+		"var.listed": `["a", "b"]`,
+		"var.mapped": `{x = "y", w = "z"}`,
+	}
+
+	if err := ctx.Input(InputModeVar | InputModeVarUnset); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if _, err := ctx.Plan(); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	state, err := ctx.Apply()
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	actualStr := strings.TrimSpace(state.String())
+	expectedStr := strings.TrimSpace(testTerraformInputHCL)
+	if actualStr != expectedStr {
+		t.Logf("expected: \n%s", expectedStr)
+		t.Fatalf("bad: \n%s", actualStr)
+	}
+}
@@ -1223,6 +1223,7 @@ func TestContext2Plan_countZero(t *testing.T) {
 	actual := strings.TrimSpace(plan.String())
 	expected := strings.TrimSpace(testTerraformPlanCountZeroStr)
 	if actual != expected {
+		t.Logf("expected:\n%s", expected)
 		t.Fatalf("bad:\n%s", actual)
 	}
 }
@@ -2,10 +2,11 @@ package terraform
 
 import (
 	"fmt"
-	"reflect"
 	"strings"
 	"testing"
 	"time"
 
+	"github.com/hashicorp/terraform/flatmap"
 )
 
 func TestNewContextState(t *testing.T) {
@@ -165,23 +166,15 @@ func testDiffFn(
 			v = c.Config[k]
 		}
 
-		attrDiff := &ResourceAttrDiff{
-			Old: "",
-		}
-
-		if reflect.DeepEqual(v, []interface{}{}) {
-			attrDiff.New = ""
-		} else {
-			attrDiff.New = v.(string)
-		}
-
-		if k == "require_new" {
-			attrDiff.RequiresNew = true
-		}
-		if _, ok := c.Raw["__"+k+"_requires_new"]; ok {
-			attrDiff.RequiresNew = true
-		}
-		diff.Attributes[k] = attrDiff
+		for k, attrDiff := range testFlatAttrDiffs(k, v) {
+			if k == "require_new" {
+				attrDiff.RequiresNew = true
+			}
+			if _, ok := c.Raw["__"+k+"_requires_new"]; ok {
+				attrDiff.RequiresNew = true
+			}
+			diff.Attributes[k] = attrDiff
+		}
 	}
 
 	for _, k := range c.ComputedKeys {
@@ -219,6 +212,39 @@ func testDiffFn(
 	return diff, nil
 }
 
+// generate ResourceAttrDiffs for nested data structures in tests
+func testFlatAttrDiffs(k string, i interface{}) map[string]*ResourceAttrDiff {
+	diffs := make(map[string]*ResourceAttrDiff)
+	// check for strings and empty containers first
+	switch t := i.(type) {
+	case string:
+		diffs[k] = &ResourceAttrDiff{New: t}
+		return diffs
+	case map[string]interface{}:
+		if len(t) == 0 {
+			diffs[k] = &ResourceAttrDiff{New: ""}
+			return diffs
+		}
+	case []interface{}:
+		if len(t) == 0 {
+			diffs[k] = &ResourceAttrDiff{New: ""}
+			return diffs
+		}
+	}
+
+	flat := flatmap.Flatten(map[string]interface{}{k: i})
+
+	for k, v := range flat {
+		attrDiff := &ResourceAttrDiff{
+			Old: "",
+			New: v,
+		}
+		diffs[k] = attrDiff
+	}
+
+	return diffs
+}
+
 func testProvider(prefix string) *MockResourceProvider {
 	p := new(MockResourceProvider)
 	p.RefreshFn = func(info *InstanceInfo, s *InstanceState) (*InstanceState, error) {
@@ -1413,3 +1413,14 @@ module.mod2:
 STATE:
 
 <no state>`
+
+const testTerraformInputHCL = `
+hcl_instance.hcltest:
+  ID = foo
+  bar.w = z
+  bar.x = y
+  foo.# = 2
+  foo.0 = a
+  foo.1 = b
+  type = hcl_instance
+`
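For reference, testFlatAttrDiffs leans on the flatmap encoding that the expected state string above also uses: lists flatten to a count key plus indexed keys, maps flatten to dotted keys. A small illustrative sketch whose printed keys mirror the testTerraformInputHCL expectation (foo.#, foo.0, foo.1, bar.w, bar.x):

    flat := flatmap.Flatten(map[string]interface{}{
        "foo": []interface{}{"a", "b"},
        "bar": map[string]interface{}{"x": "y", "w": "z"},
    })
    // flat is a map[string]string along the lines of:
    //   foo.# = 2, foo.0 = a, foo.1 = b, bar.x = y, bar.w = z
    for k, v := range flat {
        fmt.Printf("%s = %s\n", k, v)
    }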
@@ -0,0 +1,12 @@
+variable "mapped" {
+  type = "map"
+}
+
+variable "listed" {
+  type = "list"
+}
+
+resource "hcl_instance" "hcltest" {
+  foo = "${var.listed}"
+  bar = "${var.mapped}"
+}
@@ -1,3 +0,0 @@
-.idea/
-*.iml
-*.test
@@ -1,12 +0,0 @@
-sudo: false
-
-language: go
-
-go:
-  - 1.5.1
-
-branches:
-  only:
-    - master
-
-script: make updatedeps test
@@ -38,6 +38,10 @@ var (
 	// defaultClient is used for performing requests without explicitly making
 	// a new client. It is purposely private to avoid modifications.
 	defaultClient = NewClient()
+
+	// We need to consume response bodies to maintain http connections, but
+	// limit the size we consume to respReadLimit.
+	respReadLimit = int64(4096)
 )
 
 // LenReader is an interface implemented by many in-memory io.Reader's. Used

@@ -86,6 +90,23 @@ func NewRequest(method, url string, body io.ReadSeeker) (*Request, error) {
 // consumers.
 type RequestLogHook func(*log.Logger, *http.Request, int)
 
+// ResponseLogHook is like RequestLogHook, but allows running a function
+// on each HTTP response. This function will be invoked at the end of
+// every HTTP request executed, regardless of whether a subsequent retry
+// needs to be performed or not. If the response body is read or closed
+// from this method, this will affect the response returned from Do().
+type ResponseLogHook func(*log.Logger, *http.Response)
+
+// CheckRetry specifies a policy for handling retries. It is called
+// following each request with the response and error values returned by
+// the http.Client. If CheckRetry returns false, the Client stops retrying
+// and returns the response to the caller. If CheckRetry returns an error,
+// that error value is returned in lieu of the error from the request. The
+// Client will close any response body when retrying, but if the retry is
+// aborted it is up to the CheckResponse callback to properly close any
+// response body before returning.
+type CheckRetry func(resp *http.Response, err error) (bool, error)
+
 // Client is used to make HTTP requests. It adds additional functionality
 // like automatic retries to tolerate minor outages.
 type Client struct {

@@ -99,6 +120,14 @@ type Client struct {
 	// RequestLogHook allows a user-supplied function to be called
 	// before each retry.
 	RequestLogHook RequestLogHook
+
+	// ResponseLogHook allows a user-supplied function to be called
+	// with the response from each HTTP request executed.
+	ResponseLogHook ResponseLogHook
+
+	// CheckRetry specifies the policy for handling retries, and is called
+	// after each request. The default policy is DefaultRetryPolicy.
+	CheckRetry CheckRetry
 }
 
 // NewClient creates a new Client with default settings.

@@ -109,9 +138,27 @@ func NewClient() *Client {
 		RetryWaitMin: defaultRetryWaitMin,
 		RetryWaitMax: defaultRetryWaitMax,
 		RetryMax:     defaultRetryMax,
+		CheckRetry:   DefaultRetryPolicy,
 	}
 }
 
+// DefaultRetryPolicy provides a default callback for Client.CheckRetry, which
+// will retry on connection errors and server errors.
+func DefaultRetryPolicy(resp *http.Response, err error) (bool, error) {
+	if err != nil {
+		return true, err
+	}
+	// Check the response code. We retry on 500-range responses to allow
+	// the server time to recover, as 500's are typically not permanent
+	// errors and may relate to outages on the server side. This will catch
+	// invalid response codes as well, like 0 and 999.
+	if resp.StatusCode == 0 || resp.StatusCode >= 500 {
+		return true, nil
+	}
+
+	return false, nil
+}
+
 // Do wraps calling an HTTP method with retries.
 func (c *Client) Do(req *Request) (*http.Response, error) {
 	c.Logger.Printf("[DEBUG] %s %s", req.Method, req.URL)

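To see how the new fields compose, here is a hedged usage sketch (not from this commit): it builds a client with `NewClient`, logs every response via `ResponseLogHook`, and layers one extra condition on top of `DefaultRetryPolicy` through `CheckRetry`. The URL and log messages are placeholders.

```go
package main

import (
	"log"
	"net/http"

	"github.com/hashicorp/go-retryablehttp"
)

func main() {
	c := retryablehttp.NewClient() // CheckRetry defaults to DefaultRetryPolicy

	// Log every response, including ones that will be retried.
	c.ResponseLogHook = func(l *log.Logger, resp *http.Response) {
		l.Printf("[DEBUG] status %d from %s", resp.StatusCode, resp.Request.URL)
	}

	// Extend the default policy: also retry on HTTP 429.
	c.CheckRetry = func(resp *http.Response, err error) (bool, error) {
		if err == nil && resp.StatusCode == http.StatusTooManyRequests {
			return true, nil
		}
		return retryablehttp.DefaultRetryPolicy(resp, err)
	}

	req, err := retryablehttp.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := c.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Printf("final status: %d", resp.StatusCode)
}
```
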
@@ -132,23 +179,36 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
 
 		// Attempt the request
 		resp, err := c.HTTPClient.Do(req.Request)
+
+		// Check if we should continue with retries.
+		checkOK, checkErr := c.CheckRetry(resp, err)
+
 		if err != nil {
 			c.Logger.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err)
-			goto RETRY
+		} else {
+			// Call this here to maintain the behavior of logging all requests,
+			// even if CheckRetry signals to stop.
+			if c.ResponseLogHook != nil {
+				// Call the response logger function if provided.
+				c.ResponseLogHook(c.Logger, resp)
+			}
 		}
-		code = resp.StatusCode
 
-		// Check the response code. We retry on 500-range responses to allow
-		// the server time to recover, as 500's are typically not permanent
-		// errors and may relate to outages on the server side.
-		if code%500 < 100 {
-			resp.Body.Close()
-			goto RETRY
+		// Now decide if we should continue.
+		if !checkOK {
+			if checkErr != nil {
+				err = checkErr
+			}
+			return resp, err
 		}
-		return resp, nil
 
-	RETRY:
-		if i == c.RetryMax {
+		// We're going to retry, consume any response to reuse the connection.
+		if err == nil {
+			c.drainBody(resp.Body)
+		}
+
+		remain := c.RetryMax - i
+		if remain == 0 {
 			break
 		}
 		wait := backoff(c.RetryWaitMin, c.RetryWaitMax, i)

@@ -156,7 +216,7 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
 		if code > 0 {
 			desc = fmt.Sprintf("%s (status: %d)", desc, code)
 		}
-		c.Logger.Printf("[DEBUG] %s: retrying in %s", desc, wait)
+		c.Logger.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain)
 		time.Sleep(wait)
 	}
 
@@ -165,6 +225,15 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
 		req.Method, req.URL, c.RetryMax+1)
 }
 
+// Try to read the response body so we can reuse this connection.
+func (c *Client) drainBody(body io.ReadCloser) {
+	defer body.Close()
+	_, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit))
+	if err != nil {
+		c.Logger.Printf("[ERR] error reading response body: %v", err)
+	}
+}
+
 // Get is a shortcut for doing a GET request without making a new client.
 func Get(url string) (*http.Response, error) {
 	return defaultClient.Get(url)

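`drainBody` exists because Go's HTTP transport only returns a keep-alive connection to the pool once the previous response body has been fully read and closed; `respReadLimit` bounds how much is read before giving up on reuse. The same pattern with plain `net/http`, as an illustrative sketch (not from this commit; the URL and limit are placeholders):

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

func main() {
	resp, err := http.Get("https://example.com/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	// Read (up to a bound) and close the body so the underlying TCP
	// connection can go back into the keep-alive pool -- the same reason
	// drainBody copies into ioutil.Discard through a LimitReader.
	io.Copy(ioutil.Discard, io.LimitReader(resp.Body, 4096))
	resp.Body.Close()
}
```
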
@@ -1108,8 +1108,10 @@
 			"revision": "cccb4a1328abbb89898f3ecf4311a05bddc4de6d"
 		},
 		{
+			"checksumSHA1": "GBDE1KDl/7c5hlRPYRZ7+C0WQ0g=",
 			"path": "github.com/hashicorp/go-retryablehttp",
-			"revision": "5ec125ef739293cb4d57c3456dd92ba9af29ed6e"
+			"revision": "f4ed9b0fa01a2ac614afe7c897ed2e3d8208f3e8",
+			"revisionTime": "2016-08-10T17:22:55Z"
 		},
 		{
 			"path": "github.com/hashicorp/go-rootcerts",

@@ -92,7 +92,7 @@ The `ingress` block supports:
   EC2-Classic, or Group IDs if using a VPC.
 * `self` - (Optional) If true, the security group itself will be added as
   a source to this ingress rule.
-* `to_port` - (Required) The end range port.
+* `to_port` - (Required) The end range port (or ICMP code if protocol is "icmp").
 
 The `egress` block supports:
 
@@ -105,7 +105,7 @@ The `egress` block supports:
   EC2-Classic, or Group IDs if using a VPC.
 * `self` - (Optional) If true, the security group itself will be added as
   a source to this egress rule.
-* `to_port` - (Required) The end range port.
+* `to_port` - (Required) The end range port (or ICMP code if protocol is "icmp").
 
 ~> **NOTE on Egress rules:** By default, AWS creates an `ALLOW ALL` egress rule when creating a
 new Security Group inside of a VPC. When creating a new Security
@@ -160,4 +160,4 @@ Security Groups can be imported using the `security group id`, e.g.
 
 ```
 $ terraform import aws_security_group.elb_sg sg-903004f8
 ```

@@ -51,7 +51,7 @@ Only valid with `egress`.
   depending on the `type`. Cannot be specified with `cidr_blocks`.
 * `self` - (Optional) If true, the security group itself will be added as
   a source to this ingress rule.
-* `to_port` - (Required) The end range port.
+* `to_port` - (Required) The end range port (or ICMP code if protocol is "icmp").
 
 ## Usage with prefix list IDs
 