Merge branch 'master' of github.com:hashicorp/terraform
commit 313ec1252c
@ -18,10 +18,12 @@ IMPROVEMENTS
* provider/aws: Add support for Enhanced Monitoring to `aws_rds_cluster_instance` [GH-8038]
* provider/aws: Add ability to set Requests Payer in `aws_s3_bucket` [GH-8065]
* provider/aws: Add ability to set canned ACL in `aws_s3_bucket_object` [GH-8091]
* provider/aws: Allow skipping credentials validation, requesting the Account ID, and/or the metadata API check [GH-7874]
* provider/azurerm: Add support for uploading blobs to Azure storage from a local source [GH-7994]
* provider/google: Allow atomic Cloud DNS record changes [GH-6575]
* provider/google: Move URLMap hosts to TypeSet from TypeList [GH-7472]
* provider/google: Support static private IP addresses in `resource_compute_instance` [GH-6310]
* provider/openstack: Support updating the External Gateway assigned to a Neutron router [GH-8070]
* provider/vsphere: Improved SCSI controller handling in `vsphere_virtual_machine` [GH-7908]
* provider/vsphere: Add disk type `Thick Lazy` to `vsphere_virtual_disk` and `vsphere_virtual_machine` [GH-7916]

@ -86,18 +86,18 @@ func parseAccountIdFromArn(arn string) (string, error) {
// This function is responsible for reading credentials from the
// environment in the case that they're not explicitly specified
// in the Terraform configuration.
func GetCredentials(key, secret, token, profile, credsfile string) *awsCredentials.Credentials {
func GetCredentials(c *Config) *awsCredentials.Credentials {
	// build a chain provider, lazy-evaluated by aws-sdk
	providers := []awsCredentials.Provider{
		&awsCredentials.StaticProvider{Value: awsCredentials.Value{
			AccessKeyID:     key,
			SecretAccessKey: secret,
			SessionToken:    token,
			AccessKeyID:     c.AccessKey,
			SecretAccessKey: c.SecretKey,
			SessionToken:    c.Token,
		}},
		&awsCredentials.EnvProvider{},
		&awsCredentials.SharedCredentialsProvider{
			Filename: credsfile,
			Profile:  profile,
			Filename: c.CredsFilename,
			Profile:  c.Profile,
		},
	}

@ -114,6 +114,7 @@ func GetCredentials(key, secret, token, profile, credsfile string) *awsCredentia
	// Real AWS should reply to a simple metadata request.
	// We check it actually does to ensure something else didn't just
	// happen to be listening on the same IP:Port
	if c.SkipMetadataApiCheck == false {
		metadataClient := ec2metadata.New(session.New(cfg))
		if metadataClient.Available() {
			providers = append(providers, &ec2rolecreds.EC2RoleProvider{

@ -128,6 +129,7 @@ func GetCredentials(key, secret, token, profile, credsfile string) *awsCredentia
				log.Printf("[WARN] Ignoring AWS metadata API endpoint at %s "+
					"as it doesn't return any instance-id", usedEndpoint)
			}
		}
	}

	return awsCredentials.NewChainCredentials(providers)
}

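Aside: the hunks above change `GetCredentials` to take the provider's `*Config` rather than five positional strings. A minimal sketch of the new call pattern follows; the import path is an assumption (it mirrors how `state/remote/s3.go` consumes this package later in the diff), and the `main` scaffolding plus the placeholder values are illustrative only.

```go
package main

import (
	"fmt"
	"log"

	// assumed import path for the provider package that defines Config/GetCredentials
	terraformAws "github.com/hashicorp/terraform/builtin/providers/aws"
)

func main() {
	// Fields left empty fall through the chain: environment variables,
	// the shared credentials file, then (unless skipped) the EC2 metadata API.
	cfg := &terraformAws.Config{
		Profile:       "default",                   // placeholder
		CredsFilename: "/home/me/.aws/credentials", // placeholder
	}

	creds := terraformAws.GetCredentials(cfg)
	v, err := creds.Get() // resolves the first provider in the chain that has credentials
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("credentials resolved by:", v.ProviderName)
}
```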
@ -218,7 +218,7 @@ func TestAWSGetCredentials_shouldError(t *testing.T) {
|
|||
defer resetEnv()
|
||||
cfg := Config{}
|
||||
|
||||
c := GetCredentials(cfg.AccessKey, cfg.SecretKey, cfg.Token, cfg.Profile, cfg.CredsFilename)
|
||||
c := GetCredentials(&cfg)
|
||||
_, err := c.Get()
|
||||
if awsErr, ok := err.(awserr.Error); ok {
|
||||
if awsErr.Code() != "NoCredentialProviders" {
|
||||
|
@ -251,7 +251,7 @@ func TestAWSGetCredentials_shouldBeStatic(t *testing.T) {
|
|||
Token: c.Token,
|
||||
}
|
||||
|
||||
creds := GetCredentials(cfg.AccessKey, cfg.SecretKey, cfg.Token, cfg.Profile, cfg.CredsFilename)
|
||||
creds := GetCredentials(&cfg)
|
||||
if creds == nil {
|
||||
t.Fatalf("Expected a static creds provider to be returned")
|
||||
}
|
||||
|
@ -286,7 +286,7 @@ func TestAWSGetCredentials_shouldIAM(t *testing.T) {
|
|||
// An empty config, no key supplied
|
||||
cfg := Config{}
|
||||
|
||||
creds := GetCredentials(cfg.AccessKey, cfg.SecretKey, cfg.Token, cfg.Profile, cfg.CredsFilename)
|
||||
creds := GetCredentials(&cfg)
|
||||
if creds == nil {
|
||||
t.Fatalf("Expected a static creds provider to be returned")
|
||||
}
|
||||
|
@ -335,7 +335,7 @@ func TestAWSGetCredentials_shouldIgnoreIAM(t *testing.T) {
|
|||
Token: c.Token,
|
||||
}
|
||||
|
||||
creds := GetCredentials(cfg.AccessKey, cfg.SecretKey, cfg.Token, cfg.Profile, cfg.CredsFilename)
|
||||
creds := GetCredentials(&cfg)
|
||||
if creds == nil {
|
||||
t.Fatalf("Expected a static creds provider to be returned")
|
||||
}
|
||||
|
@ -362,7 +362,7 @@ func TestAWSGetCredentials_shouldErrorWithInvalidEndpoint(t *testing.T) {
|
|||
ts := invalidAwsEnv(t)
|
||||
defer ts()
|
||||
|
||||
creds := GetCredentials("", "", "", "", "")
|
||||
creds := GetCredentials(&Config{})
|
||||
v, err := creds.Get()
|
||||
if err == nil {
|
||||
t.Fatal("Expected error returned when getting creds w/ invalid EC2 endpoint")
|
||||
|
@ -380,7 +380,7 @@ func TestAWSGetCredentials_shouldIgnoreInvalidEndpoint(t *testing.T) {
|
|||
ts := invalidAwsEnv(t)
|
||||
defer ts()
|
||||
|
||||
creds := GetCredentials("accessKey", "secretKey", "", "", "")
|
||||
creds := GetCredentials(&Config{AccessKey: "accessKey", SecretKey: "secretKey"})
|
||||
v, err := creds.Get()
|
||||
if err != nil {
|
||||
t.Fatalf("Getting static credentials w/ invalid EC2 endpoint failed: %s", err)
|
||||
|
@ -406,7 +406,7 @@ func TestAWSGetCredentials_shouldCatchEC2RoleProvider(t *testing.T) {
|
|||
ts := awsEnv(t)
|
||||
defer ts()
|
||||
|
||||
creds := GetCredentials("", "", "", "", "")
|
||||
creds := GetCredentials(&Config{})
|
||||
if creds == nil {
|
||||
t.Fatalf("Expected an EC2Role creds provider to be returned")
|
||||
}
|
||||
|
@ -452,7 +452,7 @@ func TestAWSGetCredentials_shouldBeShared(t *testing.T) {
|
|||
t.Fatalf("Error resetting env var AWS_SHARED_CREDENTIALS_FILE: %s", err)
|
||||
}
|
||||
|
||||
creds := GetCredentials("", "", "", "myprofile", file.Name())
|
||||
creds := GetCredentials(&Config{Profile: "myprofile", CredsFilename: file.Name()})
|
||||
if creds == nil {
|
||||
t.Fatalf("Expected a provider chain to be returned")
|
||||
}
|
||||
|
@ -479,7 +479,7 @@ func TestAWSGetCredentials_shouldBeENV(t *testing.T) {
|
|||
defer resetEnv()
|
||||
|
||||
cfg := Config{}
|
||||
creds := GetCredentials(cfg.AccessKey, cfg.SecretKey, cfg.Token, cfg.Profile, cfg.CredsFilename)
|
||||
creds := GetCredentials(&cfg)
|
||||
if creds == nil {
|
||||
t.Fatalf("Expected a static creds provider to be returned")
|
||||
}
|
||||
|
|
|
@ -75,7 +75,11 @@ type Config struct {
	Ec2Endpoint            string
	IamEndpoint            string
	ElbEndpoint            string
	S3Endpoint             string
	Insecure               bool
	SkipIamCredsValidation bool
	SkipIamAccountId       bool
	SkipMetadataApiCheck   bool
}

type AWSClient struct {

@ -141,7 +145,7 @@ func (c *Config) Client() (interface{}, error) {
	client.region = c.Region

	log.Println("[INFO] Building AWS auth structure")
	creds := GetCredentials(c.AccessKey, c.SecretKey, c.Token, c.Profile, c.CredsFilename)
	creds := GetCredentials(c)
	// Call Get to check for credential provider. If nothing found, we'll get an
	// error, and we can present it nicely to the user
	cp, err := creds.Get()

@ -199,11 +203,15 @@ func (c *Config) Client() (interface{}, error) {
	client.iamconn = iam.New(awsIamSess)
	client.stsconn = sts.New(sess)

	if c.SkipIamCredsValidation == false {
		err = c.ValidateCredentials(client.stsconn)
		if err != nil {
			errs = append(errs, err)
			return nil, &multierror.Error{Errors: errs}
		}
	}

	if c.SkipIamAccountId == false {
		accountId, err := GetAccountId(client.iamconn, client.stsconn, cp.ProviderName)
		if err == nil {
			client.accountid = accountId

@ -213,6 +221,7 @@ func (c *Config) Client() (interface{}, error) {
		if authErr != nil {
			errs = append(errs, authErr)
		}
	}

	client.apigateway = apigateway.New(sess)
	client.appautoscalingconn = applicationautoscaling.New(sess)

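Aside: with the three `Skip*` fields added to `Config` above, a caller can stand up the client without touching IAM/STS or the metadata API. A hedged sketch; the import path is assumed as before, while `Region`, the `Skip*` fields, `Client()` and `AWSClient` all appear in this diff.

```go
package main

import (
	"log"

	terraformAws "github.com/hashicorp/terraform/builtin/providers/aws"
)

func main() {
	// AccessKey/SecretKey omitted: the credential chain can still find them
	// in the environment or the shared credentials file.
	c := &terraformAws.Config{
		Region:                 "us-east-1",
		SkipIamCredsValidation: true, // skip the sts/iam credential validation call
		SkipIamAccountId:       true, // skip resolving the account ID
		SkipMetadataApiCheck:   true, // skip probing the EC2 metadata endpoint
	}

	raw, err := c.Client() // returns interface{} per the signature above
	if err != nil {
		log.Fatal(err)
	}
	client := raw.(*terraformAws.AWSClient) // concrete client used by the provider's resources
	_ = client
}
```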
@ -1,28 +0,0 @@
|
|||
package aws
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccAWSRole_importBasic(t *testing.T) {
|
||||
resourceName := "aws_iam_role.role"
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSRoleDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccAWSRoleConfig,
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
|
@ -100,6 +100,7 @@ func Provider() terraform.ResourceProvider {
|
|||
Default: "",
|
||||
Description: descriptions["kinesis_endpoint"],
|
||||
},
|
||||
|
||||
"endpoints": endpointsSchema(),
|
||||
|
||||
"insecure": &schema.Schema{
|
||||
|
@ -108,6 +109,27 @@ func Provider() terraform.ResourceProvider {
|
|||
Default: false,
|
||||
Description: descriptions["insecure"],
|
||||
},
|
||||
|
||||
"skip_iam_creds_validation": &schema.Schema{
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Default: false,
|
||||
Description: descriptions["skip_iam_creds_validation"],
|
||||
},
|
||||
|
||||
"skip_iam_account_id": &schema.Schema{
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Default: false,
|
||||
Description: descriptions["skip_iam_account_id"],
|
||||
},
|
||||
|
||||
"skip_metadata_api_check": &schema.Schema{
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Default: false,
|
||||
Description: descriptions["skip_metadata_api_check"],
|
||||
},
|
||||
},
|
||||
|
||||
DataSourcesMap: map[string]*schema.Resource{
|
||||
|
@ -333,6 +355,15 @@ func init() {

		"insecure": "Explicitly allow the provider to perform \"insecure\" SSL requests. If omitted," +
			"default value is `false`",

		"skip_iam_creds_validation": "Skip the IAM/STS credentials validation. " +
			"Used for AWS API implementations that do not use IAM.",

		"skip_iam_account_id": "Skip the request of account id to IAM/STS. " +
			"Used for AWS API implementations that do not use IAM.",

		"skip_metadata_api_check": "Skip the AWS Metadata API check. " +
			"Used for AWS API implementations that do not have a metadata api endpoint.",
	}
}

@ -348,6 +379,9 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) {
|
|||
DynamoDBEndpoint: d.Get("dynamodb_endpoint").(string),
|
||||
KinesisEndpoint: d.Get("kinesis_endpoint").(string),
|
||||
Insecure: d.Get("insecure").(bool),
|
||||
SkipIamCredsValidation: d.Get("skip_iam_creds_validation").(bool),
|
||||
SkipIamAccountId: d.Get("skip_iam_account_id").(bool),
|
||||
SkipMetadataApiCheck: d.Get("skip_metadata_api_check").(bool),
|
||||
}
|
||||
|
||||
endpointsSet := d.Get("endpoints").(*schema.Set)
|
||||
|
|
|
@ -111,7 +111,16 @@ resource "aws_iam_group" "group" {
|
|||
resource "aws_iam_group_policy" "foo" {
|
||||
name = "foo_policy"
|
||||
group = "${aws_iam_group.group.name}"
|
||||
policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"*\",\"Resource\":\"*\"}}"
|
||||
policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": {
|
||||
"Effect": "Allow",
|
||||
"Action": "*",
|
||||
"Resource": "*"
|
||||
}
|
||||
}
|
||||
EOF
|
||||
}
|
||||
`
|
||||
|
||||
|
|
|
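Aside: a side benefit of the heredoc form used above is that the policy is plain JSON and can be machine-checked, where the old escaped one-liner had to be maintained by hand. An illustrative, standalone check (not part of the commit):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Same policy document as the heredoc in the test config above.
const groupPolicy = `{
  "Version": "2012-10-17",
  "Statement": {
    "Effect": "Allow",
    "Action": "*",
    "Resource": "*"
  }
}`

func main() {
	var doc map[string]interface{}
	if err := json.Unmarshal([]byte(groupPolicy), &doc); err != nil {
		fmt.Println("invalid policy JSON:", err)
		return
	}
	fmt.Println("policy JSON parses; version:", doc["Version"])
}
```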
@ -121,7 +121,7 @@ func testAccCheckAWSInstanceProfileExists(n string, res *iam.GetInstanceProfileO
|
|||
const testAccAwsIamInstanceProfileConfig = `
|
||||
resource "aws_iam_role" "test" {
|
||||
name = "test"
|
||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ec2.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}"
|
||||
}
|
||||
|
||||
resource "aws_iam_instance_profile" "test" {
|
||||
|
@ -133,7 +133,7 @@ resource "aws_iam_instance_profile" "test" {
|
|||
const testAccAWSInstanceProfilePrefixNameConfig = `
|
||||
resource "aws_iam_role" "test" {
|
||||
name = "test"
|
||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ec2.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}"
|
||||
}
|
||||
|
||||
resource "aws_iam_instance_profile" "test" {
|
||||
|
|
|
@ -114,7 +114,21 @@ resource "aws_iam_user" "user" {
|
|||
}
|
||||
resource "aws_iam_role" "role" {
|
||||
name = "test-role"
|
||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": "sts:AssumeRole",
|
||||
"Principal": {
|
||||
"Service": "ec2.amazonaws.com"
|
||||
},
|
||||
"Effect": "Allow",
|
||||
"Sid": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "aws_iam_group" "group" {
|
||||
|
@ -161,15 +175,60 @@ resource "aws_iam_user" "user3" {
|
|||
}
|
||||
resource "aws_iam_role" "role" {
|
||||
name = "test-role"
|
||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": "sts:AssumeRole",
|
||||
"Principal": {
|
||||
"Service": "ec2.amazonaws.com"
|
||||
},
|
||||
"Effect": "Allow",
|
||||
"Sid": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "aws_iam_role" "role2" {
|
||||
name = "test-role2"
|
||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": "sts:AssumeRole",
|
||||
"Principal": {
|
||||
"Service": "ec2.amazonaws.com"
|
||||
},
|
||||
"Effect": "Allow",
|
||||
"Sid": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
|
||||
}
|
||||
resource "aws_iam_role" "role3" {
|
||||
name = "test-role3"
|
||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": "sts:AssumeRole",
|
||||
"Principal": {
|
||||
"Service": "ec2.amazonaws.com"
|
||||
},
|
||||
"Effect": "Allow",
|
||||
"Sid": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
|
||||
}
|
||||
resource "aws_iam_group" "group" {
|
||||
name = "test-group"
|
||||
|
|
|
@ -2,7 +2,6 @@ package aws
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
|
@ -21,10 +20,6 @@ func resourceAwsIamRole() *schema.Resource {
|
|||
Update: resourceAwsIamRoleUpdate,
|
||||
Delete: resourceAwsIamRoleDelete,
|
||||
|
||||
Importer: &schema.ResourceImporter{
|
||||
State: schema.ImportStatePassthrough,
|
||||
},
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"arn": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
|
@ -179,10 +174,6 @@ func resourceAwsIamRoleReadResult(d *schema.ResourceData, role *iam.Role) error
|
|||
if err := d.Set("unique_id", role.RoleId); err != nil {
|
||||
return err
|
||||
}
|
||||
policy, _ := url.QueryUnescape(*role.AssumeRolePolicyDocument)
|
||||
if err := d.Set("assume_role_policy", aws.String(policy)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -91,7 +91,21 @@ func testAccCheckAWSRolePolicyAttachmentAttributes(policies []string, out *iam.L
|
|||
const testAccAWSRolePolicyAttachConfig = `
|
||||
resource "aws_iam_role" "role" {
|
||||
name = "test-role"
|
||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": "sts:AssumeRole",
|
||||
"Principal": {
|
||||
"Service": "ec2.amazonaws.com"
|
||||
},
|
||||
"Effect": "Allow",
|
||||
"Sid": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "aws_iam_policy" "policy" {
|
||||
|
@ -122,7 +136,21 @@ resource "aws_iam_role_policy_attachment" "test-attach" {
|
|||
const testAccAWSRolePolicyAttachConfigUpdate = `
|
||||
resource "aws_iam_role" "role" {
|
||||
name = "test-role"
|
||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": "sts:AssumeRole",
|
||||
"Principal": {
|
||||
"Service": "ec2.amazonaws.com"
|
||||
},
|
||||
"Effect": "Allow",
|
||||
"Sid": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "aws_iam_policy" "policy" {
|
||||
|
|
|
@ -115,13 +115,36 @@ func testAccIAMRolePolicyConfig(role, policy1 string) string {
|
|||
resource "aws_iam_role" "role" {
|
||||
name = "tf_test_role_%s"
|
||||
path = "/"
|
||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": "sts:AssumeRole",
|
||||
"Principal": {
|
||||
"Service": "ec2.amazonaws.com"
|
||||
},
|
||||
"Effect": "Allow",
|
||||
"Sid": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy" "foo" {
|
||||
name = "tf_test_policy_%s"
|
||||
role = "${aws_iam_role.role.name}"
|
||||
policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"*\",\"Resource\":\"*\"}}"
|
||||
policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": {
|
||||
"Effect": "Allow",
|
||||
"Action": "*",
|
||||
"Resource": "*"
|
||||
}
|
||||
}
|
||||
EOF
|
||||
}
|
||||
`, role, policy1)
|
||||
}
|
||||
|
@ -131,19 +154,51 @@ func testAccIAMRolePolicyConfigUpdate(role, policy1, policy2 string) string {
|
|||
resource "aws_iam_role" "role" {
|
||||
name = "tf_test_role_%s"
|
||||
path = "/"
|
||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": "sts:AssumeRole",
|
||||
"Principal": {
|
||||
"Service": "ec2.amazonaws.com"
|
||||
},
|
||||
"Effect": "Allow",
|
||||
"Sid": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy" "foo" {
|
||||
name = "tf_test_policy_%s"
|
||||
role = "${aws_iam_role.role.name}"
|
||||
policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"*\",\"Resource\":\"*\"}}"
|
||||
policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": {
|
||||
"Effect": "Allow",
|
||||
"Action": "*",
|
||||
"Resource": "*"
|
||||
}
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy" "bar" {
|
||||
name = "tf_test_policy_2_%s"
|
||||
role = "${aws_iam_role.role.name}"
|
||||
policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"*\",\"Resource\":\"*\"}}"
|
||||
policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": {
|
||||
"Effect": "Allow",
|
||||
"Action": "*",
|
||||
"Resource": "*"
|
||||
}
|
||||
}
|
||||
EOF
|
||||
}
|
||||
`, role, policy1, policy2)
|
||||
}
|
||||
|
|
|
@ -167,7 +167,7 @@ const testAccAWSRoleConfig = `
|
|||
resource "aws_iam_role" "role" {
|
||||
name = "test-role"
|
||||
path = "/"
|
||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ec2.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}"
|
||||
}
|
||||
`
|
||||
|
||||
|
@ -175,14 +175,29 @@ const testAccAWSRolePrefixNameConfig = `
|
|||
resource "aws_iam_role" "role" {
|
||||
name_prefix = "test-role-"
|
||||
path = "/"
|
||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ec2.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}"
|
||||
}
|
||||
`
|
||||
|
||||
const testAccAWSRolePre = `
|
||||
resource "aws_iam_role" "role_update_test" {
|
||||
name = "tf_old_name"
|
||||
path = "/test/"
|
||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": "sts:AssumeRole",
|
||||
"Principal": {
|
||||
"Service": "ec2.amazonaws.com"
|
||||
},
|
||||
"Effect": "Allow",
|
||||
"Sid": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy" "role_update_test" {
|
||||
|
@ -217,7 +232,21 @@ const testAccAWSRolePost = `
|
|||
resource "aws_iam_role" "role_update_test" {
|
||||
name = "tf_new_name"
|
||||
path = "/test/"
|
||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": "sts:AssumeRole",
|
||||
"Principal": {
|
||||
"Service": "ec2.amazonaws.com"
|
||||
},
|
||||
"Effect": "Allow",
|
||||
"Sid": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy" "role_update_test" {
|
||||
|
|
|
@ -178,6 +178,97 @@ func TestAccComputeV2Instance_volumeDetachPostCreation(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestAccComputeV2Instance_additionalVolumeDetachPostCreation(t *testing.T) {
|
||||
var instance servers.Server
|
||||
var volume volumes.Volume
|
||||
|
||||
var testAccComputeV2Instance_volumeDetachPostCreationInstanceAndAdditionalVolume = fmt.Sprintf(`
|
||||
|
||||
resource "openstack_blockstorage_volume_v1" "root_volume" {
|
||||
name = "root_volume"
|
||||
size = 1
|
||||
image_id = "%s"
|
||||
}
|
||||
|
||||
resource "openstack_blockstorage_volume_v1" "additional_volume" {
|
||||
name = "additional_volume"
|
||||
size = 1
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "foo" {
|
||||
name = "terraform-test"
|
||||
security_groups = ["default"]
|
||||
|
||||
block_device {
|
||||
uuid = "${openstack_blockstorage_volume_v1.root_volume.id}"
|
||||
source_type = "volume"
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = false
|
||||
}
|
||||
|
||||
volume {
|
||||
volume_id = "${openstack_blockstorage_volume_v1.additional_volume.id}"
|
||||
}
|
||||
}`,
|
||||
os.Getenv("OS_IMAGE_ID"))
|
||||
|
||||
var testAccComputeV2Instance_volumeDetachPostCreationInstance = fmt.Sprintf(`
|
||||
|
||||
resource "openstack_blockstorage_volume_v1" "root_volume" {
|
||||
name = "root_volume"
|
||||
size = 1
|
||||
image_id = "%s"
|
||||
}
|
||||
|
||||
resource "openstack_blockstorage_volume_v1" "additional_volume" {
|
||||
name = "additional_volume"
|
||||
size = 1
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "foo" {
|
||||
name = "terraform-test"
|
||||
security_groups = ["default"]
|
||||
|
||||
block_device {
|
||||
uuid = "${openstack_blockstorage_volume_v1.root_volume.id}"
|
||||
source_type = "volume"
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = false
|
||||
}
|
||||
}`,
|
||||
os.Getenv("OS_IMAGE_ID"))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeV2InstanceDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeV2Instance_volumeDetachPostCreationInstanceAndAdditionalVolume,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.root_volume", &volume),
|
||||
testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.additional_volume", &volume),
|
||||
testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.foo", &instance),
|
||||
testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume),
|
||||
testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
Config: testAccComputeV2Instance_volumeDetachPostCreationInstance,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.root_volume", &volume),
|
||||
testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.additional_volume", &volume),
|
||||
testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.foo", &instance),
|
||||
testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume),
|
||||
testAccCheckComputeV2InstanceVolumeDetached(&instance, "openstack_blockstorage_volume_v1.additional_volume"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeV2Instance_floatingIPAttachGlobally(t *testing.T) {
|
||||
var instance servers.Server
|
||||
var fip floatingip.FloatingIP
|
||||
|
@ -993,3 +1084,41 @@ func TestAccComputeV2Instance_stop_before_destroy(t *testing.T) {
|
|||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckComputeV2InstanceVolumeDetached(instance *servers.Server, volume_id string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
var attachments []volumeattach.VolumeAttachment
|
||||
|
||||
rs, ok := s.RootModule().Resources[volume_id]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", volume_id)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
computeClient, err := config.computeV2Client(OS_REGION_NAME)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = volumeattach.List(computeClient, instance.ID).EachPage(func(page pagination.Page) (bool, error) {
|
||||
actual, err := volumeattach.ExtractVolumeAttachments(page)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Unable to lookup attachment: %s", err)
|
||||
}
|
||||
|
||||
attachments = actual
|
||||
return true, nil
|
||||
})
|
||||
|
||||
for _, attachment := range attachments {
|
||||
if attachment.VolumeID == rs.Primary.ID {
|
||||
return fmt.Errorf("Volume is still attached.")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
|
|
@ -213,6 +213,15 @@ func resourceNetworkingRouterV2Update(d *schema.ResourceData, meta interface{})
|
|||
asu := d.Get("admin_state_up").(bool)
|
||||
updateOpts.AdminStateUp = &asu
|
||||
}
|
||||
if d.HasChange("external_gateway") {
|
||||
externalGateway := d.Get("external_gateway").(string)
|
||||
if externalGateway != "" {
|
||||
gatewayInfo := routers.GatewayInfo{
|
||||
NetworkID: externalGateway,
|
||||
}
|
||||
updateOpts.GatewayInfo = &gatewayInfo
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Updating Router %s with options: %+v", d.Id(), updateOpts)
|
||||
|
||||
|
|
|
@ -2,6 +2,7 @@ package openstack
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
|
@ -34,6 +35,46 @@ func TestAccNetworkingV2Router_basic(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestAccNetworkingV2Router_update_external_gw(t *testing.T) {
|
||||
var router routers.Router
|
||||
externalGateway := os.Getenv("OS_EXTGW_ID")
|
||||
|
||||
var testAccNetworkingV2Router_update_external_gw_1 = fmt.Sprintf(`
|
||||
resource "openstack_networking_router_v2" "foo" {
|
||||
name = "router"
|
||||
admin_state_up = "true"
|
||||
distributed = "false"
|
||||
}`)
|
||||
|
||||
var testAccNetworkingV2Router_update_external_gw_2 = fmt.Sprintf(`
|
||||
resource "openstack_networking_router_v2" "foo" {
|
||||
name = "router"
|
||||
admin_state_up = "true"
|
||||
distributed = "false"
|
||||
external_gateway = "%s"
|
||||
}`, externalGateway)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckNetworkingV2RouterDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccNetworkingV2Router_update_external_gw_1,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckNetworkingV2RouterExists(t, "openstack_networking_router_v2.foo", &router),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
Config: testAccNetworkingV2Router_update_external_gw_2,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr("openstack_networking_router_v2.foo", "external_gateway", externalGateway),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckNetworkingV2RouterDestroy(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
networkingClient, err := config.networkingV2Client(OS_REGION_NAME)
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"bytes"
|
||||
"crypto/md5"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
|
@ -276,9 +277,26 @@ func (c *AtlasClient) http() (*retryablehttp.Client, error) {
		return nil, err
	}
	rc := retryablehttp.NewClient()

	rc.CheckRetry = func(resp *http.Response, err error) (bool, error) {
		if err != nil {
			// don't bother retrying if the certs don't match
			if err, ok := err.(*url.Error); ok {
				if _, ok := err.Err.(x509.UnknownAuthorityError); ok {
					return false, nil
				}
			}
			// continue retrying
			return true, nil
		}
		return retryablehttp.DefaultRetryPolicy(resp, err)
	}

	t := cleanhttp.DefaultTransport()
	t.TLSClientConfig = tlsConfig
	rc.HTTPClient.Transport = t

	c.HTTPClient = rc
	return rc, nil
}

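Aside: the `CheckRetry` callback above depends on `net/http` wrapping transport failures in `*url.Error`, so the certificate error has to be unwrapped before it can be matched. A self-contained sketch of that pattern using only standard-library types:

```go
package main

import (
	"crypto/x509"
	"fmt"
	"net/url"
)

// isUnknownAuthority reports whether err is a client error caused by an
// untrusted certificate, the case the Atlas client refuses to retry.
func isUnknownAuthority(err error) bool {
	if uerr, ok := err.(*url.Error); ok {
		_, bad := uerr.Err.(x509.UnknownAuthorityError)
		return bad
	}
	return false
}

func main() {
	err := &url.Error{Op: "Get", URL: "https://example.invalid", Err: x509.UnknownAuthorityError{}}
	fmt.Println(isUnknownAuthority(err)) // true: retrying would never succeed
}
```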
@ -3,8 +3,11 @@ package remote
|
|||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
@ -36,6 +39,53 @@ func TestAtlasClient(t *testing.T) {
|
|||
testClient(t, client)
|
||||
}
|
||||
|
||||
func TestAtlasClient_noRetryOnBadCerts(t *testing.T) {
|
||||
acctest.RemoteTestPrecheck(t)
|
||||
|
||||
client, err := atlasFactory(map[string]string{
|
||||
"access_token": "NOT_REQUIRED",
|
||||
"name": "hashicorp/test-remote-state",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
|
||||
ac := client.(*AtlasClient)
|
||||
// trigger the AtlasClient to build the http client and assign HTTPClient
|
||||
httpClient, err := ac.http()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// remove the CA certs from the client
|
||||
brokenCfg := &tls.Config{
|
||||
RootCAs: new(x509.CertPool),
|
||||
}
|
||||
httpClient.HTTPClient.Transport.(*http.Transport).TLSClientConfig = brokenCfg
|
||||
|
||||
// Instrument CheckRetry to make sure we didn't retry
|
||||
retries := 0
|
||||
oldCheck := httpClient.CheckRetry
|
||||
httpClient.CheckRetry = func(resp *http.Response, err error) (bool, error) {
|
||||
if retries > 0 {
|
||||
t.Fatal("retried after certificate error")
|
||||
}
|
||||
retries++
|
||||
return oldCheck(resp, err)
|
||||
}
|
||||
|
||||
_, err = client.Get()
|
||||
if err != nil {
|
||||
if err, ok := err.(*url.Error); ok {
|
||||
if _, ok := err.Err.(x509.UnknownAuthorityError); ok {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
t.Fatalf("expected x509.UnknownAuthorityError, got %v", err)
|
||||
}
|
||||
|
||||
func TestAtlasClient_ReportedConflictEqualStates(t *testing.T) {
|
||||
fakeAtlas := newFakeAtlas(t, testStateModuleOrderChange)
|
||||
srv := fakeAtlas.Server()
|
||||
|
|
|
@ -60,7 +60,13 @@ func s3Factory(conf map[string]string) (Client, error) {
|
|||
kmsKeyID := conf["kms_key_id"]
|
||||
|
||||
var errs []error
|
||||
creds := terraformAws.GetCredentials(conf["access_key"], conf["secret_key"], conf["token"], conf["profile"], conf["shared_credentials_file"])
|
||||
creds := terraformAws.GetCredentials(&terraformAws.Config{
|
||||
AccessKey: conf["access_key"],
|
||||
SecretKey: conf["secret_key"],
|
||||
Token: conf["token"],
|
||||
Profile: conf["profile"],
|
||||
CredsFilename: conf["shared_credentials_file"],
|
||||
})
|
||||
// Call Get to check for credential provider. If nothing found, we'll get an
|
||||
// error, and we can present it nicely to the user
|
||||
_, err := creds.Get()
|
||||
|
|
|
@ -317,16 +317,18 @@ func (c *Context) Input(mode InputMode) error {
|
|||
}
|
||||
}
|
||||
|
||||
var valueType config.VariableType
|
||||
|
||||
v := m[n]
|
||||
switch v.Type() {
|
||||
switch valueType = v.Type(); valueType {
|
||||
case config.VariableTypeUnknown:
|
||||
continue
|
||||
case config.VariableTypeMap:
|
||||
continue
|
||||
// OK
|
||||
case config.VariableTypeList:
|
||||
continue
|
||||
// OK
|
||||
case config.VariableTypeString:
|
||||
// Good!
|
||||
// OK
|
||||
default:
|
||||
panic(fmt.Sprintf("Unknown variable type: %#v", v.Type()))
|
||||
}
|
||||
|
@ -340,6 +342,12 @@ func (c *Context) Input(mode InputMode) error {
|
|||
}
|
||||
}
|
||||
|
||||
// this should only happen during tests
|
||||
if c.uiInput == nil {
|
||||
log.Println("[WARN] Content.uiInput is nil")
|
||||
continue
|
||||
}
|
||||
|
||||
// Ask the user for a value for this variable
|
||||
var value string
|
||||
retry := 0
|
||||
|
@ -365,17 +373,21 @@ func (c *Context) Input(mode InputMode) error {
|
|||
continue
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
// no value provided, so don't set the variable at all
|
||||
if value == "" {
|
||||
// No value, just exit the loop. With no value, we just
|
||||
// use whatever is currently set in variables.
|
||||
break
|
||||
continue
|
||||
}
|
||||
|
||||
break
|
||||
decoded, err := parseVariableAsHCL(n, value, valueType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if value != "" {
|
||||
c.variables[n] = value
|
||||
if decoded != nil {
|
||||
c.variables[n] = decoded
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -656,9 +668,20 @@ func (c *Context) walk(
// the name of the variable. In order to get around the restriction of HCL requiring a
// top level object, we prepend a sentinel key, decode the user-specified value as its
// value and pull the value back out of the resulting map.
func parseVariableAsHCL(name string, input interface{}, targetType config.VariableType) (interface{}, error) {
func parseVariableAsHCL(name string, input string, targetType config.VariableType) (interface{}, error) {
	// expecting a string so don't decode anything, just strip quotes
	if targetType == config.VariableTypeString {
		return input, nil
		return strings.Trim(input, `"`), nil
	}

	// return empty types
	if strings.TrimSpace(input) == "" {
		switch targetType {
		case config.VariableTypeList:
			return []interface{}{}, nil
		case config.VariableTypeMap:
			return make(map[string]interface{}), nil
		}
	}

	const sentinelValue = "SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY"

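Aside: a standalone sketch of the sentinel-key trick described in the comment above — HCL wants a top-level assignment, so the raw input is wrapped under a throwaway key, decoded, and unwrapped again. The only external assumption is `hcl.Decode` from `github.com/hashicorp/hcl`, which Terraform already vendors.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

func main() {
	const sentinel = "SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY"
	userInput := `["a", "b"]` // what the operator typed at the input prompt

	// Wrap the value under the sentinel key so it forms a valid HCL document.
	var decoded map[string]interface{}
	if err := hcl.Decode(&decoded, fmt.Sprintf("%s = %s", sentinel, userInput)); err != nil {
		log.Fatal(err)
	}

	// Pull the real value back out from under the sentinel key.
	fmt.Printf("%#v\n", decoded[sentinel])
}
```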
@ -617,3 +617,44 @@ func TestContext2Input_interpolateVar(t *testing.T) {
|
|||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestContext2Input_hcl(t *testing.T) {
|
||||
input := new(MockUIInput)
|
||||
m := testModule(t, "input-hcl")
|
||||
p := testProvider("hcl")
|
||||
p.ApplyFn = testApplyFn
|
||||
p.DiffFn = testDiffFn
|
||||
ctx := testContext2(t, &ContextOpts{
|
||||
Module: m,
|
||||
Providers: map[string]ResourceProviderFactory{
|
||||
"hcl": testProviderFuncFixed(p),
|
||||
},
|
||||
Variables: map[string]interface{}{},
|
||||
UIInput: input,
|
||||
})
|
||||
|
||||
input.InputReturnMap = map[string]string{
|
||||
"var.listed": `["a", "b"]`,
|
||||
"var.mapped": `{x = "y", w = "z"}`,
|
||||
}
|
||||
|
||||
if err := ctx.Input(InputModeVar | InputModeVarUnset); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
if _, err := ctx.Plan(); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
state, err := ctx.Apply()
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
actualStr := strings.TrimSpace(state.String())
|
||||
expectedStr := strings.TrimSpace(testTerraformInputHCL)
|
||||
if actualStr != expectedStr {
|
||||
t.Logf("expected: \n%s", expectedStr)
|
||||
t.Fatalf("bad: \n%s", actualStr)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1223,6 +1223,7 @@ func TestContext2Plan_countZero(t *testing.T) {
|
|||
actual := strings.TrimSpace(plan.String())
|
||||
expected := strings.TrimSpace(testTerraformPlanCountZeroStr)
|
||||
if actual != expected {
|
||||
t.Logf("expected:\n%s", expected)
|
||||
t.Fatalf("bad:\n%s", actual)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2,10 +2,11 @@ package terraform
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/terraform/flatmap"
|
||||
)
|
||||
|
||||
func TestNewContextState(t *testing.T) {
|
||||
|
@ -165,16 +166,7 @@ func testDiffFn(
|
|||
v = c.Config[k]
|
||||
}
|
||||
|
||||
attrDiff := &ResourceAttrDiff{
|
||||
Old: "",
|
||||
}
|
||||
|
||||
if reflect.DeepEqual(v, []interface{}{}) {
|
||||
attrDiff.New = ""
|
||||
} else {
|
||||
attrDiff.New = v.(string)
|
||||
}
|
||||
|
||||
for k, attrDiff := range testFlatAttrDiffs(k, v) {
|
||||
if k == "require_new" {
|
||||
attrDiff.RequiresNew = true
|
||||
}
|
||||
|
@ -183,6 +175,7 @@ func testDiffFn(
|
|||
}
|
||||
diff.Attributes[k] = attrDiff
|
||||
}
|
||||
}
|
||||
|
||||
for _, k := range c.ComputedKeys {
|
||||
diff.Attributes[k] = &ResourceAttrDiff{
|
||||
|
@ -219,6 +212,39 @@ func testDiffFn(
|
|||
return diff, nil
|
||||
}
|
||||
|
||||
// generate ResourceAttrDiffs for nested data structures in tests
|
||||
func testFlatAttrDiffs(k string, i interface{}) map[string]*ResourceAttrDiff {
|
||||
diffs := make(map[string]*ResourceAttrDiff)
|
||||
// check for strings and empty containers first
|
||||
switch t := i.(type) {
|
||||
case string:
|
||||
diffs[k] = &ResourceAttrDiff{New: t}
|
||||
return diffs
|
||||
case map[string]interface{}:
|
||||
if len(t) == 0 {
|
||||
diffs[k] = &ResourceAttrDiff{New: ""}
|
||||
return diffs
|
||||
}
|
||||
case []interface{}:
|
||||
if len(t) == 0 {
|
||||
diffs[k] = &ResourceAttrDiff{New: ""}
|
||||
return diffs
|
||||
}
|
||||
}
|
||||
|
||||
flat := flatmap.Flatten(map[string]interface{}{k: i})
|
||||
|
||||
for k, v := range flat {
|
||||
attrDiff := &ResourceAttrDiff{
|
||||
Old: "",
|
||||
New: v,
|
||||
}
|
||||
diffs[k] = attrDiff
|
||||
}
|
||||
|
||||
return diffs
|
||||
}
|
||||
|
||||
func testProvider(prefix string) *MockResourceProvider {
|
||||
p := new(MockResourceProvider)
|
||||
p.RefreshFn = func(info *InstanceInfo, s *InstanceState) (*InstanceState, error) {
|
||||
|
|
|
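Aside: `testFlatAttrDiffs` above leans on `flatmap.Flatten` to turn nested test values into the dotted attribute keys that the expected state strings use. An illustrative run; the `flatmap` import path is the one added to this file's import block.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/flatmap"
)

func main() {
	flat := flatmap.Flatten(map[string]interface{}{
		"foo": []interface{}{"a", "b"},
		"bar": map[string]interface{}{"x": "y", "w": "z"},
	})

	// Produces dotted keys such as foo.#, foo.0, foo.1, bar.x and bar.w,
	// matching the shape of testTerraformInputHCL further down.
	for k, v := range flat {
		fmt.Println(k, "=", v)
	}
}
```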
@ -1413,3 +1413,14 @@ module.mod2:
|
|||
STATE:
|
||||
|
||||
<no state>`
|
||||
|
||||
const testTerraformInputHCL = `
|
||||
hcl_instance.hcltest:
|
||||
ID = foo
|
||||
bar.w = z
|
||||
bar.x = y
|
||||
foo.# = 2
|
||||
foo.0 = a
|
||||
foo.1 = b
|
||||
type = hcl_instance
|
||||
`
|
||||
|
|
|
@ -0,0 +1,12 @@
|
|||
variable "mapped" {
|
||||
type = "map"
|
||||
}
|
||||
|
||||
variable "listed" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
resource "hcl_instance" "hcltest" {
|
||||
foo = "${var.listed}"
|
||||
bar = "${var.mapped}"
|
||||
}
|
|
@ -1,3 +0,0 @@
|
|||
.idea/
|
||||
*.iml
|
||||
*.test
|
|
@ -1,12 +0,0 @@
|
|||
sudo: false
|
||||
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.5.1
|
||||
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
|
||||
script: make updatedeps test
|
|
@ -38,6 +38,10 @@ var (
|
|||
// defaultClient is used for performing requests without explicitly making
|
||||
// a new client. It is purposely private to avoid modifications.
|
||||
defaultClient = NewClient()
|
||||
|
||||
// We need to consume response bodies to maintain http connections, but
|
||||
// limit the size we consume to respReadLimit.
|
||||
respReadLimit = int64(4096)
|
||||
)
|
||||
|
||||
// LenReader is an interface implemented by many in-memory io.Reader's. Used
|
||||
|
@ -86,6 +90,23 @@ func NewRequest(method, url string, body io.ReadSeeker) (*Request, error) {
|
|||
// consumers.
|
||||
type RequestLogHook func(*log.Logger, *http.Request, int)
|
||||
|
||||
// ResponseLogHook is like RequestLogHook, but allows running a function
|
||||
// on each HTTP response. This function will be invoked at the end of
|
||||
// every HTTP request executed, regardless of whether a subsequent retry
|
||||
// needs to be performed or not. If the response body is read or closed
|
||||
// from this method, this will affect the response returned from Do().
|
||||
type ResponseLogHook func(*log.Logger, *http.Response)
|
||||
|
||||
// CheckRetry specifies a policy for handling retries. It is called
|
||||
// following each request with the response and error values returned by
|
||||
// the http.Client. If CheckRetry returns false, the Client stops retrying
|
||||
// and returns the response to the caller. If CheckRetry returns an error,
|
||||
// that error value is returned in lieu of the error from the request. The
|
||||
// Client will close any response body when retrying, but if the retry is
|
||||
// aborted it is up to the CheckResponse callback to properly close any
|
||||
// response body before returning.
|
||||
type CheckRetry func(resp *http.Response, err error) (bool, error)
|
||||
|
||||
// Client is used to make HTTP requests. It adds additional functionality
|
||||
// like automatic retries to tolerate minor outages.
|
||||
type Client struct {
|
||||
|
@ -99,6 +120,14 @@ type Client struct {
|
|||
// RequestLogHook allows a user-supplied function to be called
|
||||
// before each retry.
|
||||
RequestLogHook RequestLogHook
|
||||
|
||||
// ResponseLogHook allows a user-supplied function to be called
|
||||
// with the response from each HTTP request executed.
|
||||
ResponseLogHook ResponseLogHook
|
||||
|
||||
// CheckRetry specifies the policy for handling retries, and is called
|
||||
// after each request. The default policy is DefaultRetryPolicy.
|
||||
CheckRetry CheckRetry
|
||||
}
|
||||
|
||||
// NewClient creates a new Client with default settings.
|
||||
|
@ -109,9 +138,27 @@ func NewClient() *Client {
|
|||
RetryWaitMin: defaultRetryWaitMin,
|
||||
RetryWaitMax: defaultRetryWaitMax,
|
||||
RetryMax: defaultRetryMax,
|
||||
CheckRetry: DefaultRetryPolicy,
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultRetryPolicy provides a default callback for Client.CheckRetry, which
|
||||
// will retry on connection errors and server errors.
|
||||
func DefaultRetryPolicy(resp *http.Response, err error) (bool, error) {
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
// Check the response code. We retry on 500-range responses to allow
|
||||
// the server time to recover, as 500's are typically not permanent
|
||||
// errors and may relate to outages on the server side. This will catch
|
||||
// invalid response codes as well, like 0 and 999.
|
||||
if resp.StatusCode == 0 || resp.StatusCode >= 500 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Do wraps calling an HTTP method with retries.
|
||||
func (c *Client) Do(req *Request) (*http.Response, error) {
|
||||
c.Logger.Printf("[DEBUG] %s %s", req.Method, req.URL)
|
||||
|
@ -132,23 +179,36 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
|
|||
|
||||
// Attempt the request
|
||||
resp, err := c.HTTPClient.Do(req.Request)
|
||||
|
||||
// Check if we should continue with retries.
|
||||
checkOK, checkErr := c.CheckRetry(resp, err)
|
||||
|
||||
if err != nil {
|
||||
c.Logger.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err)
|
||||
goto RETRY
|
||||
} else {
|
||||
// Call this here to maintain the behavior of logging all requests,
|
||||
// even if CheckRetry signals to stop.
|
||||
if c.ResponseLogHook != nil {
|
||||
// Call the response logger function if provided.
|
||||
c.ResponseLogHook(c.Logger, resp)
|
||||
}
|
||||
code = resp.StatusCode
|
||||
|
||||
// Check the response code. We retry on 500-range responses to allow
|
||||
// the server time to recover, as 500's are typically not permanent
|
||||
// errors and may relate to outages on the server side.
|
||||
if code%500 < 100 {
|
||||
resp.Body.Close()
|
||||
goto RETRY
|
||||
}
|
||||
return resp, nil
|
||||
|
||||
RETRY:
|
||||
if i == c.RetryMax {
|
||||
// Now decide if we should continue.
|
||||
if !checkOK {
|
||||
if checkErr != nil {
|
||||
err = checkErr
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// We're going to retry, consume any response to reuse the connection.
|
||||
if err == nil {
|
||||
c.drainBody(resp.Body)
|
||||
}
|
||||
|
||||
remain := c.RetryMax - i
|
||||
if remain == 0 {
|
||||
break
|
||||
}
|
||||
wait := backoff(c.RetryWaitMin, c.RetryWaitMax, i)
|
||||
|
@ -156,7 +216,7 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
|
|||
if code > 0 {
|
||||
desc = fmt.Sprintf("%s (status: %d)", desc, code)
|
||||
}
|
||||
c.Logger.Printf("[DEBUG] %s: retrying in %s", desc, wait)
|
||||
c.Logger.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain)
|
||||
time.Sleep(wait)
|
||||
}
|
||||
|
||||
|
@ -165,6 +225,15 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
|
|||
req.Method, req.URL, c.RetryMax+1)
|
||||
}
|
||||
|
||||
// Try to read the response body so we can reuse this connection.
|
||||
func (c *Client) drainBody(body io.ReadCloser) {
|
||||
defer body.Close()
|
||||
_, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit))
|
||||
if err != nil {
|
||||
c.Logger.Printf("[ERR] error reading response body: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Get is a shortcut for doing a GET request without making a new client.
|
||||
func Get(url string) (*http.Response, error) {
|
||||
return defaultClient.Get(url)
|
||||
|
|
|
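Aside: a hedged usage sketch for the go-retryablehttp API vendored above — a custom `CheckRetry` that defers to `DefaultRetryPolicy`, plus a `ResponseLogHook`. Only identifiers that appear in this diff are used; the URL is a placeholder.

```go
package main

import (
	"log"
	"net/http"

	"github.com/hashicorp/go-retryablehttp"
)

func main() {
	c := retryablehttp.NewClient()

	// Give up immediately on 404s instead of falling through to the default
	// connection-error / 500-range policy.
	c.CheckRetry = func(resp *http.Response, err error) (bool, error) {
		if err == nil && resp.StatusCode == http.StatusNotFound {
			return false, nil
		}
		return retryablehttp.DefaultRetryPolicy(resp, err)
	}

	// Log every response, including ones that will be retried.
	c.ResponseLogHook = func(l *log.Logger, resp *http.Response) {
		l.Printf("[DEBUG] %d from %s", resp.StatusCode, resp.Request.URL)
	}

	resp, err := c.Get("https://example.com/") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}
```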
@ -1108,8 +1108,10 @@
|
|||
"revision": "cccb4a1328abbb89898f3ecf4311a05bddc4de6d"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "GBDE1KDl/7c5hlRPYRZ7+C0WQ0g=",
|
||||
"path": "github.com/hashicorp/go-retryablehttp",
|
||||
"revision": "5ec125ef739293cb4d57c3456dd92ba9af29ed6e"
|
||||
"revision": "f4ed9b0fa01a2ac614afe7c897ed2e3d8208f3e8",
|
||||
"revisionTime": "2016-08-10T17:22:55Z"
|
||||
},
|
||||
{
|
||||
"path": "github.com/hashicorp/go-rootcerts",
|
||||
|
|
|
@ -92,7 +92,7 @@ The `ingress` block supports:
  EC2-Classic, or Group IDs if using a VPC.
* `self` - (Optional) If true, the security group itself will be added as
  a source to this ingress rule.
* `to_port` - (Required) The end range port.
* `to_port` - (Required) The end range port (or ICMP code if protocol is "icmp").

The `egress` block supports:

@ -105,7 +105,7 @@ The `egress` block supports:
  EC2-Classic, or Group IDs if using a VPC.
* `self` - (Optional) If true, the security group itself will be added as
  a source to this egress rule.
* `to_port` - (Required) The end range port.
* `to_port` - (Required) The end range port (or ICMP code if protocol is "icmp").

~> **NOTE on Egress rules:** By default, AWS creates an `ALLOW ALL` egress rule when creating a
new Security Group inside of a VPC. When creating a new Security

@ -51,7 +51,7 @@ Only valid with `egress`.
  depending on the `type`. Cannot be specified with `cidr_blocks`.
* `self` - (Optional) If true, the security group itself will be added as
  a source to this ingress rule.
* `to_port` - (Required) The end range port.
* `to_port` - (Required) The end range port (or ICMP code if protocol is "icmp").

## Usage with prefix list IDs