Merge branch 'master' into dynamodb-local

Pablo Cantero 2015-07-30 12:16:07 -03:00
commit 28bef7c0c7
103 changed files with 3069 additions and 659 deletions


@ -1,4 +1,39 @@
## 0.6.1 (Unreleased)
## 0.6.2 (Unreleased)
FEATURES:
* **New resource: `google_compute_instance_group_manager`** [GH-2868]
* **New resource: `google_compute_autoscaler`** [GH-2868]
IMPROVEMENTS:
* core: Add resource IDs to errors coming from `apply`/`refresh` [GH-2815]
* provider/aws: Validate credentials before walking the graph [GH-2730]
* provider/aws: Added website_domain for S3 buckets [GH-2210]
* provider/aws: ELB names are now optional, and generated by Terraform if omitted [GH-2571]
* provider/aws: Downcase RDS engine names to prevent continuous diffs [GH-2745]
* provider/aws: Added `source_dest_check` attribute to the aws_network_interface [GH-2741]
* provider/aws: Clean up externally removed Launch Configurations [GH-2806]
* provider/aws: Compute private ip addresses of ENIs if they are not specified [GH-2743]
* provider/azure: Provide a simpler error when using a Platform Image without a
Storage Service [GH-2861]
* provider/google: `account_file` is now expected to be JSON. Paths are still supported for
backwards compatibility. [GH-2839]
BUG FIXES:
* core: Prevent error duplication in `apply` [GH-2815]
* core: Fix crash when a provider validation adds a warning [GH-2878]
* provider/aws: Fix issue with toggling monitoring in AWS Instances [GH-2794]
* provider/aws: Fix issue with Spot Instance Requests and cancellation [GH-2805]
* provider/aws: Fix issue with checking for ElastiCache cluster cache node status [GH-2842]
* provider/aws: Fix issue when unable to find a Root Block Device name of an Instance Backed
AMI [GH-2646]
* provider/dnsimple: Domain and type should force new records [GH-2777]
* provider/aws: Fix issue with IAM Server Certificates and Chains [GH-2871]
* provider/aws: Fix issue with IAM Server Certificates when using `path` [GH-2871]
## 0.6.1 (July 20, 2015)
FEATURES:


@ -51,9 +51,13 @@ git tag -m "${VERSION}" "${VERSION}"
# Build the release
make release
# Add Godeps for the archive
git add Godeps
# Make an archive with vendored dependencies
stashName=$(git stash)
stashName=$(git stash create)
git archive -o terraform-$VERSION-src.tar.gz $stashName
git reset --hard ${VERSION}
# Zip and push release to bintray
export BINTRAY_API_KEY="..."

Vagrantfile (vendored)

@ -42,6 +42,7 @@ SCRIPT
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.box = "chef/ubuntu-12.04"
config.vm.hostname = "terraform"
config.vm.provision "shell", inline: $script, privileged: false
config.vm.synced_folder '.', '/opt/gopath/src/github.com/hashicorp/terraform'


@ -122,7 +122,7 @@ func autoscalingTagsFromMap(m map[string]interface{}, resourceID string) []*auto
result = append(result, &autoscaling.Tag{
Key: aws.String(k),
Value: aws.String(attr["value"].(string)),
PropagateAtLaunch: aws.Boolean(attr["propagate_at_launch"].(bool)),
PropagateAtLaunch: aws.Bool(attr["propagate_at_launch"].(bool)),
ResourceID: aws.String(resourceID),
ResourceType: aws.String("auto-scaling-group"),
})
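
Most of the mechanical churn in this commit is the aws-sdk-go pointer-helper rename visible in this hunk: aws.Boolean becomes aws.Bool, aws.Long becomes aws.Int64, and aws.Double becomes aws.Float64. The helpers exist because the SDK's request structs use pointer fields, so literals have to be wrapped to distinguish "set to the zero value" from "not set". Below is a minimal standalone sketch of that pattern, assuming an aws-sdk-go version that ships the renamed helpers; nothing in it is part of the commit itself.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// The renamed helpers wrap literals into the pointers the SDK structs expect.
	propagate := aws.Bool(true)       // formerly aws.Boolean
	capacity := aws.Int64(3)          // formerly aws.Long
	threshold := aws.Float64(80.0)    // formerly aws.Double
	name := aws.String("example-asg") // unchanged

	fmt.Println(*propagate, *capacity, *threshold, *name)
}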


@ -8,6 +8,7 @@ import (
"github.com/hashicorp/terraform/helper/multierror"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/aws/aws-sdk-go/service/cloudwatch"
@ -83,8 +84,16 @@ func (c *Config) Client() (interface{}, error) {
creds := credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token)
awsConfig := &aws.Config{
Credentials: creds,
Region: c.Region,
MaxRetries: c.MaxRetries,
Region: aws.String(c.Region),
MaxRetries: aws.Int(c.MaxRetries),
}
log.Println("[INFO] Initializing IAM Connection")
client.iamconn = iam.New(awsConfig)
err := c.ValidateCredentials(client.iamconn)
if err != nil {
errs = append(errs, err)
}
awsDynamoDBConfig := &aws.Config{
Credentials: creds,
@ -111,15 +120,12 @@ func (c *Config) Client() (interface{}, error) {
log.Println("[INFO] Initializing RDS Connection")
client.rdsconn = rds.New(awsConfig)
log.Println("[INFO] Initializing IAM Connection")
client.iamconn = iam.New(awsConfig)
log.Println("[INFO] Initializing Kinesis Connection")
client.kinesisconn = kinesis.New(awsConfig)
err := c.ValidateAccountId(client.iamconn)
if err != nil {
errs = append(errs, err)
authErr := c.ValidateAccountId(client.iamconn)
if authErr != nil {
errs = append(errs, authErr)
}
log.Println("[INFO] Initializing AutoScaling connection")
@ -137,8 +143,8 @@ func (c *Config) Client() (interface{}, error) {
log.Println("[INFO] Initializing Route 53 connection")
client.r53conn = route53.New(&aws.Config{
Credentials: creds,
Region: "us-east-1",
MaxRetries: c.MaxRetries,
Region: aws.String("us-east-1"),
MaxRetries: aws.Int(c.MaxRetries),
})
log.Println("[INFO] Initializing Elasticache Connection")
@ -173,6 +179,19 @@ func (c *Config) ValidateRegion() error {
return fmt.Errorf("Not a valid region: %s", c.Region)
}
// Validate credentials early and fail before we do any graph walking
func (c *Config) ValidateCredentials(iamconn *iam.IAM) error {
_, err := iamconn.GetUser(nil)
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == "SignatureDoesNotMatch" {
return fmt.Errorf("Failed authenticating with AWS: please verify credentials")
}
}
return err
}
// ValidateAccountId returns a context-specific error if the configured account
// id is explicitly forbidden or not authorised; and nil if it is authorised.
func (c *Config) ValidateAccountId(iamconn *iam.IAM) error {
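
The comment above spells out the intent: authenticate the IAM client immediately so that bad keys abort the run before any graph walking, which is the changelog's "Validate credentials before walking the graph" entry. Below is a standalone sketch of the same awserr inspection pattern, assuming the aws-sdk-go packages vendored in this tree; it is an illustration only, not the provider's actual wiring.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/service/iam"
)

// validateCredentials mirrors the check above: GetUser(nil) describes the
// calling identity, and a SignatureDoesNotMatch code means the keys are bad.
func validateCredentials(conn *iam.IAM) error {
	_, err := conn.GetUser(nil)
	if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "SignatureDoesNotMatch" {
		return fmt.Errorf("Failed authenticating with AWS: please verify credentials")
	}
	return err
}

func main() {
	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
	conn := iam.New(&aws.Config{Credentials: creds, Region: aws.String("us-east-1")})
	if err := validateCredentials(conn); err != nil {
		log.Fatal(err) // fail fast, before any resources are touched
	}
}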


@ -26,12 +26,12 @@ func expandNetworkAclEntries(configured []interface{}, entryType string) ([]*ec2
e := &ec2.NetworkACLEntry{
Protocol: aws.String(strconv.Itoa(p)),
PortRange: &ec2.PortRange{
From: aws.Long(int64(data["from_port"].(int))),
To: aws.Long(int64(data["to_port"].(int))),
From: aws.Int64(int64(data["from_port"].(int))),
To: aws.Int64(int64(data["to_port"].(int))),
},
Egress: aws.Boolean((entryType == "egress")),
Egress: aws.Bool((entryType == "egress")),
RuleAction: aws.String(data["action"].(string)),
RuleNumber: aws.Long(int64(data["rule_no"].(int))),
RuleNumber: aws.Int64(int64(data["rule_no"].(int))),
CIDRBlock: aws.String(data["cidr_block"].(string)),
}
@ -39,10 +39,10 @@ func expandNetworkAclEntries(configured []interface{}, entryType string) ([]*ec2
if p == 1 {
e.ICMPTypeCode = &ec2.ICMPTypeCode{}
if v, ok := data["icmp_code"]; ok {
e.ICMPTypeCode.Code = aws.Long(int64(v.(int)))
e.ICMPTypeCode.Code = aws.Int64(int64(v.(int)))
}
if v, ok := data["icmp_type"]; ok {
e.ICMPTypeCode.Type = aws.Long(int64(v.(int)))
e.ICMPTypeCode.Type = aws.Int64(int64(v.(int)))
}
}


@ -41,35 +41,35 @@ func Test_expandNetworkACLEntry(t *testing.T) {
&ec2.NetworkACLEntry{
Protocol: aws.String("6"),
PortRange: &ec2.PortRange{
From: aws.Long(22),
To: aws.Long(22),
From: aws.Int64(22),
To: aws.Int64(22),
},
RuleAction: aws.String("deny"),
RuleNumber: aws.Long(1),
RuleNumber: aws.Int64(1),
CIDRBlock: aws.String("0.0.0.0/0"),
Egress: aws.Boolean(true),
Egress: aws.Bool(true),
},
&ec2.NetworkACLEntry{
Protocol: aws.String("6"),
PortRange: &ec2.PortRange{
From: aws.Long(443),
To: aws.Long(443),
From: aws.Int64(443),
To: aws.Int64(443),
},
RuleAction: aws.String("deny"),
RuleNumber: aws.Long(2),
RuleNumber: aws.Int64(2),
CIDRBlock: aws.String("0.0.0.0/0"),
Egress: aws.Boolean(true),
Egress: aws.Bool(true),
},
&ec2.NetworkACLEntry{
Protocol: aws.String("-1"),
PortRange: &ec2.PortRange{
From: aws.Long(443),
To: aws.Long(443),
From: aws.Int64(443),
To: aws.Int64(443),
},
RuleAction: aws.String("deny"),
RuleNumber: aws.Long(2),
RuleNumber: aws.Int64(2),
CIDRBlock: aws.String("0.0.0.0/0"),
Egress: aws.Boolean(true),
Egress: aws.Bool(true),
},
}
@ -88,21 +88,21 @@ func Test_flattenNetworkACLEntry(t *testing.T) {
&ec2.NetworkACLEntry{
Protocol: aws.String("tcp"),
PortRange: &ec2.PortRange{
From: aws.Long(22),
To: aws.Long(22),
From: aws.Int64(22),
To: aws.Int64(22),
},
RuleAction: aws.String("deny"),
RuleNumber: aws.Long(1),
RuleNumber: aws.Int64(1),
CIDRBlock: aws.String("0.0.0.0/0"),
},
&ec2.NetworkACLEntry{
Protocol: aws.String("tcp"),
PortRange: &ec2.PortRange{
From: aws.Long(443),
To: aws.Long(443),
From: aws.Int64(443),
To: aws.Int64(443),
},
RuleAction: aws.String("deny"),
RuleNumber: aws.Long(2),
RuleNumber: aws.Int64(2),
CIDRBlock: aws.String("0.0.0.0/0"),
},
}


@ -64,7 +64,7 @@ func resourceAwsAppCookieStickinessPolicyCreate(d *schema.ResourceData, meta int
setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{
LoadBalancerName: aws.String(d.Get("load_balancer").(string)),
LoadBalancerPort: aws.Long(int64(d.Get("lb_port").(int))),
LoadBalancerPort: aws.Int64(int64(d.Get("lb_port").(int))),
PolicyNames: []*string{aws.String(d.Get("name").(string))},
}
@ -129,7 +129,7 @@ func resourceAwsAppCookieStickinessPolicyDelete(d *schema.ResourceData, meta int
// policy itself.
setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{
LoadBalancerName: aws.String(d.Get("load_balancer").(string)),
LoadBalancerPort: aws.Long(int64(d.Get("lb_port").(int))),
LoadBalancerPort: aws.Int64(int64(d.Get("lb_port").(int))),
PolicyNames: []*string{},
}


@ -131,8 +131,8 @@ func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{})
var autoScalingGroupOpts autoscaling.CreateAutoScalingGroupInput
autoScalingGroupOpts.AutoScalingGroupName = aws.String(d.Get("name").(string))
autoScalingGroupOpts.LaunchConfigurationName = aws.String(d.Get("launch_configuration").(string))
autoScalingGroupOpts.MinSize = aws.Long(int64(d.Get("min_size").(int)))
autoScalingGroupOpts.MaxSize = aws.Long(int64(d.Get("max_size").(int)))
autoScalingGroupOpts.MinSize = aws.Int64(int64(d.Get("min_size").(int)))
autoScalingGroupOpts.MaxSize = aws.Int64(int64(d.Get("max_size").(int)))
// Availability Zones are optional if VPC Zone Identifier(s) are specified
if v, ok := d.GetOk("availability_zones"); ok && v.(*schema.Set).Len() > 0 {
@ -145,7 +145,7 @@ func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{})
}
if v, ok := d.GetOk("default_cooldown"); ok {
autoScalingGroupOpts.DefaultCooldown = aws.Long(int64(v.(int)))
autoScalingGroupOpts.DefaultCooldown = aws.Int64(int64(v.(int)))
}
if v, ok := d.GetOk("health_check_type"); ok && v.(string) != "" {
@ -153,11 +153,11 @@ func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{})
}
if v, ok := d.GetOk("desired_capacity"); ok {
autoScalingGroupOpts.DesiredCapacity = aws.Long(int64(v.(int)))
autoScalingGroupOpts.DesiredCapacity = aws.Int64(int64(v.(int)))
}
if v, ok := d.GetOk("health_check_grace_period"); ok {
autoScalingGroupOpts.HealthCheckGracePeriod = aws.Long(int64(v.(int)))
autoScalingGroupOpts.HealthCheckGracePeriod = aws.Int64(int64(v.(int)))
}
if v, ok := d.GetOk("load_balancers"); ok && v.(*schema.Set).Len() > 0 {
@ -224,11 +224,11 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{})
}
if d.HasChange("default_cooldown") {
opts.DefaultCooldown = aws.Long(int64(d.Get("default_cooldown").(int)))
opts.DefaultCooldown = aws.Int64(int64(d.Get("default_cooldown").(int)))
}
if d.HasChange("desired_capacity") {
opts.DesiredCapacity = aws.Long(int64(d.Get("desired_capacity").(int)))
opts.DesiredCapacity = aws.Int64(int64(d.Get("desired_capacity").(int)))
}
if d.HasChange("launch_configuration") {
@ -236,19 +236,19 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{})
}
if d.HasChange("min_size") {
opts.MinSize = aws.Long(int64(d.Get("min_size").(int)))
opts.MinSize = aws.Int64(int64(d.Get("min_size").(int)))
}
if d.HasChange("max_size") {
opts.MaxSize = aws.Long(int64(d.Get("max_size").(int)))
opts.MaxSize = aws.Int64(int64(d.Get("max_size").(int)))
}
if d.HasChange("health_check_grace_period") {
opts.HealthCheckGracePeriod = aws.Long(int64(d.Get("health_check_grace_period").(int)))
opts.HealthCheckGracePeriod = aws.Int64(int64(d.Get("health_check_grace_period").(int)))
}
if d.HasChange("health_check_type") {
opts.HealthCheckGracePeriod = aws.Long(int64(d.Get("health_check_grace_period").(int)))
opts.HealthCheckGracePeriod = aws.Int64(int64(d.Get("health_check_grace_period").(int)))
opts.HealthCheckType = aws.String(d.Get("health_check_type").(string))
}
@ -342,7 +342,7 @@ func resourceAwsAutoscalingGroupDelete(d *schema.ResourceData, meta interface{})
// and then delete the group. This bypasses that and leaves
// resources potentially dangling.
if d.Get("force_delete").(bool) {
deleteopts.ForceDelete = aws.Boolean(true)
deleteopts.ForceDelete = aws.Bool(true)
}
// We retry the delete operation to handle InUse/InProgress errors coming
@ -418,9 +418,9 @@ func resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{})
log.Printf("[DEBUG] Reducing autoscaling group capacity to zero")
opts := autoscaling.UpdateAutoScalingGroupInput{
AutoScalingGroupName: aws.String(d.Id()),
DesiredCapacity: aws.Long(0),
MinSize: aws.Long(0),
MaxSize: aws.Long(0),
DesiredCapacity: aws.Int64(0),
MinSize: aws.Int64(0),
MaxSize: aws.Int64(0),
}
if _, err := conn.UpdateAutoScalingGroup(&opts); err != nil {
return fmt.Errorf("Error setting capacity to zero to drain: %s", err)


@ -217,7 +217,7 @@ func testAccCheckAWSAutoScalingGroupAttributes(group *autoscaling.Group) resourc
t := &autoscaling.TagDescription{
Key: aws.String("Foo"),
Value: aws.String("foo-bar"),
PropagateAtLaunch: aws.Boolean(true),
PropagateAtLaunch: aws.Bool(true),
ResourceType: aws.String("auto-scaling-group"),
ResourceID: group.AutoScalingGroupName,
}


@ -140,15 +140,15 @@ func getAwsAutoscalingPutScalingPolicyInput(d *schema.ResourceData) autoscaling.
}
if v, ok := d.GetOk("cooldown"); ok {
params.Cooldown = aws.Long(int64(v.(int)))
params.Cooldown = aws.Int64(int64(v.(int)))
}
if v, ok := d.GetOk("scaling_adjustment"); ok {
params.ScalingAdjustment = aws.Long(int64(v.(int)))
params.ScalingAdjustment = aws.Int64(int64(v.(int)))
}
if v, ok := d.GetOk("min_adjustment_step"); ok {
params.MinAdjustmentStep = aws.Long(int64(v.(int)))
params.MinAdjustmentStep = aws.Int64(int64(v.(int)))
}
return params


@ -197,16 +197,16 @@ func getAwsCloudWatchPutMetricAlarmInput(d *schema.ResourceData) cloudwatch.PutM
params := cloudwatch.PutMetricAlarmInput{
AlarmName: aws.String(d.Get("alarm_name").(string)),
ComparisonOperator: aws.String(d.Get("comparison_operator").(string)),
EvaluationPeriods: aws.Long(int64(d.Get("evaluation_periods").(int))),
EvaluationPeriods: aws.Int64(int64(d.Get("evaluation_periods").(int))),
MetricName: aws.String(d.Get("metric_name").(string)),
Namespace: aws.String(d.Get("namespace").(string)),
Period: aws.Long(int64(d.Get("period").(int))),
Period: aws.Int64(int64(d.Get("period").(int))),
Statistic: aws.String(d.Get("statistic").(string)),
Threshold: aws.Double(d.Get("threshold").(float64)),
Threshold: aws.Float64(d.Get("threshold").(float64)),
}
if v := d.Get("actions_enabled"); v != nil {
params.ActionsEnabled = aws.Boolean(v.(bool))
params.ActionsEnabled = aws.Bool(v.(bool))
}
if v, ok := d.GetOk("alarm_description"); ok {


@ -48,7 +48,7 @@ func resourceAwsCustomerGatewayCreate(d *schema.ResourceData, meta interface{})
conn := meta.(*AWSClient).ec2conn
createOpts := &ec2.CreateCustomerGatewayInput{
BGPASN: aws.Long(int64(d.Get("bgp_asn").(int))),
BGPASN: aws.Int64(int64(d.Get("bgp_asn").(int))),
PublicIP: aws.String(d.Get("ip_address").(string)),
Type: aws.String(d.Get("type").(string)),
}


@ -11,7 +11,7 @@ import (
"github.com/hashicorp/terraform/terraform"
)
func TestAccCustomerGateway_basic(t *testing.T) {
func TestAccAWSCustomerGateway_basic(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,


@ -46,6 +46,10 @@ func resourceAwsDbInstance() *schema.Resource {
Type: schema.TypeString,
Required: true,
ForceNew: true,
StateFunc: func(v interface{}) string {
value := v.(string)
return strings.ToLower(value)
},
},
"engine_version": &schema.Schema{
@ -268,11 +272,11 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
Tags: tags,
}
if attr, ok := d.GetOk("iops"); ok {
opts.IOPS = aws.Long(int64(attr.(int)))
opts.IOPS = aws.Int64(int64(attr.(int)))
}
if attr, ok := d.GetOk("port"); ok {
opts.Port = aws.Long(int64(attr.(int)))
opts.Port = aws.Int64(int64(attr.(int)))
}
if attr, ok := d.GetOk("availability_zone"); ok {
@ -280,7 +284,7 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
}
if attr, ok := d.GetOk("publicly_accessible"); ok {
opts.PubliclyAccessible = aws.Boolean(attr.(bool))
opts.PubliclyAccessible = aws.Bool(attr.(bool))
}
_, err := conn.CreateDBInstanceReadReplica(&opts)
if err != nil {
@ -295,7 +299,7 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
}
if attr, ok := d.GetOk("auto_minor_version_upgrade"); ok {
opts.AutoMinorVersionUpgrade = aws.Boolean(attr.(bool))
opts.AutoMinorVersionUpgrade = aws.Bool(attr.(bool))
}
if attr, ok := d.GetOk("availability_zone"); ok {
@ -311,7 +315,7 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
}
if attr, ok := d.GetOk("iops"); ok {
opts.IOPS = aws.Long(int64(attr.(int)))
opts.IOPS = aws.Int64(int64(attr.(int)))
}
if attr, ok := d.GetOk("license_model"); ok {
@ -319,7 +323,7 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
}
if attr, ok := d.GetOk("multi_az"); ok {
opts.MultiAZ = aws.Boolean(attr.(bool))
opts.MultiAZ = aws.Bool(attr.(bool))
}
if attr, ok := d.GetOk("option_group_name"); ok {
@ -327,11 +331,11 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
}
if attr, ok := d.GetOk("port"); ok {
opts.Port = aws.Long(int64(attr.(int)))
opts.Port = aws.Int64(int64(attr.(int)))
}
if attr, ok := d.GetOk("publicly_accessible"); ok {
opts.PubliclyAccessible = aws.Boolean(attr.(bool))
opts.PubliclyAccessible = aws.Bool(attr.(bool))
}
if attr, ok := d.GetOk("tde_credential_arn"); ok {
@ -348,7 +352,7 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
}
} else {
opts := rds.CreateDBInstanceInput{
AllocatedStorage: aws.Long(int64(d.Get("allocated_storage").(int))),
AllocatedStorage: aws.Int64(int64(d.Get("allocated_storage").(int))),
DBName: aws.String(d.Get("name").(string)),
DBInstanceClass: aws.String(d.Get("instance_class").(string)),
DBInstanceIdentifier: aws.String(d.Get("identifier").(string)),
@ -356,14 +360,14 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
MasterUserPassword: aws.String(d.Get("password").(string)),
Engine: aws.String(d.Get("engine").(string)),
EngineVersion: aws.String(d.Get("engine_version").(string)),
StorageEncrypted: aws.Boolean(d.Get("storage_encrypted").(bool)),
StorageEncrypted: aws.Bool(d.Get("storage_encrypted").(bool)),
Tags: tags,
}
attr := d.Get("backup_retention_period")
opts.BackupRetentionPeriod = aws.Long(int64(attr.(int)))
opts.BackupRetentionPeriod = aws.Int64(int64(attr.(int)))
if attr, ok := d.GetOk("multi_az"); ok {
opts.MultiAZ = aws.Boolean(attr.(bool))
opts.MultiAZ = aws.Bool(attr.(bool))
}
if attr, ok := d.GetOk("maintenance_window"); ok {
@ -405,11 +409,11 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
}
if attr, ok := d.GetOk("iops"); ok {
opts.IOPS = aws.Long(int64(attr.(int)))
opts.IOPS = aws.Int64(int64(attr.(int)))
}
if attr, ok := d.GetOk("port"); ok {
opts.Port = aws.Long(int64(attr.(int)))
opts.Port = aws.Int64(int64(attr.(int)))
}
if attr, ok := d.GetOk("availability_zone"); ok {
@ -417,7 +421,7 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
}
if attr, ok := d.GetOk("publicly_accessible"); ok {
opts.PubliclyAccessible = aws.Boolean(attr.(bool))
opts.PubliclyAccessible = aws.Bool(attr.(bool))
}
log.Printf("[DEBUG] DB Instance create configuration: %#v", opts)
@ -567,7 +571,7 @@ func resourceAwsDbInstanceDelete(d *schema.ResourceData, meta interface{}) error
finalSnapshot := d.Get("final_snapshot_identifier").(string)
if finalSnapshot == "" {
opts.SkipFinalSnapshot = aws.Boolean(true)
opts.SkipFinalSnapshot = aws.Bool(true)
} else {
opts.FinalDBSnapshotIdentifier = aws.String(finalSnapshot)
}
@ -601,7 +605,7 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error
d.Partial(true)
req := &rds.ModifyDBInstanceInput{
ApplyImmediately: aws.Boolean(d.Get("apply_immediately").(bool)),
ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)),
DBInstanceIdentifier: aws.String(d.Id()),
}
d.SetPartial("apply_immediately")
@ -609,12 +613,12 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error
requestUpdate := false
if d.HasChange("allocated_storage") {
d.SetPartial("allocated_storage")
req.AllocatedStorage = aws.Long(int64(d.Get("allocated_storage").(int)))
req.AllocatedStorage = aws.Int64(int64(d.Get("allocated_storage").(int)))
requestUpdate = true
}
if d.HasChange("backup_retention_period") {
d.SetPartial("backup_retention_period")
req.BackupRetentionPeriod = aws.Long(int64(d.Get("backup_retention_period").(int)))
req.BackupRetentionPeriod = aws.Int64(int64(d.Get("backup_retention_period").(int)))
requestUpdate = true
}
if d.HasChange("instance_class") {
@ -634,7 +638,7 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error
}
if d.HasChange("iops") {
d.SetPartial("iops")
req.IOPS = aws.Long(int64(d.Get("iops").(int)))
req.IOPS = aws.Int64(int64(d.Get("iops").(int)))
requestUpdate = true
}
if d.HasChange("backup_window") {
@ -654,7 +658,7 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error
}
if d.HasChange("multi_az") {
d.SetPartial("multi_az")
req.MultiAZ = aws.Boolean(d.Get("multi_az").(bool))
req.MultiAZ = aws.Bool(d.Get("multi_az").(bool))
requestUpdate = true
}
if d.HasChange("storage_type") {
@ -702,7 +706,7 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error
DBInstanceIdentifier: aws.String(d.Id()),
}
attr := d.Get("backup_retention_period")
opts.BackupRetentionPeriod = aws.Long(int64(attr.(int)))
opts.BackupRetentionPeriod = aws.Int64(int64(attr.(int)))
if attr, ok := d.GetOk("backup_window"); ok {
opts.PreferredBackupWindow = aws.String(attr.(string))
}
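
The StateFunc added to the engine attribute earlier in this file lowercases the value before it reaches the state file, which is what stops case-only differences such as "MySQL" vs "mysql" from producing endless diffs (the changelog's "Downcase RDS engine names" entry). Below is a small standalone sketch of the StateFunc hook, assuming the helper/schema package as used in this tree; it is an illustration only.

package main

import (
	"fmt"
	"strings"

	"github.com/hashicorp/terraform/helper/schema"
)

// engineSchema mirrors the pattern above: StateFunc normalizes the value that
// gets written to state, so differently-cased inputs converge on one form.
func engineSchema() *schema.Schema {
	return &schema.Schema{
		Type:     schema.TypeString,
		Required: true,
		ForceNew: true,
		StateFunc: func(v interface{}) string {
			return strings.ToLower(v.(string))
		},
	}
}

func main() {
	s := engineSchema()
	fmt.Println(s.StateFunc("MySQL")) // prints "mysql"
}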


@ -176,7 +176,7 @@ resource "aws_db_instance" "bar" {
identifier = "foobarbaz-test-terraform-%d"
allocated_storage = 10
engine = "mysql"
engine = "MySQL"
engine_version = "5.6.21"
instance_class = "db.t1.micro"
name = "baz"


@ -166,8 +166,8 @@ func resourceAwsDynamoDbTableCreate(d *schema.ResourceData, meta interface{}) er
log.Printf("[DEBUG] DynamoDB table create: %s", name)
throughput := &dynamodb.ProvisionedThroughput{
ReadCapacityUnits: aws.Long(int64(d.Get("read_capacity").(int))),
WriteCapacityUnits: aws.Long(int64(d.Get("write_capacity").(int))),
ReadCapacityUnits: aws.Int64(int64(d.Get("read_capacity").(int))),
WriteCapacityUnits: aws.Int64(int64(d.Get("write_capacity").(int))),
}
hash_key_name := d.Get("hash_key").(string)
@ -318,8 +318,8 @@ func resourceAwsDynamoDbTableUpdate(d *schema.ResourceData, meta interface{}) er
}
throughput := &dynamodb.ProvisionedThroughput{
ReadCapacityUnits: aws.Long(int64(d.Get("read_capacity").(int))),
WriteCapacityUnits: aws.Long(int64(d.Get("write_capacity").(int))),
ReadCapacityUnits: aws.Int64(int64(d.Get("read_capacity").(int))),
WriteCapacityUnits: aws.Int64(int64(d.Get("write_capacity").(int))),
}
req.ProvisionedThroughput = throughput
@ -486,8 +486,8 @@ func resourceAwsDynamoDbTableUpdate(d *schema.ResourceData, meta interface{}) er
Update: &dynamodb.UpdateGlobalSecondaryIndexAction{
IndexName: aws.String(gsidata["name"].(string)),
ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
WriteCapacityUnits: aws.Long(int64(gsiWriteCapacity)),
ReadCapacityUnits: aws.Long(int64(gsiReadCapacity)),
WriteCapacityUnits: aws.Int64(int64(gsiWriteCapacity)),
ReadCapacityUnits: aws.Int64(int64(gsiReadCapacity)),
},
},
}
@ -634,8 +634,8 @@ func createGSIFromData(data *map[string]interface{}) dynamodb.GlobalSecondaryInd
KeySchema: key_schema,
Projection: projection,
ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
WriteCapacityUnits: aws.Long(int64(writeCapacity)),
ReadCapacityUnits: aws.Long(int64(readCapacity)),
WriteCapacityUnits: aws.Int64(int64(writeCapacity)),
ReadCapacityUnits: aws.Int64(int64(readCapacity)),
},
}
}


@ -74,16 +74,16 @@ func resourceAwsEbsVolumeCreate(d *schema.ResourceData, meta interface{}) error
AvailabilityZone: aws.String(d.Get("availability_zone").(string)),
}
if value, ok := d.GetOk("encrypted"); ok {
request.Encrypted = aws.Boolean(value.(bool))
request.Encrypted = aws.Bool(value.(bool))
}
if value, ok := d.GetOk("iops"); ok {
request.IOPS = aws.Long(int64(value.(int)))
request.IOPS = aws.Int64(int64(value.(int)))
}
if value, ok := d.GetOk("kms_key_id"); ok {
request.KMSKeyID = aws.String(value.(string))
}
if value, ok := d.GetOk("size"); ok {
request.Size = aws.Long(int64(value.(int)))
request.Size = aws.Int64(int64(value.(int)))
}
if value, ok := d.GetOk("snapshot_id"); ok {
request.SnapshotID = aws.String(value.(string))


@ -6,7 +6,6 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/service/ecs"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
@ -58,7 +57,7 @@ func resourceAwsEcsClusterRead(d *schema.ResourceData, meta interface{}) error {
if err != nil {
return err
}
log.Printf("[DEBUG] Received ECS clusters: %s", awsutil.StringValue(out.Clusters))
log.Printf("[DEBUG] Received ECS clusters: %s", out.Clusters)
d.SetId(*out.Clusters[0].ClusterARN)
d.Set("name", *out.Clusters[0].ClusterName)
@ -77,7 +76,7 @@ func resourceAwsEcsClusterDelete(d *schema.ResourceData, meta interface{}) error
})
if err == nil {
log.Printf("[DEBUG] ECS cluster %s deleted: %s", d.Id(), awsutil.StringValue(out))
log.Printf("[DEBUG] ECS cluster %s deleted: %s", d.Id(), out)
return nil
}
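
This file (and several below) drops the awsutil import and logs SDK values with a plain %s verb; the simpler form works because the generated SDK types carry their own String() method, so fmt can render them without awsutil.StringValue. Below is a dependency-free sketch of that fmt.Stringer mechanism, using a local type rather than the SDK's; it is an illustration only.

package main

import "fmt"

type cluster struct {
	Name   string
	Status string
}

// String makes cluster satisfy fmt.Stringer, so %s calls it automatically.
func (c cluster) String() string {
	return fmt.Sprintf("{Name: %q, Status: %q}", c.Name, c.Status)
}

func main() {
	c := cluster{Name: "terraform-ecs", Status: "ACTIVE"}
	fmt.Printf("[DEBUG] Received ECS cluster: %s\n", c) // uses cluster.String()
}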


@ -9,7 +9,6 @@ import (
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/service/ecs"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/terraform/helper/hashcode"
@ -88,7 +87,7 @@ func resourceAwsEcsServiceCreate(d *schema.ResourceData, meta interface{}) error
input := ecs.CreateServiceInput{
ServiceName: aws.String(d.Get("name").(string)),
TaskDefinition: aws.String(d.Get("task_definition").(string)),
DesiredCount: aws.Long(int64(d.Get("desired_count").(int))),
DesiredCount: aws.Int64(int64(d.Get("desired_count").(int))),
ClientToken: aws.String(resource.UniqueId()),
}
@ -98,14 +97,14 @@ func resourceAwsEcsServiceCreate(d *schema.ResourceData, meta interface{}) error
loadBalancers := expandEcsLoadBalancers(d.Get("load_balancer").(*schema.Set).List())
if len(loadBalancers) > 0 {
log.Printf("[DEBUG] Adding ECS load balancers: %s", awsutil.StringValue(loadBalancers))
log.Printf("[DEBUG] Adding ECS load balancers: %s", loadBalancers)
input.LoadBalancers = loadBalancers
}
if v, ok := d.GetOk("iam_role"); ok {
input.Role = aws.String(v.(string))
}
log.Printf("[DEBUG] Creating ECS service: %s", awsutil.StringValue(input))
log.Printf("[DEBUG] Creating ECS service: %s", input)
out, err := conn.CreateService(&input)
if err != nil {
return err
@ -139,7 +138,7 @@ func resourceAwsEcsServiceRead(d *schema.ResourceData, meta interface{}) error {
}
service := out.Services[0]
log.Printf("[DEBUG] Received ECS service %s", awsutil.StringValue(service))
log.Printf("[DEBUG] Received ECS service %s", service)
d.SetId(*service.ServiceARN)
d.Set("name", *service.ServiceName)
@ -177,7 +176,7 @@ func resourceAwsEcsServiceUpdate(d *schema.ResourceData, meta interface{}) error
if d.HasChange("desired_count") {
_, n := d.GetChange("desired_count")
input.DesiredCount = aws.Long(int64(n.(int)))
input.DesiredCount = aws.Int64(int64(n.(int)))
}
if d.HasChange("task_definition") {
_, n := d.GetChange("task_definition")
@ -189,7 +188,7 @@ func resourceAwsEcsServiceUpdate(d *schema.ResourceData, meta interface{}) error
return err
}
service := out.Service
log.Printf("[DEBUG] Updated ECS service %s", awsutil.StringValue(service))
log.Printf("[DEBUG] Updated ECS service %s", service)
return resourceAwsEcsServiceRead(d, meta)
}
@ -217,7 +216,7 @@ func resourceAwsEcsServiceDelete(d *schema.ResourceData, meta interface{}) error
_, err = conn.UpdateService(&ecs.UpdateServiceInput{
Service: aws.String(d.Id()),
Cluster: aws.String(d.Get("cluster").(string)),
DesiredCount: aws.Long(int64(0)),
DesiredCount: aws.Int64(int64(0)),
})
if err != nil {
return err
@ -229,7 +228,7 @@ func resourceAwsEcsServiceDelete(d *schema.ResourceData, meta interface{}) error
Cluster: aws.String(d.Get("cluster").(string)),
}
log.Printf("[DEBUG] Deleting ECS service %s", awsutil.StringValue(input))
log.Printf("[DEBUG] Deleting ECS service %s", input)
out, err := conn.DeleteService(&input)
if err != nil {
return err


@ -8,7 +8,6 @@ import (
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/service/ecs"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
@ -91,7 +90,7 @@ func resourceAwsEcsTaskDefinitionCreate(d *schema.ResourceData, meta interface{}
input.Volumes = volumes
}
log.Printf("[DEBUG] Registering ECS task definition: %s", awsutil.StringValue(input))
log.Printf("[DEBUG] Registering ECS task definition: %s", input)
out, err := conn.RegisterTaskDefinition(&input)
if err != nil {
return err
@ -118,7 +117,7 @@ func resourceAwsEcsTaskDefinitionRead(d *schema.ResourceData, meta interface{})
if err != nil {
return err
}
log.Printf("[DEBUG] Received task definition %s", awsutil.StringValue(out))
log.Printf("[DEBUG] Received task definition %s", out)
taskDefinition := out.TaskDefinition


@ -9,7 +9,6 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/service/elasticache"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/terraform/helper/hashcode"
@ -160,10 +159,10 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{
req := &elasticache.CreateCacheClusterInput{
CacheClusterID: aws.String(clusterId),
CacheNodeType: aws.String(nodeType),
NumCacheNodes: aws.Long(numNodes),
NumCacheNodes: aws.Int64(numNodes),
Engine: aws.String(engine),
EngineVersion: aws.String(engineVersion),
Port: aws.Long(port),
Port: aws.Int64(port),
CacheSubnetGroupName: aws.String(subnetGroupName),
CacheSecurityGroupNames: securityNames,
SecurityGroupIDs: securityIds,
@ -186,11 +185,13 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{
log.Printf("[DEBUG] Restoring Redis cluster from S3 snapshot: %#v", s)
}
_, err := conn.CreateCacheCluster(req)
resp, err := conn.CreateCacheCluster(req)
if err != nil {
return fmt.Errorf("Error creating Elasticache: %s", err)
}
d.SetId(*resp.CacheCluster.CacheClusterID)
pending := []string{"creating"}
stateConf := &resource.StateChangeConf{
Pending: pending,
@ -207,8 +208,6 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{
return fmt.Errorf("Error waiting for elasticache (%s) to be created: %s", d.Id(), sterr)
}
d.SetId(clusterId)
return resourceAwsElasticacheClusterRead(d, meta)
}
@ -216,7 +215,7 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{})
conn := meta.(*AWSClient).elasticacheconn
req := &elasticache.DescribeCacheClustersInput{
CacheClusterID: aws.String(d.Id()),
ShowCacheNodeInfo: aws.Boolean(true),
ShowCacheNodeInfo: aws.Bool(true),
}
res, err := conn.DescribeCacheClusters(req)
@ -281,7 +280,7 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{
req := &elasticache.ModifyCacheClusterInput{
CacheClusterID: aws.String(d.Id()),
ApplyImmediately: aws.Boolean(d.Get("apply_immediately").(bool)),
ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)),
}
requestUpdate := false
@ -308,12 +307,12 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{
}
if d.HasChange("num_cache_nodes") {
req.NumCacheNodes = aws.Long(int64(d.Get("num_cache_nodes").(int)))
req.NumCacheNodes = aws.Int64(int64(d.Get("num_cache_nodes").(int)))
requestUpdate = true
}
if requestUpdate {
log.Printf("[DEBUG] Modifying ElastiCache Cluster (%s), opts:\n%s", d.Id(), awsutil.StringValue(req))
log.Printf("[DEBUG] Modifying ElastiCache Cluster (%s), opts:\n%s", d.Id(), req)
_, err := conn.ModifyCacheCluster(req)
if err != nil {
return fmt.Errorf("[WARN] Error updating ElastiCache cluster (%s), error: %s", d.Id(), err)
@ -348,7 +347,7 @@ func setCacheNodeData(d *schema.ResourceData, c *elasticache.CacheCluster) error
for _, node := range sortedCacheNodes {
if node.CacheNodeID == nil || node.Endpoint == nil || node.Endpoint.Address == nil || node.Endpoint.Port == nil {
return fmt.Errorf("Unexpected nil pointer in: %s", awsutil.StringValue(node))
return fmt.Errorf("Unexpected nil pointer in: %s", node)
}
cacheNodeData = append(cacheNodeData, map[string]interface{}{
"id": *node.CacheNodeID,
@ -404,7 +403,7 @@ func cacheClusterStateRefreshFunc(conn *elasticache.ElastiCache, clusterID, give
return func() (interface{}, string, error) {
resp, err := conn.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{
CacheClusterID: aws.String(clusterID),
ShowCacheNodeInfo: aws.Boolean(true),
ShowCacheNodeInfo: aws.Bool(true),
})
if err != nil {
apierr := err.(awserr.Error)
@ -418,11 +417,27 @@ func cacheClusterStateRefreshFunc(conn *elasticache.ElastiCache, clusterID, give
return nil, "", err
}
c := resp.CacheClusters[0]
log.Printf("[DEBUG] status: %v", *c.CacheClusterStatus)
if len(resp.CacheClusters) == 0 {
return nil, "", fmt.Errorf("[WARN] Error: no Cache Clusters found for id (%s)", clusterID)
}
var c *elasticache.CacheCluster
for _, cluster := range resp.CacheClusters {
if *cluster.CacheClusterID == clusterID {
log.Printf("[DEBUG] Found matching ElastiCache cluster: %s", *cluster.CacheClusterID)
c = cluster
}
}
if c == nil {
return nil, "", fmt.Errorf("[WARN] Error: no matching Elastic Cache cluster for id (%s)", clusterID)
}
log.Printf("[DEBUG] ElastiCache Cluster (%s) status: %v", clusterID, *c.CacheClusterStatus)
// return the current state if it's in the pending array
for _, p := range pending {
log.Printf("[DEBUG] ElastiCache: checking pending state (%s) for cluster (%s), cluster status: %s", pending, clusterID, *c.CacheClusterStatus)
s := *c.CacheClusterStatus
if p == s {
log.Printf("[DEBUG] Return with status: %v", *c.CacheClusterStatus)
@ -432,18 +447,24 @@ func cacheClusterStateRefreshFunc(conn *elasticache.ElastiCache, clusterID, give
// return given state if it's not in pending
if givenState != "" {
log.Printf("[DEBUG] ElastiCache: checking given state (%s) of cluster (%s) against cluster status (%s)", givenState, clusterID, *c.CacheClusterStatus)
// check to make sure we have the node count we're expecting
if int64(len(c.CacheNodes)) != *c.NumCacheNodes {
log.Printf("[DEBUG] Node count is not what is expected: %d found, %d expected", len(c.CacheNodes), *c.NumCacheNodes)
return nil, "creating", nil
}
log.Printf("[DEBUG] Node count matched (%d)", len(c.CacheNodes))
// loop the nodes and check their status as well
for _, n := range c.CacheNodes {
log.Printf("[DEBUG] Checking cache node for status: %s", n)
if n.CacheNodeStatus != nil && *n.CacheNodeStatus != "available" {
log.Printf("[DEBUG] Node (%s) is not yet available, status: %s", *n.CacheNodeID, *n.CacheNodeStatus)
return nil, "creating", nil
}
log.Printf("[DEBUG] Cache node not in expected state")
}
log.Printf("[DEBUG] ElastiCache returning given state (%s), cluster: %s", givenState, c)
return c, givenState, nil
}
log.Printf("[DEBUG] current status: %v", *c.CacheClusterStatus)
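
The rewritten refresh function above now searches the DescribeCacheClusters response for the exact cluster ID and, when a target state is given, also requires the expected node count and an "available" status on every node before reporting success (the changelog's ElastiCache node-status fix). Below is a dependency-free sketch of the refresh-and-wait shape this plugs into, with local types standing in for Terraform's helper/resource, whose StateRefreshFunc has the same func() (interface{}, string, error) signature; it is an illustration only.

package main

import (
	"errors"
	"fmt"
	"time"
)

type stateRefreshFunc func() (interface{}, string, error)

// waitForState polls refresh until it reports the target state or times out.
func waitForState(refresh stateRefreshFunc, target string, timeout time.Duration) (interface{}, error) {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		result, state, err := refresh()
		if err != nil {
			return nil, err
		}
		if state == target {
			return result, nil
		}
		time.Sleep(200 * time.Millisecond)
	}
	return nil, errors.New("timeout waiting for target state")
}

func main() {
	// Simulated cluster that becomes "available" after a few polls.
	polls := 0
	refresh := func() (interface{}, string, error) {
		polls++
		if polls < 3 {
			return nil, "creating", nil
		}
		return "my-cluster", "available", nil
	}
	c, err := waitForState(refresh, "available", 5*time.Second)
	fmt.Println(c, err)
}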


@ -11,6 +11,7 @@ import (
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/elb"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
@ -24,7 +25,8 @@ func resourceAwsElb() *schema.Resource {
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
Optional: true,
Computed: true,
ForceNew: true,
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
@ -211,10 +213,18 @@ func resourceAwsElbCreate(d *schema.ResourceData, meta interface{}) error {
return err
}
var elbName string
if v, ok := d.GetOk("name"); ok {
elbName = v.(string)
} else {
elbName = resource.PrefixedUniqueId("tf-lb-")
d.Set("name", elbName)
}
tags := tagsFromMapELB(d.Get("tags").(map[string]interface{}))
// Provision the elb
elbOpts := &elb.CreateLoadBalancerInput{
LoadBalancerName: aws.String(d.Get("name").(string)),
LoadBalancerName: aws.String(elbName),
Listeners: listeners,
Tags: tags,
}
@ -241,7 +251,7 @@ func resourceAwsElbCreate(d *schema.ResourceData, meta interface{}) error {
}
// Assign the elb's unique identifier for use later
d.SetId(d.Get("name").(string))
d.SetId(elbName)
log.Printf("[INFO] ELB ID: %s", d.Id())
// Enable partial mode and record what we set
@ -419,10 +429,10 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error {
LoadBalancerName: aws.String(d.Get("name").(string)),
LoadBalancerAttributes: &elb.LoadBalancerAttributes{
CrossZoneLoadBalancing: &elb.CrossZoneLoadBalancing{
Enabled: aws.Boolean(d.Get("cross_zone_load_balancing").(bool)),
Enabled: aws.Bool(d.Get("cross_zone_load_balancing").(bool)),
},
ConnectionSettings: &elb.ConnectionSettings{
IdleTimeout: aws.Long(int64(d.Get("idle_timeout").(int))),
IdleTimeout: aws.Int64(int64(d.Get("idle_timeout").(int))),
},
},
}
@ -449,8 +459,8 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error {
LoadBalancerName: aws.String(d.Get("name").(string)),
LoadBalancerAttributes: &elb.LoadBalancerAttributes{
ConnectionDraining: &elb.ConnectionDraining{
Enabled: aws.Boolean(true),
Timeout: aws.Long(int64(d.Get("connection_draining_timeout").(int))),
Enabled: aws.Bool(true),
Timeout: aws.Int64(int64(d.Get("connection_draining_timeout").(int))),
},
},
}
@ -470,7 +480,7 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error {
LoadBalancerName: aws.String(d.Get("name").(string)),
LoadBalancerAttributes: &elb.LoadBalancerAttributes{
ConnectionDraining: &elb.ConnectionDraining{
Enabled: aws.Boolean(d.Get("connection_draining").(bool)),
Enabled: aws.Bool(d.Get("connection_draining").(bool)),
},
},
}
@ -490,11 +500,11 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error {
configureHealthCheckOpts := elb.ConfigureHealthCheckInput{
LoadBalancerName: aws.String(d.Id()),
HealthCheck: &elb.HealthCheck{
HealthyThreshold: aws.Long(int64(check["healthy_threshold"].(int))),
UnhealthyThreshold: aws.Long(int64(check["unhealthy_threshold"].(int))),
Interval: aws.Long(int64(check["interval"].(int))),
HealthyThreshold: aws.Int64(int64(check["healthy_threshold"].(int))),
UnhealthyThreshold: aws.Int64(int64(check["unhealthy_threshold"].(int))),
Interval: aws.Int64(int64(check["interval"].(int))),
Target: aws.String(check["target"].(string)),
Timeout: aws.Long(int64(check["timeout"].(int))),
Timeout: aws.Int64(int64(check["timeout"].(int))),
},
}
_, err := elbconn.ConfigureHealthCheck(&configureHealthCheckOpts)


@ -4,6 +4,7 @@ import (
"fmt"
"os"
"reflect"
"regexp"
"sort"
"testing"
@ -74,6 +75,27 @@ func TestAccAWSELB_fullCharacterRange(t *testing.T) {
})
}
func TestAccAWSELB_generatedName(t *testing.T) {
var conf elb.LoadBalancerDescription
generatedNameRegexp := regexp.MustCompile("^tf-lb-")
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSELBDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSELBGeneratedName,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSELBExists("aws_elb.foo", &conf),
resource.TestMatchResourceAttr(
"aws_elb.foo", "name", generatedNameRegexp),
),
},
},
})
}
func TestAccAWSELB_tags(t *testing.T) {
var conf elb.LoadBalancerDescription
var td elb.TagDescription
@ -465,9 +487,9 @@ func testAccCheckAWSELBAttributes(conf *elb.LoadBalancerDescription) resource.Te
}
l := elb.Listener{
InstancePort: aws.Long(int64(8000)),
InstancePort: aws.Int64(int64(8000)),
InstanceProtocol: aws.String("HTTP"),
LoadBalancerPort: aws.Long(int64(80)),
LoadBalancerPort: aws.Int64(int64(80)),
Protocol: aws.String("HTTP"),
}
@ -503,10 +525,10 @@ func testAccCheckAWSELBAttributesHealthCheck(conf *elb.LoadBalancerDescription)
}
check := &elb.HealthCheck{
Timeout: aws.Long(int64(30)),
UnhealthyThreshold: aws.Long(int64(5)),
HealthyThreshold: aws.Long(int64(5)),
Interval: aws.Long(int64(60)),
Timeout: aws.Int64(int64(30)),
UnhealthyThreshold: aws.Int64(int64(5)),
HealthyThreshold: aws.Int64(int64(5)),
Interval: aws.Int64(int64(60)),
Target: aws.String("HTTP:8000/"),
}
@ -592,6 +614,19 @@ resource "aws_elb" "foo" {
}
`
const testAccAWSELBGeneratedName = `
resource "aws_elb" "foo" {
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
listener {
instance_port = 8000
instance_protocol = "http"
lb_port = 80
lb_protocol = "http"
}
}
`
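
The new acceptance test above omits the name attribute entirely and only asserts that the generated value matches ^tf-lb-, exercising the Optional/Computed name and the resource.PrefixedUniqueId fallback added to the ELB resource. Below is a standalone sketch of that fallback pattern; resource.PrefixedUniqueId is the helper the resource itself calls, while everything else is illustration only.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/resource"
)

// elbName returns the user-supplied name when present, or a generated
// "tf-lb-"-prefixed unique ID when the attribute was omitted.
func elbName(configured string) string {
	if configured != "" {
		return configured
	}
	return resource.PrefixedUniqueId("tf-lb-")
}

func main() {
	fmt.Println(elbName("my-elb")) // explicit name wins
	fmt.Println(elbName(""))       // e.g. tf-lb-<unique suffix>
}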
const testAccAWSELBConfig_TagUpdate = `
resource "aws_elb" "bar" {
name = "foobar-terraform-test"


@ -5,7 +5,6 @@ import (
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
@ -94,7 +93,7 @@ func resourceAwsLogFlowCreate(d *schema.ResourceData, meta interface{}) error {
}
log.Printf(
"[DEBUG] Flow Log Create configuration: %s", awsutil.StringValue(opts))
"[DEBUG] Flow Log Create configuration: %s", opts)
resp, err := conn.CreateFlowLogs(opts)
if err != nil {
return fmt.Errorf("Error creating Flow Log for (%s), error: %s", resourceId, err)


@ -11,7 +11,7 @@ import (
"github.com/hashicorp/terraform/terraform"
)
func TestAccFlowLog_basic(t *testing.T) {
func TestAccAWSFlowLog_basic(t *testing.T) {
var flowLog ec2.FlowLog
lgn := os.Getenv("LOG_GROUP_NAME")
@ -31,7 +31,7 @@ func TestAccFlowLog_basic(t *testing.T) {
})
}
func TestAccFlowLog_subnet(t *testing.T) {
func TestAccAWSFlowLog_subnet(t *testing.T) {
var flowLog ec2.FlowLog
lgn := os.Getenv("LOG_GROUP_NAME")


@ -97,7 +97,7 @@ func resourceAwsIamPolicyUpdate(d *schema.ResourceData, meta interface{}) error
request := &iam.CreatePolicyVersionInput{
PolicyARN: aws.String(d.Id()),
PolicyDocument: aws.String(d.Get("policy").(string)),
SetAsDefault: aws.Boolean(true),
SetAsDefault: aws.Bool(true),
}
if _, err := iamconn.CreatePolicyVersion(request); err != nil {


@ -4,6 +4,7 @@ import (
"crypto/sha1"
"encoding/hex"
"fmt"
"log"
"strings"
"github.com/aws/aws-sdk-go/aws"
@ -34,8 +35,9 @@ func resourceAwsIAMServerCertificate() *schema.Resource {
},
"path": &schema.Schema{
Type: schema.TypeBool,
Type: schema.TypeString,
Optional: true,
Default: "/",
ForceNew: true,
},
@ -74,10 +76,11 @@ func resourceAwsIAMServerCertificateCreate(d *schema.ResourceData, meta interfac
createOpts.CertificateChain = aws.String(v.(string))
}
if v, ok := d.GetOk("Path"); ok {
if v, ok := d.GetOk("path"); ok {
createOpts.Path = aws.String(v.(string))
}
log.Printf("[DEBUG] Creating IAM Server Certificate with opts: %s", createOpts)
resp, err := conn.UploadServerCertificate(createOpts)
if err != nil {
if awsErr, ok := err.(awserr.Error); ok {
@ -107,7 +110,12 @@ func resourceAwsIAMServerCertificateRead(d *schema.ResourceData, meta interface{
// these values should always be present, and have a default if not set in
// configuration, and so safe to reference with nil checks
d.Set("certificate_body", normalizeCert(resp.ServerCertificate.CertificateBody))
d.Set("certificate_chain", normalizeCert(resp.ServerCertificate.CertificateChain))
c := normalizeCert(resp.ServerCertificate.CertificateChain)
if c != "" {
d.Set("certificate_chain", c)
}
d.Set("path", resp.ServerCertificate.ServerCertificateMetadata.Path)
d.Set("arn", resp.ServerCertificate.ServerCertificateMetadata.ARN)
@ -132,9 +140,10 @@ func resourceAwsIAMServerCertificateDelete(d *schema.ResourceData, meta interfac
}
func normalizeCert(cert interface{}) string {
if cert == nil {
if cert == nil || cert == (*string)(nil) {
return ""
}
switch cert.(type) {
case string:
hash := sha1.Sum([]byte(strings.TrimSpace(cert.(string))))
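
The extra cert == (*string)(nil) guard above deals with a Go subtlety: an interface value holding a typed nil pointer does not compare equal to a bare nil, so the original cert == nil test let an absent certificate chain (a nil *string) slip through. Below is a dependency-free sketch of the gotcha; it is an illustration only.

package main

import "fmt"

func normalize(cert interface{}) string {
	if cert == nil || cert == (*string)(nil) {
		return ""
	}
	return fmt.Sprintf("%v", cert)
}

func main() {
	var chain *string // nil pointer, e.g. an absent certificate chain

	var asInterface interface{} = chain
	fmt.Println(asInterface == nil)            // false: the interface carries a type
	fmt.Println(asInterface == (*string)(nil)) // true: the typed-nil guard catches it

	fmt.Printf("%q\n", normalize(asInterface)) // ""
}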


@ -13,7 +13,7 @@ import (
"github.com/hashicorp/terraform/terraform"
)
func TestAccIAMServerCertificate_basic(t *testing.T) {
func TestAccAWSIAMServerCertificate_basic(t *testing.T) {
var cert iam.ServerCertificate
resource.Test(t, resource.TestCase{


@ -12,7 +12,6 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/resource"
@ -334,8 +333,8 @@ func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error {
ImageID: instanceOpts.ImageID,
InstanceType: instanceOpts.InstanceType,
KeyName: instanceOpts.KeyName,
MaxCount: aws.Long(int64(1)),
MinCount: aws.Long(int64(1)),
MaxCount: aws.Int64(int64(1)),
MinCount: aws.Int64(int64(1)),
NetworkInterfaces: instanceOpts.NetworkInterfaces,
Placement: instanceOpts.Placement,
PrivateIPAddress: instanceOpts.PrivateIPAddress,
@ -346,7 +345,7 @@ func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error {
}
// Create the instance
log.Printf("[DEBUG] Run configuration: %s", awsutil.StringValue(runOpts))
log.Printf("[DEBUG] Run configuration: %s", runOpts)
var runResp *ec2.Reservation
for i := 0; i < 5; i++ {
@ -543,7 +542,7 @@ func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
_, err := conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
InstanceID: aws.String(d.Id()),
SourceDestCheck: &ec2.AttributeBooleanValue{
Value: aws.Boolean(d.Get("source_dest_check").(bool)),
Value: aws.Bool(d.Get("source_dest_check").(bool)),
},
})
if err != nil {
@ -571,7 +570,7 @@ func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
_, err := conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
InstanceID: aws.String(d.Id()),
DisableAPITermination: &ec2.AttributeBooleanValue{
Value: aws.Boolean(d.Get("disable_api_termination").(bool)),
Value: aws.Bool(d.Get("disable_api_termination").(bool)),
},
})
if err != nil {
@ -579,6 +578,24 @@ func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
}
}
if d.HasChange("monitoring") {
var mErr error
if d.Get("monitoring").(bool) {
log.Printf("[DEBUG] Enabling monitoring for Instance (%s)", d.Id())
_, mErr = conn.MonitorInstances(&ec2.MonitorInstancesInput{
InstanceIDs: []*string{aws.String(d.Id())},
})
} else {
log.Printf("[DEBUG] Disabling monitoring for Instance (%s)", d.Id())
_, mErr = conn.UnmonitorInstances(&ec2.UnmonitorInstancesInput{
InstanceIDs: []*string{aws.String(d.Id())},
})
}
if mErr != nil {
return fmt.Errorf("[WARN] Error updating Instance monitoring: %s", mErr)
}
}
// TODO(mitchellh): wait for the attributes we modified to
// persist the change...
@ -760,6 +777,10 @@ func fetchRootDeviceName(ami string, conn *ec2.EC2) (*string, error) {
rootDeviceName = image.BlockDeviceMappings[0].DeviceName
}
if rootDeviceName == nil {
return nil, fmt.Errorf("[WARN] Error finding Root Device Name for AMI (%s)", ami)
}
return rootDeviceName, nil
}
@ -772,7 +793,7 @@ func readBlockDeviceMappingsFromConfig(
for _, v := range vL {
bd := v.(map[string]interface{})
ebs := &ec2.EBSBlockDevice{
DeleteOnTermination: aws.Boolean(bd["delete_on_termination"].(bool)),
DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)),
}
if v, ok := bd["snapshot_id"].(string); ok && v != "" {
@ -780,11 +801,11 @@ func readBlockDeviceMappingsFromConfig(
}
if v, ok := bd["encrypted"].(bool); ok && v {
ebs.Encrypted = aws.Boolean(v)
ebs.Encrypted = aws.Bool(v)
}
if v, ok := bd["volume_size"].(int); ok && v != 0 {
ebs.VolumeSize = aws.Long(int64(v))
ebs.VolumeSize = aws.Int64(int64(v))
}
if v, ok := bd["volume_type"].(string); ok && v != "" {
@ -792,7 +813,7 @@ func readBlockDeviceMappingsFromConfig(
}
if v, ok := bd["iops"].(int); ok && v > 0 {
ebs.IOPS = aws.Long(int64(v))
ebs.IOPS = aws.Int64(int64(v))
}
blockDevices = append(blockDevices, &ec2.BlockDeviceMapping{
@ -821,11 +842,11 @@ func readBlockDeviceMappingsFromConfig(
for _, v := range vL {
bd := v.(map[string]interface{})
ebs := &ec2.EBSBlockDevice{
DeleteOnTermination: aws.Boolean(bd["delete_on_termination"].(bool)),
DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)),
}
if v, ok := bd["volume_size"].(int); ok && v != 0 {
ebs.VolumeSize = aws.Long(int64(v))
ebs.VolumeSize = aws.Int64(int64(v))
}
if v, ok := bd["volume_type"].(string); ok && v != "" {
@ -833,7 +854,7 @@ func readBlockDeviceMappingsFromConfig(
}
if v, ok := bd["iops"].(int); ok && v > 0 {
ebs.IOPS = aws.Long(int64(v))
ebs.IOPS = aws.Int64(int64(v))
}
if dn, err := fetchRootDeviceName(d.Get("ami").(string), conn); err == nil {
@ -880,14 +901,14 @@ func buildAwsInstanceOpts(
conn := meta.(*AWSClient).ec2conn
opts := &awsInstanceOpts{
DisableAPITermination: aws.Boolean(d.Get("disable_api_termination").(bool)),
EBSOptimized: aws.Boolean(d.Get("ebs_optimized").(bool)),
DisableAPITermination: aws.Bool(d.Get("disable_api_termination").(bool)),
EBSOptimized: aws.Bool(d.Get("ebs_optimized").(bool)),
ImageID: aws.String(d.Get("ami").(string)),
InstanceType: aws.String(d.Get("instance_type").(string)),
}
opts.Monitoring = &ec2.RunInstancesMonitoringEnabled{
Enabled: aws.Boolean(d.Get("monitoring").(bool)),
Enabled: aws.Bool(d.Get("monitoring").(bool)),
}
opts.IAMInstanceProfile = &ec2.IAMInstanceProfileSpecification{
@ -943,8 +964,8 @@ func buildAwsInstanceOpts(
// to avoid: Network interfaces and an instance-level security group may not be specified on
// the same request
ni := &ec2.InstanceNetworkInterfaceSpecification{
AssociatePublicIPAddress: aws.Boolean(associatePublicIPAddress),
DeviceIndex: aws.Long(int64(0)),
AssociatePublicIPAddress: aws.Bool(associatePublicIPAddress),
DeviceIndex: aws.Int64(int64(0)),
SubnetID: aws.String(subnetID),
Groups: groups,
}


@ -7,7 +7,6 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
@ -47,7 +46,7 @@ func TestAccAWSInstance_basic(t *testing.T) {
var err error
vol, err = conn.CreateVolume(&ec2.CreateVolumeInput{
AvailabilityZone: aws.String("us-west-2a"),
Size: aws.Long(int64(5)),
Size: aws.Int64(int64(5)),
})
return err
},
@ -467,8 +466,8 @@ func TestAccAWSInstance_keyPairCheck(t *testing.T) {
if v.KeyName == nil {
return fmt.Errorf("No Key Pair found, expected(%s)", keyName)
}
if *v.KeyName != keyName {
return fmt.Errorf("Bad key name, expected (%s), got (%s)", keyName, awsutil.StringValue(v.KeyName))
if v.KeyName != nil && *v.KeyName != keyName {
return fmt.Errorf("Bad key name, expected (%s), got (%s)", keyName, *v.KeyName)
}
return nil


@ -43,7 +43,7 @@ func resourceAwsKinesisStreamCreate(d *schema.ResourceData, meta interface{}) er
conn := meta.(*AWSClient).kinesisconn
sn := d.Get("name").(string)
createOpts := &kinesis.CreateStreamInput{
ShardCount: aws.Long(int64(d.Get("shard_count").(int))),
ShardCount: aws.Int64(int64(d.Get("shard_count").(int))),
StreamName: aws.String(sn),
}
@ -82,7 +82,7 @@ func resourceAwsKinesisStreamRead(d *schema.ResourceData, meta interface{}) erro
conn := meta.(*AWSClient).kinesisconn
describeOpts := &kinesis.DescribeStreamInput{
StreamName: aws.String(d.Get("name").(string)),
Limit: aws.Long(1),
Limit: aws.Int64(1),
}
resp, err := conn.DescribeStream(describeOpts)
if err != nil {
@ -138,7 +138,7 @@ func streamStateRefreshFunc(conn *kinesis.Kinesis, sn string) resource.StateRefr
return func() (interface{}, string, error) {
describeOpts := &kinesis.DescribeStreamInput{
StreamName: aws.String(sn),
Limit: aws.Long(1),
Limit: aws.Int64(1),
}
resp, err := conn.DescribeStream(describeOpts)
if err != nil {


@ -13,7 +13,7 @@ import (
"github.com/hashicorp/terraform/terraform"
)
func TestAccKinesisStream_basic(t *testing.T) {
func TestAccAWSKinesisStream_basic(t *testing.T) {
var stream kinesis.StreamDescription
resource.Test(t, resource.TestCase{
@ -46,7 +46,7 @@ func testAccCheckKinesisStreamExists(n string, stream *kinesis.StreamDescription
conn := testAccProvider.Meta().(*AWSClient).kinesisconn
describeOpts := &kinesis.DescribeStreamInput{
StreamName: aws.String(rs.Primary.Attributes["name"]),
Limit: aws.Long(1),
Limit: aws.Int64(1),
}
resp, err := conn.DescribeStream(describeOpts)
if err != nil {
@ -84,7 +84,7 @@ func testAccCheckKinesisStreamDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).kinesisconn
describeOpts := &kinesis.DescribeStreamInput{
StreamName: aws.String(rs.Primary.Attributes["name"]),
Limit: aws.Long(1),
Limit: aws.Int64(1),
}
resp, err := conn.DescribeStream(describeOpts)
if err == nil {


@ -112,10 +112,10 @@ func resourceAwsLambdaFunctionCreate(d *schema.ResourceData, meta interface{}) e
Description: aws.String(d.Get("description").(string)),
FunctionName: aws.String(functionName),
Handler: aws.String(d.Get("handler").(string)),
MemorySize: aws.Long(int64(d.Get("memory_size").(int))),
MemorySize: aws.Int64(int64(d.Get("memory_size").(int))),
Role: aws.String(iamRole),
Runtime: aws.String(d.Get("runtime").(string)),
Timeout: aws.Long(int64(d.Get("timeout").(int))),
Timeout: aws.Int64(int64(d.Get("timeout").(int))),
}
for i := 0; i < 5; i++ {


@ -10,7 +10,7 @@ import (
"github.com/hashicorp/terraform/terraform"
)
func TestAccAWSLambdaFunction_normal(t *testing.T) {
func TestAccAWSLambdaFunction_basic(t *testing.T) {
var conf lambda.GetFunctionOutput
resource.Test(t, resource.TestCase{


@ -264,7 +264,7 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface
LaunchConfigurationName: aws.String(d.Get("name").(string)),
ImageID: aws.String(d.Get("image_id").(string)),
InstanceType: aws.String(d.Get("instance_type").(string)),
EBSOptimized: aws.Boolean(d.Get("ebs_optimized").(bool)),
EBSOptimized: aws.Bool(d.Get("ebs_optimized").(bool)),
}
if v, ok := d.GetOk("user_data"); ok {
@ -273,7 +273,7 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface
}
createLaunchConfigurationOpts.InstanceMonitoring = &autoscaling.InstanceMonitoring{
Enabled: aws.Boolean(d.Get("enable_monitoring").(bool)),
Enabled: aws.Bool(d.Get("enable_monitoring").(bool)),
}
if v, ok := d.GetOk("iam_instance_profile"); ok {
@ -285,7 +285,7 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface
}
if v, ok := d.GetOk("associate_public_ip_address"); ok {
createLaunchConfigurationOpts.AssociatePublicIPAddress = aws.Boolean(v.(bool))
createLaunchConfigurationOpts.AssociatePublicIPAddress = aws.Bool(v.(bool))
}
if v, ok := d.GetOk("key_name"); ok {
@ -308,7 +308,7 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface
for _, v := range vL {
bd := v.(map[string]interface{})
ebs := &autoscaling.EBS{
DeleteOnTermination: aws.Boolean(bd["delete_on_termination"].(bool)),
DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)),
}
if v, ok := bd["snapshot_id"].(string); ok && v != "" {
@ -316,7 +316,7 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface
}
if v, ok := bd["volume_size"].(int); ok && v != 0 {
ebs.VolumeSize = aws.Long(int64(v))
ebs.VolumeSize = aws.Int64(int64(v))
}
if v, ok := bd["volume_type"].(string); ok && v != "" {
@ -324,7 +324,7 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface
}
if v, ok := bd["iops"].(int); ok && v > 0 {
ebs.IOPS = aws.Long(int64(v))
ebs.IOPS = aws.Int64(int64(v))
}
blockDevices = append(blockDevices, &autoscaling.BlockDeviceMapping{
@ -353,11 +353,11 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface
for _, v := range vL {
bd := v.(map[string]interface{})
ebs := &autoscaling.EBS{
DeleteOnTermination: aws.Boolean(bd["delete_on_termination"].(bool)),
DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)),
}
if v, ok := bd["volume_size"].(int); ok && v != 0 {
ebs.VolumeSize = aws.Long(int64(v))
ebs.VolumeSize = aws.Int64(int64(v))
}
if v, ok := bd["volume_type"].(string); ok && v != "" {
@ -365,7 +365,7 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface
}
if v, ok := bd["iops"].(int); ok && v > 0 {
ebs.IOPS = aws.Long(int64(v))
ebs.IOPS = aws.Int64(int64(v))
}
if dn, err := fetchRootDeviceName(d.Get("image_id").(string), ec2conn); err == nil {
@ -480,7 +480,8 @@ func resourceAwsLaunchConfigurationDelete(d *schema.ResourceData, meta interface
})
if err != nil {
autoscalingerr, ok := err.(awserr.Error)
if ok && autoscalingerr.Code() == "InvalidConfiguration.NotFound" {
if ok && (autoscalingerr.Code() == "InvalidConfiguration.NotFound" || autoscalingerr.Code() == "ValidationError") {
log.Printf("[DEBUG] Launch configuration (%s) not found", d.Id())
return nil
}

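The delete path above now also treats AWS's generic "ValidationError" code as "launch configuration already gone". A small sketch of that check in isolation, assuming the same awserr package the file imports (the helper name is made up here):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/awserr"
)

// launchConfigGone reports whether a delete error means the launch
// configuration no longer exists, mirroring the condition above.
func launchConfigGone(err error) bool {
    awsErr, ok := err.(awserr.Error)
    if !ok {
        return false
    }
    return awsErr.Code() == "InvalidConfiguration.NotFound" ||
        awsErr.Code() == "ValidationError"
}

func main() {
    fmt.Println(launchConfigGone(awserr.New("ValidationError", "not found", nil)))
}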

@ -53,7 +53,7 @@ func resourceAwsLBCookieStickinessPolicyCreate(d *schema.ResourceData, meta inte
// Provision the LBStickinessPolicy
lbspOpts := &elb.CreateLBCookieStickinessPolicyInput{
CookieExpirationPeriod: aws.Long(int64(d.Get("cookie_expiration_period").(int))),
CookieExpirationPeriod: aws.Int64(int64(d.Get("cookie_expiration_period").(int))),
LoadBalancerName: aws.String(d.Get("load_balancer").(string)),
PolicyName: aws.String(d.Get("name").(string)),
}
@ -64,7 +64,7 @@ func resourceAwsLBCookieStickinessPolicyCreate(d *schema.ResourceData, meta inte
setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{
LoadBalancerName: aws.String(d.Get("load_balancer").(string)),
LoadBalancerPort: aws.Long(int64(d.Get("lb_port").(int))),
LoadBalancerPort: aws.Int64(int64(d.Get("lb_port").(int))),
PolicyNames: []*string{aws.String(d.Get("name").(string))},
}
@ -129,7 +129,7 @@ func resourceAwsLBCookieStickinessPolicyDelete(d *schema.ResourceData, meta inte
// policy itself.
setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{
LoadBalancerName: aws.String(d.Get("load_balancer").(string)),
LoadBalancerPort: aws.Long(int64(d.Get("lb_port").(int))),
LoadBalancerPort: aws.Int64(int64(d.Get("lb_port").(int))),
PolicyNames: []*string{},
}


@ -34,6 +34,7 @@ func resourceAwsNetworkInterface() *schema.Resource {
Type: schema.TypeSet,
Optional: true,
ForceNew: true,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
@ -46,6 +47,12 @@ func resourceAwsNetworkInterface() *schema.Resource {
Set: schema.HashString,
},
"source_dest_check": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Default: true,
},
"attachment": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
@ -127,6 +134,7 @@ func resourceAwsNetworkInterfaceRead(d *schema.ResourceData, meta interface{}) e
d.Set("subnet_id", eni.SubnetID)
d.Set("private_ips", flattenNetworkInterfacesPrivateIPAddesses(eni.PrivateIPAddresses))
d.Set("security_groups", flattenGroupIdentifiers(eni.Groups))
d.Set("source_dest_check", eni.SourceDestCheck)
// Tags
d.Set("tags", tagsToMap(eni.TagSet))
@ -167,7 +175,7 @@ func resourceAwsNetworkInterfaceDetach(oa *schema.Set, meta interface{}, eniId s
old_attachment := oa.List()[0].(map[string]interface{})
detach_request := &ec2.DetachNetworkInterfaceInput{
AttachmentID: aws.String(old_attachment["attachment_id"].(string)),
Force: aws.Boolean(true),
Force: aws.Bool(true),
}
conn := meta.(*AWSClient).ec2conn
_, detach_err := conn.DetachNetworkInterface(detach_request)
@ -208,7 +216,7 @@ func resourceAwsNetworkInterfaceUpdate(d *schema.ResourceData, meta interface{})
new_attachment := na.(*schema.Set).List()[0].(map[string]interface{})
di := new_attachment["device_index"].(int)
attach_request := &ec2.AttachNetworkInterfaceInput{
DeviceIndex: aws.Long(int64(di)),
DeviceIndex: aws.Int64(int64(di)),
InstanceID: aws.String(new_attachment["instance"].(string)),
NetworkInterfaceID: aws.String(d.Id()),
}
@ -221,6 +229,18 @@ func resourceAwsNetworkInterfaceUpdate(d *schema.ResourceData, meta interface{})
d.SetPartial("attachment")
}
request := &ec2.ModifyNetworkInterfaceAttributeInput{
NetworkInterfaceID: aws.String(d.Id()),
SourceDestCheck: &ec2.AttributeBooleanValue{Value: aws.Bool(d.Get("source_dest_check").(bool))},
}
_, err := conn.ModifyNetworkInterfaceAttribute(request)
if err != nil {
return fmt.Errorf("Failure updating ENI: %s", err)
}
d.SetPartial("source_dest_check")
if d.HasChange("security_groups") {
request := &ec2.ModifyNetworkInterfaceAttributeInput{
NetworkInterfaceID: aws.String(d.Id()),

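The new source_dest_check attribute above defaults to true and is pushed to the API on every update via ModifyNetworkInterfaceAttribute. A sketch of that call on its own, with a made-up ENI ID (field names follow the vendored SDK revision used in this diff):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
    // Disabling source/destination checking is what NAT-style interfaces need;
    // the resource sends this request on update regardless of a diff.
    req := &ec2.ModifyNetworkInterfaceAttributeInput{
        NetworkInterfaceID: aws.String("eni-0123456"), // placeholder ID
        SourceDestCheck:    &ec2.AttributeBooleanValue{Value: aws.Bool(false)},
    }
    fmt.Println(*req.NetworkInterfaceID, *req.SourceDestCheck.Value)
}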

@ -57,6 +57,46 @@ func TestAccAWSENI_attached(t *testing.T) {
})
}
func TestAccAWSENI_sourceDestCheck(t *testing.T) {
var conf ec2.NetworkInterface
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSENIDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSENIConfigWithSourceDestCheck,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSENIExists("aws_network_interface.bar", &conf),
resource.TestCheckResourceAttr(
"aws_network_interface.bar", "source_dest_check", "false"),
),
},
},
})
}
func TestAccAWSENI_computedIPs(t *testing.T) {
var conf ec2.NetworkInterface
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSENIDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSENIConfigWithNoPrivateIPs,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSENIExists("aws_network_interface.bar", &conf),
resource.TestCheckResourceAttr(
"aws_network_interface.bar", "private_ips.#", "1"),
),
},
},
})
}
func testAccCheckAWSENIExists(n string, res *ec2.NetworkInterface) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
@ -108,6 +148,10 @@ func testAccCheckAWSENIAttributes(conf *ec2.NetworkInterface) resource.TestCheck
return fmt.Errorf("expected private ip to be 172.16.10.100, but was %s", *conf.PrivateIPAddress)
}
if *conf.SourceDestCheck != true {
return fmt.Errorf("expected source_dest_check to be true, but was %t", *conf.SourceDestCheck)
}
if len(conf.TagSet) == 0 {
return fmt.Errorf("expected tags")
}
@ -201,6 +245,41 @@ resource "aws_network_interface" "bar" {
}
`
const testAccAWSENIConfigWithSourceDestCheck = `
resource "aws_vpc" "foo" {
cidr_block = "172.16.0.0/16"
}
resource "aws_subnet" "foo" {
vpc_id = "${aws_vpc.foo.id}"
cidr_block = "172.16.10.0/24"
availability_zone = "us-west-2a"
}
resource "aws_network_interface" "bar" {
subnet_id = "${aws_subnet.foo.id}"
source_dest_check = false
private_ips = ["172.16.10.100"]
}
`
const testAccAWSENIConfigWithNoPrivateIPs = `
resource "aws_vpc" "foo" {
cidr_block = "172.16.0.0/16"
}
resource "aws_subnet" "foo" {
vpc_id = "${aws_vpc.foo.id}"
cidr_block = "172.16.10.0/24"
availability_zone = "us-west-2a"
}
resource "aws_network_interface" "bar" {
subnet_id = "${aws_subnet.foo.id}"
source_dest_check = false
}
`
const testAccAWSENIConfigWithAttachment = `
resource "aws_vpc" "foo" {
cidr_block = "172.16.0.0/16"


@ -13,7 +13,7 @@ import (
"github.com/aws/aws-sdk-go/service/route53"
)
func TestAccRoute53DelegationSet_basic(t *testing.T) {
func TestAccAWSRoute53DelegationSet_basic(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@ -29,7 +29,7 @@ func TestAccRoute53DelegationSet_basic(t *testing.T) {
})
}
func TestAccRoute53DelegationSet_withZones(t *testing.T) {
func TestAccAWSRoute53DelegationSet_withZones(t *testing.T) {
var zone route53.GetHostedZoneOutput
resource.Test(t, resource.TestCase{


@ -68,7 +68,7 @@ func resourceAwsRoute53HealthCheckUpdate(d *schema.ResourceData, meta interface{
}
if d.HasChange("failure_threshold") {
updateHealthCheck.FailureThreshold = aws.Long(int64(d.Get("failure_threshold").(int)))
updateHealthCheck.FailureThreshold = aws.Int64(int64(d.Get("failure_threshold").(int)))
}
if d.HasChange("fqdn") {
@ -76,7 +76,7 @@ func resourceAwsRoute53HealthCheckUpdate(d *schema.ResourceData, meta interface{
}
if d.HasChange("port") {
updateHealthCheck.Port = aws.Long(int64(d.Get("port").(int)))
updateHealthCheck.Port = aws.Int64(int64(d.Get("port").(int)))
}
if d.HasChange("resource_path") {
@ -104,8 +104,8 @@ func resourceAwsRoute53HealthCheckCreate(d *schema.ResourceData, meta interface{
healthConfig := &route53.HealthCheckConfig{
Type: aws.String(d.Get("type").(string)),
FailureThreshold: aws.Long(int64(d.Get("failure_threshold").(int))),
RequestInterval: aws.Long(int64(d.Get("request_interval").(int))),
FailureThreshold: aws.Int64(int64(d.Get("failure_threshold").(int))),
RequestInterval: aws.Int64(int64(d.Get("request_interval").(int))),
}
if v, ok := d.GetOk("fqdn"); ok {
@ -121,7 +121,7 @@ func resourceAwsRoute53HealthCheckCreate(d *schema.ResourceData, meta interface{
}
if v, ok := d.GetOk("port"); ok {
healthConfig.Port = aws.Long(int64(v.(int)))
healthConfig.Port = aws.Int64(int64(v.(int)))
}
if v, ok := d.GetOk("resource_path"); ok {


@ -10,7 +10,7 @@ import (
"github.com/aws/aws-sdk-go/service/route53"
)
func TestAccRoute53HealthCheck_basic(t *testing.T) {
func TestAccAWSRoute53HealthCheck_basic(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@ -34,7 +34,7 @@ func TestAccRoute53HealthCheck_basic(t *testing.T) {
})
}
func TestAccRoute53HealthCheck_IpConfig(t *testing.T) {
func TestAccAWSRoute53HealthCheck_IpConfig(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,


@ -367,7 +367,7 @@ func resourceAwsRoute53RecordBuildSet(d *schema.ResourceData, zoneName string) (
}
if v, ok := d.GetOk("ttl"); ok {
rec.TTL = aws.Long(int64(v.(int)))
rec.TTL = aws.Int64(int64(v.(int)))
}
// Resource records
@ -385,7 +385,7 @@ func resourceAwsRoute53RecordBuildSet(d *schema.ResourceData, zoneName string) (
alias := aliases[0].(map[string]interface{})
rec.AliasTarget = &route53.AliasTarget{
DNSName: aws.String(alias["name"].(string)),
EvaluateTargetHealth: aws.Boolean(alias["evaluate_target_health"].(bool)),
EvaluateTargetHealth: aws.Bool(alias["evaluate_target_health"].(bool)),
HostedZoneID: aws.String(alias["zone_id"].(string)),
}
log.Printf("[DEBUG] Creating alias: %#v", alias)
@ -408,7 +408,7 @@ func resourceAwsRoute53RecordBuildSet(d *schema.ResourceData, zoneName string) (
}
if v, ok := d.GetOk("weight"); ok {
rec.Weight = aws.Long(int64(v.(int)))
rec.Weight = aws.Int64(int64(v.(int)))
}
if v, ok := d.GetOk("set_identifier"); ok {

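Combined with the S3 website_domain attribute added later in this commit, the alias handling above lets a record point straight at a bucket's website endpoint. A sketch of the AliasTarget the resource builds, with placeholder values; in the real code they come from the alias set (zone_id, name, evaluate_target_health):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/route53"
)

func main() {
    // Placeholder zone ID and domain; the acceptance test feeds these from
    // aws_s3_bucket.website.hosted_zone_id and .website_domain.
    target := &route53.AliasTarget{
        DNSName:              aws.String("s3-website-us-west-2.amazonaws.com"),
        EvaluateTargetHealth: aws.Bool(true),
        HostedZoneID:         aws.String("Z1EXAMPLE"), // placeholder
    }
    fmt.Println(*target.DNSName)
}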

@ -50,7 +50,7 @@ func TestExpandRecordName(t *testing.T) {
}
}
func TestAccRoute53Record_basic(t *testing.T) {
func TestAccAWSRoute53Record_basic(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@ -66,7 +66,7 @@ func TestAccRoute53Record_basic(t *testing.T) {
})
}
func TestAccRoute53Record_txtSupport(t *testing.T) {
func TestAccAWSRoute53Record_txtSupport(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@ -82,7 +82,7 @@ func TestAccRoute53Record_txtSupport(t *testing.T) {
})
}
func TestAccRoute53Record_generatesSuffix(t *testing.T) {
func TestAccAWSRoute53Record_generatesSuffix(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@ -98,7 +98,7 @@ func TestAccRoute53Record_generatesSuffix(t *testing.T) {
})
}
func TestAccRoute53Record_wildcard(t *testing.T) {
func TestAccAWSRoute53Record_wildcard(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@ -122,7 +122,7 @@ func TestAccRoute53Record_wildcard(t *testing.T) {
})
}
func TestAccRoute53Record_weighted(t *testing.T) {
func TestAccAWSRoute53Record_weighted(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@ -139,7 +139,7 @@ func TestAccRoute53Record_weighted(t *testing.T) {
})
}
func TestAccRoute53Record_alias(t *testing.T) {
func TestAccAWSRoute53Record_alias(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@ -155,7 +155,23 @@ func TestAccRoute53Record_alias(t *testing.T) {
})
}
func TestAccRoute53Record_weighted_alias(t *testing.T) {
func TestAccAWSRoute53Record_s3_alias(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckRoute53RecordDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccRoute53S3AliasRecord,
Check: resource.ComposeTestCheckFunc(
testAccCheckRoute53RecordExists("aws_route53_record.alias"),
),
},
},
})
}
func TestAccAWSRoute53Record_weighted_alias(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@ -182,7 +198,7 @@ func TestAccRoute53Record_weighted_alias(t *testing.T) {
})
}
func TestAccRoute53Record_TypeChange(t *testing.T) {
func TestAccAWSRoute53Record_TypeChange(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@ -449,6 +465,32 @@ resource "aws_route53_record" "alias" {
}
`
const testAccRoute53S3AliasRecord = `
resource "aws_route53_zone" "main" {
name = "notexample.com"
}
resource "aws_s3_bucket" "website" {
bucket = "website.notexample.com"
acl = "public-read"
website {
index_document = "index.html"
}
}
resource "aws_route53_record" "alias" {
zone_id = "${aws_route53_zone.main.zone_id}"
name = "www"
type = "A"
alias {
zone_id = "${aws_s3_bucket.website.hosted_zone_id}"
name = "${aws_s3_bucket.website.website_domain}"
evaluate_target_health = true
}
}
`
const testAccRoute53WeightedElbAliasRecord = `
resource "aws_route53_zone" "main" {
name = "notexample.com"


@ -12,7 +12,7 @@ import (
"github.com/aws/aws-sdk-go/service/route53"
)
func TestAccRoute53ZoneAssociation_basic(t *testing.T) {
func TestAccAWSRoute53ZoneAssociation_basic(t *testing.T) {
var zone route53.HostedZone
resource.Test(t, resource.TestCase{
@ -30,7 +30,7 @@ func TestAccRoute53ZoneAssociation_basic(t *testing.T) {
})
}
func TestAccRoute53ZoneAssociation_region(t *testing.T) {
func TestAccAWSRoute53ZoneAssociation_region(t *testing.T) {
var zone route53.HostedZone
// record the initialized providers so that we can use them to


@ -64,7 +64,7 @@ func TestCleanChangeID(t *testing.T) {
}
}
func TestAccRoute53Zone_basic(t *testing.T) {
func TestAccAWSRoute53Zone_basic(t *testing.T) {
var zone route53.GetHostedZoneOutput
var td route53.ResourceTagSet
@ -85,7 +85,7 @@ func TestAccRoute53Zone_basic(t *testing.T) {
})
}
func TestAccRoute53Zone_private_basic(t *testing.T) {
func TestAccAWSRoute53Zone_private_basic(t *testing.T) {
var zone route53.GetHostedZoneOutput
resource.Test(t, resource.TestCase{
@ -104,7 +104,7 @@ func TestAccRoute53Zone_private_basic(t *testing.T) {
})
}
func TestAccRoute53Zone_private_region(t *testing.T) {
func TestAccAWSRoute53Zone_private_region(t *testing.T) {
var zone route53.GetHostedZoneOutput
// record the initialized providers so that we can use them to


@ -77,12 +77,16 @@ func resourceAwsS3Bucket() *schema.Resource {
Optional: true,
Computed: true,
},
"website_endpoint": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"website_domain": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"tags": tagsSchema(),
@ -237,13 +241,18 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error {
}
// Add website_endpoint as an attribute
endpoint, err := websiteEndpoint(s3conn, d)
websiteEndpoint, err := websiteEndpoint(s3conn, d)
if err != nil {
return err
}
if err := d.Set("website_endpoint", endpoint); err != nil {
if websiteEndpoint != nil {
if err := d.Set("website_endpoint", websiteEndpoint.Endpoint); err != nil {
return err
}
if err := d.Set("website_domain", websiteEndpoint.Domain); err != nil {
return err
}
}
tagSet, err := getTagSetS3(s3conn, d.Id())
if err != nil {
@ -405,11 +414,11 @@ func resourceAwsS3BucketWebsiteDelete(s3conn *s3.S3, d *schema.ResourceData) err
return nil
}
func websiteEndpoint(s3conn *s3.S3, d *schema.ResourceData) (string, error) {
func websiteEndpoint(s3conn *s3.S3, d *schema.ResourceData) (*S3Website, error) {
// If the bucket doesn't have a website configuration, return an empty
// endpoint
if _, ok := d.GetOk("website"); !ok {
return "", nil
return nil, nil
}
bucket := d.Get("bucket").(string)
@ -421,26 +430,31 @@ func websiteEndpoint(s3conn *s3.S3, d *schema.ResourceData) (string, error) {
},
)
if err != nil {
return "", err
return nil, err
}
var region string
if location.LocationConstraint != nil {
region = *location.LocationConstraint
}
return WebsiteEndpointUrl(bucket, region), nil
return WebsiteEndpoint(bucket, region), nil
}
func WebsiteEndpointUrl(bucket string, region string) string {
func WebsiteEndpoint(bucket string, region string) *S3Website {
domain := WebsiteDomainUrl(region)
return &S3Website{Endpoint: fmt.Sprintf("%s.%s", bucket, domain), Domain: domain}
}
func WebsiteDomainUrl(region string) string {
region = normalizeRegion(region)
// Frankfurt(and probably future) regions uses different syntax for website endpoints
// http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html
if region == "eu-central-1" {
return fmt.Sprintf("%s.s3-website.%s.amazonaws.com", bucket, region)
return fmt.Sprintf("s3-website.%s.amazonaws.com", region)
}
return fmt.Sprintf("%s.s3-website-%s.amazonaws.com", bucket, region)
return fmt.Sprintf("s3-website-%s.amazonaws.com", region)
}
func normalizeJson(jsonString interface{}) string {
@ -465,3 +479,7 @@ func normalizeRegion(region string) string {
return region
}
type S3Website struct {
Endpoint, Domain string
}

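websiteEndpoint now returns a small S3Website struct instead of a bare string so the read path can populate both website_endpoint and website_domain. A rough usage sketch of the exported helpers, restated locally so it runs on its own (region normalization is omitted; the eu-central-1 special case is copied from above):

package main

import "fmt"

type S3Website struct {
    Endpoint, Domain string
}

// Local restatement of WebsiteDomainUrl / WebsiteEndpoint from the hunk above.
func WebsiteDomainUrl(region string) string {
    if region == "eu-central-1" {
        return fmt.Sprintf("s3-website.%s.amazonaws.com", region)
    }
    return fmt.Sprintf("s3-website-%s.amazonaws.com", region)
}

func WebsiteEndpoint(bucket, region string) *S3Website {
    domain := WebsiteDomainUrl(region)
    return &S3Website{Endpoint: fmt.Sprintf("%s.%s", bucket, domain), Domain: domain}
}

func main() {
    w := WebsiteEndpoint("website.notexample.com", "us-west-2")
    fmt.Println(w.Endpoint) // website.notexample.com.s3-website-us-west-2.amazonaws.com
    fmt.Println(w.Domain)   // s3-website-us-west-2.amazonaws.com
}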

@ -223,8 +223,8 @@ func resourceAwsSecurityGroupCreate(d *schema.ResourceData, meta interface{}) er
GroupID: createResp.GroupID,
IPPermissions: []*ec2.IPPermission{
&ec2.IPPermission{
FromPort: aws.Long(int64(0)),
ToPort: aws.Long(int64(0)),
FromPort: aws.Int64(int64(0)),
ToPort: aws.Int64(int64(0)),
IPRanges: []*ec2.IPRange{
&ec2.IPRange{
CIDRIP: aws.String("0.0.0.0/0"),


@ -9,7 +9,6 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
@ -98,7 +97,7 @@ func resourceAwsSecurityGroupRuleCreate(d *schema.ResourceData, meta interface{}
switch ruleType {
case "ingress":
log.Printf("[DEBUG] Authorizing security group %s %s rule: %s",
sg_id, "Ingress", awsutil.StringValue(perm))
sg_id, "Ingress", perm)
req := &ec2.AuthorizeSecurityGroupIngressInput{
GroupID: sg.GroupID,
@ -213,7 +212,7 @@ func resourceAwsSecurityGroupRuleDelete(d *schema.ResourceData, meta interface{}
switch ruleType {
case "ingress":
log.Printf("[DEBUG] Revoking rule (%s) from security group %s:\n%s",
"ingress", sg_id, awsutil.StringValue(perm))
"ingress", sg_id, perm)
req := &ec2.RevokeSecurityGroupIngressInput{
GroupID: sg.GroupID,
IPPermissions: []*ec2.IPPermission{perm},
@ -330,8 +329,8 @@ func ipPermissionIDHash(ruleType string, ip *ec2.IPPermission) string {
func expandIPPerm(d *schema.ResourceData, sg *ec2.SecurityGroup) *ec2.IPPermission {
var perm ec2.IPPermission
perm.FromPort = aws.Long(int64(d.Get("from_port").(int)))
perm.ToPort = aws.Long(int64(d.Get("to_port").(int)))
perm.FromPort = aws.Int64(int64(d.Get("from_port").(int)))
perm.ToPort = aws.Int64(int64(d.Get("to_port").(int)))
perm.IPProtocol = aws.String(d.Get("protocol").(string))
// build a group map that behaves like a set

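The awsutil import disappears here and in several files below because the log lines now pass the SDK structs straight to %s; the generated types appear to format themselves the same way awsutil.StringValue did. A minimal sketch under that assumption:

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
    perm := &ec2.IPPermission{
        IPProtocol: aws.String("tcp"),
        FromPort:   aws.Int64(80),
        ToPort:     aws.Int64(8000),
    }
    // No awsutil.StringValue wrapper: the value renders itself in the log line.
    log.Printf("[DEBUG] revoking rule: %s", perm)
}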

@ -56,8 +56,8 @@ func migrateExpandIPPerm(attrs map[string]string) (*ec2.IPPermission, error) {
return nil, fmt.Errorf("Error converting from_port in Security Group migration")
}
perm.ToPort = aws.Long(int64(tp))
perm.FromPort = aws.Long(int64(fp))
perm.ToPort = aws.Int64(int64(tp))
perm.FromPort = aws.Int64(int64(fp))
perm.IPProtocol = aws.String(attrs["protocol"])
groups := make(map[string]bool)


@ -7,7 +7,6 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
@ -16,8 +15,8 @@ import (
func TestIpPermissionIDHash(t *testing.T) {
simple := &ec2.IPPermission{
IPProtocol: aws.String("tcp"),
FromPort: aws.Long(int64(80)),
ToPort: aws.Long(int64(8000)),
FromPort: aws.Int64(int64(80)),
ToPort: aws.Int64(int64(8000)),
IPRanges: []*ec2.IPRange{
&ec2.IPRange{
CIDRIP: aws.String("10.0.0.0/8"),
@ -27,8 +26,8 @@ func TestIpPermissionIDHash(t *testing.T) {
egress := &ec2.IPPermission{
IPProtocol: aws.String("tcp"),
FromPort: aws.Long(int64(80)),
ToPort: aws.Long(int64(8000)),
FromPort: aws.Int64(int64(80)),
ToPort: aws.Int64(int64(8000)),
IPRanges: []*ec2.IPRange{
&ec2.IPRange{
CIDRIP: aws.String("10.0.0.0/8"),
@ -47,8 +46,8 @@ func TestIpPermissionIDHash(t *testing.T) {
vpc_security_group_source := &ec2.IPPermission{
IPProtocol: aws.String("tcp"),
FromPort: aws.Long(int64(80)),
ToPort: aws.Long(int64(8000)),
FromPort: aws.Int64(int64(80)),
ToPort: aws.Int64(int64(8000)),
UserIDGroupPairs: []*ec2.UserIDGroupPair{
&ec2.UserIDGroupPair{
UserID: aws.String("987654321"),
@ -67,8 +66,8 @@ func TestIpPermissionIDHash(t *testing.T) {
security_group_source := &ec2.IPPermission{
IPProtocol: aws.String("tcp"),
FromPort: aws.Long(int64(80)),
ToPort: aws.Long(int64(8000)),
FromPort: aws.Int64(int64(80)),
ToPort: aws.Int64(int64(8000)),
UserIDGroupPairs: []*ec2.UserIDGroupPair{
&ec2.UserIDGroupPair{
UserID: aws.String("987654321"),
@ -101,7 +100,7 @@ func TestIpPermissionIDHash(t *testing.T) {
for _, tc := range cases {
actual := ipPermissionIDHash(tc.Type, tc.Input)
if actual != tc.Output {
t.Errorf("input: %s - %s\noutput: %s", tc.Type, awsutil.StringValue(tc.Input), actual)
t.Errorf("input: %s - %s\noutput: %s", tc.Type, tc.Input, actual)
}
}
}
@ -323,8 +322,8 @@ func testAccCheckAWSSecurityGroupRuleExists(n string, group *ec2.SecurityGroup)
func testAccCheckAWSSecurityGroupRuleAttributes(group *ec2.SecurityGroup, ruleType string) resource.TestCheckFunc {
return func(s *terraform.State) error {
p := &ec2.IPPermission{
FromPort: aws.Long(80),
ToPort: aws.Long(8000),
FromPort: aws.Int64(80),
ToPort: aws.Int64(8000),
IPProtocol: aws.String("tcp"),
IPRanges: []*ec2.IPRange{&ec2.IPRange{CIDRIP: aws.String("10.0.0.0/8")}},
}


@ -356,8 +356,8 @@ func testAccCheckAWSSecurityGroupExists(n string, group *ec2.SecurityGroup) reso
func testAccCheckAWSSecurityGroupAttributes(group *ec2.SecurityGroup) resource.TestCheckFunc {
return func(s *terraform.State) error {
p := &ec2.IPPermission{
FromPort: aws.Long(80),
ToPort: aws.Long(8000),
FromPort: aws.Int64(80),
ToPort: aws.Int64(8000),
IPProtocol: aws.String("tcp"),
IPRanges: []*ec2.IPRange{&ec2.IPRange{CIDRIP: aws.String("10.0.0.0/8")}},
}
@ -449,14 +449,14 @@ func testAccCheckAWSSecurityGroupAttributesChanged(group *ec2.SecurityGroup) res
return func(s *terraform.State) error {
p := []*ec2.IPPermission{
&ec2.IPPermission{
FromPort: aws.Long(80),
ToPort: aws.Long(9000),
FromPort: aws.Int64(80),
ToPort: aws.Int64(9000),
IPProtocol: aws.String("tcp"),
IPRanges: []*ec2.IPRange{&ec2.IPRange{CIDRIP: aws.String("10.0.0.0/8")}},
},
&ec2.IPPermission{
FromPort: aws.Long(80),
ToPort: aws.Long(8000),
FromPort: aws.Int64(80),
ToPort: aws.Int64(8000),
IPProtocol: aws.String("tcp"),
IPRanges: []*ec2.IPRange{
&ec2.IPRange{


@ -7,7 +7,6 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
@ -78,7 +77,7 @@ func resourceAwsSpotInstanceRequestCreate(d *schema.ResourceData, meta interface
// Though the AWS API supports creating spot instance requests for multiple
// instances, for TF purposes we fix this to one instance per request.
// Users can get equivalent behavior out of TF's "count" meta-parameter.
InstanceCount: aws.Long(1),
InstanceCount: aws.Int64(1),
LaunchSpecification: &ec2.RequestSpotLaunchSpecification{
BlockDeviceMappings: instanceOpts.BlockDeviceMappings,
@ -95,14 +94,14 @@ func resourceAwsSpotInstanceRequestCreate(d *schema.ResourceData, meta interface
}
// Make the spot instance request
log.Printf("[DEBUG] Requesting spot bid opts: %s", awsutil.StringValue(spotOpts))
log.Printf("[DEBUG] Requesting spot bid opts: %s", spotOpts)
resp, err := conn.RequestSpotInstances(spotOpts)
if err != nil {
return fmt.Errorf("Error requesting spot instances: %s", err)
}
if len(resp.SpotInstanceRequests) != 1 {
return fmt.Errorf(
"Expected response with length 1, got: %s", awsutil.StringValue(resp))
"Expected response with length 1, got: %s", resp)
}
sir := *resp.SpotInstanceRequests[0]
@ -123,7 +122,7 @@ func resourceAwsSpotInstanceRequestCreate(d *schema.ResourceData, meta interface
_, err = spotStateConf.WaitForState()
if err != nil {
return fmt.Errorf("Error while waiting for spot request (%s) to resolve: %s", awsutil.StringValue(sir), err)
return fmt.Errorf("Error while waiting for spot request (%s) to resolve: %s", sir, err)
}
}
@ -160,7 +159,7 @@ func resourceAwsSpotInstanceRequestRead(d *schema.ResourceData, meta interface{}
request := resp.SpotInstanceRequests[0]
// if the request is cancelled, then it is gone
if *request.State == "canceled" {
if *request.State == "cancelled" {
d.SetId("")
return nil
}


@ -137,7 +137,7 @@ func resourceAwsSubnetUpdate(d *schema.ResourceData, meta interface{}) error {
modifyOpts := &ec2.ModifySubnetAttributeInput{
SubnetID: aws.String(d.Id()),
MapPublicIPOnLaunch: &ec2.AttributeBooleanValue{
Value: aws.Boolean(d.Get("map_public_ip_on_launch").(bool)),
Value: aws.Bool(d.Get("map_public_ip_on_launch").(bool)),
},
}


@ -157,7 +157,7 @@ func resourceAwsVolumeAttachmentDelete(d *schema.ResourceData, meta interface{})
Device: aws.String(d.Get("device_name").(string)),
InstanceID: aws.String(iID),
VolumeID: aws.String(vID),
Force: aws.Boolean(d.Get("force_detach").(bool)),
Force: aws.Bool(d.Get("force_detach").(bool)),
}
_, err := conn.DetachVolume(opts)


@ -11,7 +11,7 @@ import (
"github.com/hashicorp/terraform/terraform"
)
func TestAccDHCPOptions_basic(t *testing.T) {
func TestAccAWSDHCPOptions_basic(t *testing.T) {
var d ec2.DHCPOptions
resource.Test(t, resource.TestCase{


@ -11,7 +11,7 @@ import (
"github.com/hashicorp/terraform/terraform"
)
func TestAccVpcEndpoint_basic(t *testing.T) {
func TestAccAWSVpcEndpoint_basic(t *testing.T) {
var endpoint ec2.VPCEndpoint
resource.Test(t, resource.TestCase{
@ -29,7 +29,7 @@ func TestAccVpcEndpoint_basic(t *testing.T) {
})
}
func TestAccVpcEndpoint_withRouteTableAndPolicy(t *testing.T) {
func TestAccAWSVpcEndpoint_withRouteTableAndPolicy(t *testing.T) {
var endpoint ec2.VPCEndpoint
var routeTable ec2.RouteTable


@ -11,7 +11,7 @@ import (
"github.com/hashicorp/terraform/terraform"
)
func TestAccVpc_basic(t *testing.T) {
func TestAccAWSVpc_basic(t *testing.T) {
var vpc ec2.VPC
resource.Test(t, resource.TestCase{
@ -32,7 +32,7 @@ func TestAccVpc_basic(t *testing.T) {
})
}
func TestAccVpc_dedicatedTenancy(t *testing.T) {
func TestAccAWSVpc_dedicatedTenancy(t *testing.T) {
var vpc ec2.VPC
resource.Test(t, resource.TestCase{
@ -52,7 +52,7 @@ func TestAccVpc_dedicatedTenancy(t *testing.T) {
})
}
func TestAccVpc_tags(t *testing.T) {
func TestAccAWSVpc_tags(t *testing.T) {
var vpc ec2.VPC
resource.Test(t, resource.TestCase{
@ -83,7 +83,7 @@ func TestAccVpc_tags(t *testing.T) {
})
}
func TestAccVpcUpdate(t *testing.T) {
func TestAccAWSVpc_update(t *testing.T) {
var vpc ec2.VPC
resource.Test(t, resource.TestCase{
@ -187,7 +187,7 @@ func testAccCheckVpcExists(n string, vpc *ec2.VPC) resource.TestCheckFunc {
}
// https://github.com/hashicorp/terraform/issues/1301
func TestAccVpc_bothDnsOptionsSet(t *testing.T) {
func TestAccAWSVpc_bothDnsOptionsSet(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,


@ -143,7 +143,7 @@ func resourceAwsVpnConnectionCreate(d *schema.ResourceData, meta interface{}) er
conn := meta.(*AWSClient).ec2conn
connectOpts := &ec2.VPNConnectionOptionsSpecification{
StaticRoutesOnly: aws.Boolean(d.Get("static_routes_only").(bool)),
StaticRoutesOnly: aws.Bool(d.Get("static_routes_only").(bool)),
}
createOpts := &ec2.CreateVPNConnectionInput{


@ -87,7 +87,7 @@ func TestAccAWSVpnGateway_delete(t *testing.T) {
})
}
func TestAccVpnGateway_tags(t *testing.T) {
func TestAccAWSVpnGateway_tags(t *testing.T) {
var v ec2.VPNGateway
resource.Test(t, resource.TestCase{


@ -94,7 +94,7 @@ func expandEcsLoadBalancers(configured []interface{}) []*ecs.LoadBalancer {
l := &ecs.LoadBalancer{
ContainerName: aws.String(data["container_name"].(string)),
ContainerPort: aws.Long(int64(data["container_port"].(int))),
ContainerPort: aws.Int64(int64(data["container_port"].(int))),
LoadBalancerName: aws.String(data["elb_name"].(string)),
}
@ -117,8 +117,8 @@ func expandIPPerms(
var perm ec2.IPPermission
m := mRaw.(map[string]interface{})
perm.FromPort = aws.Long(int64(m["from_port"].(int)))
perm.ToPort = aws.Long(int64(m["to_port"].(int)))
perm.FromPort = aws.Int64(int64(m["from_port"].(int)))
perm.ToPort = aws.Int64(int64(m["to_port"].(int)))
perm.IPProtocol = aws.String(m["protocol"].(string))
// When protocol is "-1", AWS won't store any ports for the
@ -405,7 +405,7 @@ func expandPrivateIPAddesses(ips []interface{}) []*ec2.PrivateIPAddressSpecifica
PrivateIPAddress: aws.String(v.(string)),
}
new_private_ip.Primary = aws.Boolean(i == 0)
new_private_ip.Primary = aws.Bool(i == 0)
dtos = append(dtos, new_private_ip)
}

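expandPrivateIPAddesses marks only the first address as primary, which is what lets the ENI resource compute private IPs when none are configured. A self-contained sketch of that loop (the function name is shortened here):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/ec2"
)

// expandPrivateIPs mirrors the expansion above: the first IP becomes primary.
func expandPrivateIPs(ips []string) []*ec2.PrivateIPAddressSpecification {
    out := make([]*ec2.PrivateIPAddressSpecification, 0, len(ips))
    for i, ip := range ips {
        out = append(out, &ec2.PrivateIPAddressSpecification{
            PrivateIPAddress: aws.String(ip),
            Primary:          aws.Bool(i == 0),
        })
    }
    return out
}

func main() {
    for _, s := range expandPrivateIPs([]string{"172.16.10.100", "172.16.10.101"}) {
        fmt.Println(*s.PrivateIPAddress, *s.Primary)
    }
}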

@ -70,8 +70,8 @@ func TestexpandIPPerms(t *testing.T) {
expected := []ec2.IPPermission{
ec2.IPPermission{
IPProtocol: aws.String("icmp"),
FromPort: aws.Long(int64(1)),
ToPort: aws.Long(int64(-1)),
FromPort: aws.Int64(int64(1)),
ToPort: aws.Int64(int64(-1)),
IPRanges: []*ec2.IPRange{&ec2.IPRange{CIDRIP: aws.String("0.0.0.0/0")}},
UserIDGroupPairs: []*ec2.UserIDGroupPair{
&ec2.UserIDGroupPair{
@ -85,8 +85,8 @@ func TestexpandIPPerms(t *testing.T) {
},
ec2.IPPermission{
IPProtocol: aws.String("icmp"),
FromPort: aws.Long(int64(1)),
ToPort: aws.Long(int64(-1)),
FromPort: aws.Int64(int64(1)),
ToPort: aws.Int64(int64(-1)),
UserIDGroupPairs: []*ec2.UserIDGroupPair{
&ec2.UserIDGroupPair{
UserID: aws.String("foo"),
@ -149,8 +149,8 @@ func TestExpandIPPerms_NegOneProtocol(t *testing.T) {
expected := []ec2.IPPermission{
ec2.IPPermission{
IPProtocol: aws.String("-1"),
FromPort: aws.Long(int64(0)),
ToPort: aws.Long(int64(0)),
FromPort: aws.Int64(int64(0)),
ToPort: aws.Int64(int64(0)),
IPRanges: []*ec2.IPRange{&ec2.IPRange{CIDRIP: aws.String("0.0.0.0/0")}},
UserIDGroupPairs: []*ec2.UserIDGroupPair{
&ec2.UserIDGroupPair{
@ -245,8 +245,8 @@ func TestExpandIPPerms_nonVPC(t *testing.T) {
expected := []ec2.IPPermission{
ec2.IPPermission{
IPProtocol: aws.String("icmp"),
FromPort: aws.Long(int64(1)),
ToPort: aws.Long(int64(-1)),
FromPort: aws.Int64(int64(1)),
ToPort: aws.Int64(int64(-1)),
IPRanges: []*ec2.IPRange{&ec2.IPRange{CIDRIP: aws.String("0.0.0.0/0")}},
UserIDGroupPairs: []*ec2.UserIDGroupPair{
&ec2.UserIDGroupPair{
@ -259,8 +259,8 @@ func TestExpandIPPerms_nonVPC(t *testing.T) {
},
ec2.IPPermission{
IPProtocol: aws.String("icmp"),
FromPort: aws.Long(int64(1)),
ToPort: aws.Long(int64(-1)),
FromPort: aws.Int64(int64(1)),
ToPort: aws.Int64(int64(-1)),
UserIDGroupPairs: []*ec2.UserIDGroupPair{
&ec2.UserIDGroupPair{
GroupName: aws.String("foo"),
@ -302,8 +302,8 @@ func TestexpandListeners(t *testing.T) {
}
expected := &elb.Listener{
InstancePort: aws.Long(int64(8000)),
LoadBalancerPort: aws.Long(int64(80)),
InstancePort: aws.Int64(int64(8000)),
LoadBalancerPort: aws.Int64(int64(80)),
InstanceProtocol: aws.String("http"),
Protocol: aws.String("http"),
}
@ -324,11 +324,11 @@ func TestflattenHealthCheck(t *testing.T) {
}{
{
Input: &elb.HealthCheck{
UnhealthyThreshold: aws.Long(int64(10)),
HealthyThreshold: aws.Long(int64(10)),
UnhealthyThreshold: aws.Int64(int64(10)),
HealthyThreshold: aws.Int64(int64(10)),
Target: aws.String("HTTP:80/"),
Timeout: aws.Long(int64(30)),
Interval: aws.Long(int64(30)),
Timeout: aws.Int64(int64(30)),
Interval: aws.Int64(int64(30)),
},
Output: []map[string]interface{}{
map[string]interface{}{
@ -570,7 +570,7 @@ func TestexpandPrivateIPAddesses(t *testing.T) {
func TestflattenAttachment(t *testing.T) {
expanded := &ec2.NetworkInterfaceAttachment{
InstanceID: aws.String("i-00001"),
DeviceIndex: aws.Long(int64(1)),
DeviceIndex: aws.Int64(int64(1)),
AttachmentID: aws.String("at-002"),
}


@ -4,7 +4,6 @@ import (
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema"
)
@ -39,7 +38,7 @@ func setTags(conn *ec2.EC2, d *schema.ResourceData) error {
}
}
if len(create) > 0 {
log.Printf("[DEBUG] Creating tags: %s for %s", awsutil.StringValue(create), d.Id())
log.Printf("[DEBUG] Creating tags: %s for %s", create, d.Id())
_, err := conn.CreateTags(&ec2.CreateTagsInput{
Resources: []*string{aws.String(d.Id())},
Tags: create,


@ -20,9 +20,9 @@ var websiteEndpoints = []struct {
func TestWebsiteEndpointUrl(t *testing.T) {
for _, tt := range websiteEndpoints {
s := WebsiteEndpointUrl("bucket-name", tt.in)
if s != tt.out {
t.Errorf("WebsiteEndpointUrl(\"bucket-name\", %q) => %q, want %q", tt.in, s, tt.out)
s := WebsiteEndpoint("bucket-name", tt.in)
if s.Endpoint != tt.out {
t.Errorf("WebsiteEndpointUrl(\"bucket-name\", %q) => %q, want %q", tt.in, s.Endpoint, tt.out)
}
}
}


@ -0,0 +1,5 @@
package azure
import "errors"
var PlatformStorageError = errors.New("When using a platform image, the 'storage' parameter is required")


@ -591,6 +591,10 @@ func retrieveImageDetails(
return configureForImage, osType, nil
}
if err == PlatformStorageError {
return nil, "", err
}
return nil, "", fmt.Errorf("Could not find image with label '%s'. Available images are: %s",
label, strings.Join(append(VMLabels, OSLabels...), ", "))
}
@ -646,8 +650,7 @@ func retrieveOSImageDetails(
}
if img.MediaLink == "" {
if storage == "" {
return nil, "", nil,
fmt.Errorf("When using a platform image, the 'storage' parameter is required")
return nil, "", nil, PlatformStorageError
}
img.MediaLink = fmt.Sprintf(osDiskBlobStorageURL, storage, name)
}

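PlatformStorageError is a package-level sentinel, so retrieveImageDetails can surface the targeted "storage is required" message instead of falling through to the generic "could not find image" error. A sketch of the comparison the caller relies on; everything here besides the sentinel itself is illustrative:

package main

import (
    "errors"
    "fmt"
)

var PlatformStorageError = errors.New("When using a platform image, the 'storage' parameter is required")

// describeImage stands in for retrieveOSImageDetails for illustration only.
func describeImage(storage string) error {
    if storage == "" {
        return PlatformStorageError
    }
    return nil
}

func main() {
    if err := describeImage(""); err == PlatformStorageError {
        // Callers match the sentinel with == and report it verbatim.
        fmt.Println(err)
    }
}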

@ -19,6 +19,7 @@ func resourceDNSimpleRecord() *schema.Resource {
"domain": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"domain_id": &schema.Schema{
@ -39,6 +40,7 @@ func resourceDNSimpleRecord() *schema.Resource {
"type": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"value": &schema.Schema{
@ -74,12 +76,12 @@ func resourceDNSimpleRecordCreate(d *schema.ResourceData, meta interface{}) erro
newRecord.Ttl = ttl.(string)
}
log.Printf("[DEBUG] DNS Simple Record create configuration: %#v", newRecord)
log.Printf("[DEBUG] DNSimple Record create configuration: %#v", newRecord)
recId, err := client.CreateRecord(d.Get("domain").(string), newRecord)
if err != nil {
return fmt.Errorf("Failed to create DNS Simple Record: %s", err)
return fmt.Errorf("Failed to create DNSimple Record: %s", err)
}
d.SetId(recId)
@ -93,7 +95,7 @@ func resourceDNSimpleRecordRead(d *schema.ResourceData, meta interface{}) error
rec, err := client.RetrieveRecord(d.Get("domain").(string), d.Id())
if err != nil {
return fmt.Errorf("Couldn't find DNS Simple Record: %s", err)
return fmt.Errorf("Couldn't find DNSimple Record: %s", err)
}
d.Set("domain_id", rec.StringDomainId())
@ -133,11 +135,11 @@ func resourceDNSimpleRecordUpdate(d *schema.ResourceData, meta interface{}) erro
updateRecord.Ttl = attr.(string)
}
log.Printf("[DEBUG] DNS Simple Record update configuration: %#v", updateRecord)
log.Printf("[DEBUG] DNSimple Record update configuration: %#v", updateRecord)
_, err := client.UpdateRecord(d.Get("domain").(string), d.Id(), updateRecord)
if err != nil {
return fmt.Errorf("Failed to update DNS Simple Record: %s", err)
return fmt.Errorf("Failed to update DNSimple Record: %s", err)
}
return resourceDNSimpleRecordRead(d, meta)
@ -146,12 +148,12 @@ func resourceDNSimpleRecordUpdate(d *schema.ResourceData, meta interface{}) erro
func resourceDNSimpleRecordDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*dnsimple.Client)
log.Printf("[INFO] Deleting DNS Simple Record: %s, %s", d.Get("domain").(string), d.Id())
log.Printf("[INFO] Deleting DNSimple Record: %s, %s", d.Get("domain").(string), d.Id())
err := client.DestroyRecord(d.Get("domain").(string), d.Id())
if err != nil {
return fmt.Errorf("Error deleting DNS Simple Record: %s", err)
return fmt.Errorf("Error deleting DNSimple Record: %s", err)
}
return nil


@ -3,10 +3,13 @@ package google
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"runtime"
"strings"
// TODO(dcunnin): Use version code from version.go
// "github.com/hashicorp/terraform"
@ -35,7 +38,6 @@ type Config struct {
func (c *Config) loadAndValidate() error {
var account accountFile
// TODO: validation that it isn't blank
if c.AccountFile == "" {
c.AccountFile = os.Getenv("GOOGLE_ACCOUNT_FILE")
}
@ -49,13 +51,35 @@ func (c *Config) loadAndValidate() error {
var client *http.Client
if c.AccountFile != "" {
if err := loadJSON(&account, c.AccountFile); err != nil {
contents := c.AccountFile
// Assume account_file is a JSON string
if err := parseJSON(&account, contents); err != nil {
// If account_file was not JSON, assume it is a file path instead
if _, err := os.Stat(c.AccountFile); os.IsNotExist(err) {
return fmt.Errorf(
"Error loading account file '%s': %s",
"account_file path does not exist: %s",
c.AccountFile)
}
b, err := ioutil.ReadFile(c.AccountFile)
if err != nil {
return fmt.Errorf(
"Error reading account_file from path '%s': %s",
c.AccountFile,
err)
}
contents = string(b)
if err := parseJSON(&account, contents); err != nil {
return fmt.Errorf(
"Error parsing account file '%s': %s",
contents,
err)
}
}
clientScopes := []string{
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
@ -145,13 +169,9 @@ type accountFile struct {
ClientId string `json:"client_id"`
}
func loadJSON(result interface{}, path string) error {
f, err := os.Open(path)
if err != nil {
return err
}
defer f.Close()
func parseJSON(result interface{}, contents string) error {
r := strings.NewReader(contents)
dec := json.NewDecoder(r)
dec := json.NewDecoder(f)
return dec.Decode(result)
}

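account_file now accepts either the JSON contents of the service-account key or, for backwards compatibility, a path to that file: the config tries to parse the value as JSON first and only then falls back to treating it as a path. A compact sketch of that fallback; the names here are not the provider's:

package main

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "os"
    "strings"
)

type account struct {
    PrivateKeyId string `json:"private_key_id"`
    PrivateKey   string `json:"private_key"`
    ClientEmail  string `json:"client_email"`
    ClientId     string `json:"client_id"`
}

// loadAccount tries the value as JSON first, then as a file path.
func loadAccount(v string) (*account, error) {
    var a account
    if err := json.NewDecoder(strings.NewReader(v)).Decode(&a); err == nil {
        return &a, nil
    }
    if _, err := os.Stat(v); err != nil {
        return nil, fmt.Errorf("account_file is neither valid JSON nor an existing path: %s", err)
    }
    b, err := ioutil.ReadFile(v)
    if err != nil {
        return nil, err
    }
    if err := json.Unmarshal(b, &a); err != nil {
        return nil, fmt.Errorf("error parsing account file: %s", err)
    }
    return &a, nil
}

func main() {
    a, err := loadAccount(`{"client_email": "foo@bar.com"}`)
    if err != nil {
        panic(err)
    }
    fmt.Println(a.ClientEmail)
}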

@ -1,24 +1,50 @@
package google
import (
"reflect"
"io/ioutil"
"testing"
)
func TestConfigLoadJSON_account(t *testing.T) {
var actual accountFile
if err := loadJSON(&actual, "./test-fixtures/fake_account.json"); err != nil {
t.Fatalf("err: %s", err)
const testFakeAccountFilePath = "./test-fixtures/fake_account.json"
func TestConfigLoadAndValidate_accountFilePath(t *testing.T) {
config := Config{
AccountFile: testFakeAccountFilePath,
Project: "my-gce-project",
Region: "us-central1",
}
expected := accountFile{
PrivateKeyId: "foo",
PrivateKey: "bar",
ClientEmail: "foo@bar.com",
ClientId: "id@foo.com",
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %#v", actual)
err := config.loadAndValidate()
if err != nil {
t.Fatalf("error: %v", err)
}
}
func TestConfigLoadAndValidate_accountFileJSON(t *testing.T) {
contents, err := ioutil.ReadFile(testFakeAccountFilePath)
if err != nil {
t.Fatalf("error: %v", err)
}
config := Config{
AccountFile: string(contents),
Project: "my-gce-project",
Region: "us-central1",
}
err = config.loadAndValidate()
if err != nil {
t.Fatalf("error: %v", err)
}
}
func TestConfigLoadAndValidate_accountFileJSONInvalid(t *testing.T) {
config := Config{
AccountFile: "{this is not json}",
Project: "my-gce-project",
Region: "us-central1",
}
if config.loadAndValidate() == nil {
t.Fatalf("expected error, but got nil")
}
}


@ -5,7 +5,6 @@ import (
"fmt"
"google.golang.org/api/compute/v1"
"github.com/hashicorp/terraform/helper/resource"
)
@ -25,8 +24,8 @@ type OperationWaiter struct {
Op *compute.Operation
Project string
Region string
Zone string
Type OperationWaitType
Zone string
}
func (w *OperationWaiter) RefreshFunc() resource.StateRefreshFunc {
@ -78,3 +77,4 @@ func (e OperationError) Error() string {
return buf.String()
}


@ -1,6 +1,10 @@
package google
import (
"encoding/json"
"fmt"
"os"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
)
@ -11,8 +15,9 @@ func Provider() terraform.ResourceProvider {
Schema: map[string]*schema.Schema{
"account_file": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE", nil),
ValidateFunc: validateAccountFile,
},
"project": &schema.Schema{
@ -29,6 +34,7 @@ func Provider() terraform.ResourceProvider {
},
ResourcesMap: map[string]*schema.Resource{
"google_compute_autoscaler": resourceComputeAutoscaler(),
"google_compute_address": resourceComputeAddress(),
"google_compute_disk": resourceComputeDisk(),
"google_compute_firewall": resourceComputeFirewall(),
@ -42,6 +48,7 @@ func Provider() terraform.ResourceProvider {
"google_container_cluster": resourceContainerCluster(),
"google_dns_managed_zone": resourceDnsManagedZone(),
"google_dns_record_set": resourceDnsRecordSet(),
"google_compute_instance_group_manager": resourceComputeInstanceGroupManager(),
"google_storage_bucket": resourceStorageBucket(),
},
@ -62,3 +69,31 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) {
return &config, nil
}
func validateAccountFile(v interface{}, k string) (warnings []string, errors []error) {
value := v.(string)
if value == "" {
return
}
var account accountFile
if err := json.Unmarshal([]byte(value), &account); err != nil {
warnings = append(warnings, `
account_file is not valid JSON, so we are assuming it is a file path. This
support will be removed in the future. Please update your configuration to use
${file("filename.json")} instead.`)
} else {
return
}
if _, err := os.Stat(value); err != nil {
errors = append(errors,
fmt.Errorf(
"account_file path could not be read from '%s': %s",
value,
err))
}
return
}

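The deprecation warning above nudges configurations toward interpolating the key file's contents rather than passing a path. An example provider block, written as a Go const string the way this repo's acceptance tests embed their configs; the filename is a placeholder and the project/region values match the tests above:

package google

// Illustrative configuration only; account.json is not a real fixture.
const exampleGoogleProviderConfig = `
provider "google" {
    account_file = "${file("account.json")}"
    project      = "my-gce-project"
    region       = "us-central1"
}
`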

@ -0,0 +1,352 @@
package google
import (
"fmt"
"log"
"time"
"google.golang.org/api/googleapi"
"google.golang.org/api/compute/v1"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceComputeAutoscaler() *schema.Resource {
return &schema.Resource{
Create: resourceComputeAutoscalerCreate,
Read: resourceComputeAutoscalerRead,
Update: resourceComputeAutoscalerUpdate,
Delete: resourceComputeAutoscalerDelete,
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
ForceNew: true,
Required: true,
},
"description": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"target": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"autoscaling_policy": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"min_replicas": &schema.Schema{
Type: schema.TypeInt,
Required: true,
},
"max_replicas": &schema.Schema{
Type: schema.TypeInt,
Required: true,
},
"cooldown_period": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
Default: 60,
},
"cpu_utilization": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"target": &schema.Schema{
Type: schema.TypeFloat,
Required: true,
},
},
},
},
"metric": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"target": &schema.Schema{
Type: schema.TypeFloat,
Required: true,
},
"type": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
},
},
},
"load_balancing_utilization": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"target": &schema.Schema{
Type: schema.TypeFloat,
Required: true,
},
},
},
},
},
},
},
"zone": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"self_link": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
},
}
}
func buildAutoscaler(d *schema.ResourceData) (*compute.Autoscaler, error) {
// Build the parameter
scaler := &compute.Autoscaler{
Name: d.Get("name").(string),
Target: d.Get("target").(string),
}
// Optional fields
if v, ok := d.GetOk("description"); ok {
scaler.Description = v.(string)
}
aspCount := d.Get("autoscaling_policy.#").(int)
if aspCount != 1 {
return nil, fmt.Errorf("The autoscaler must have exactly one autoscaling_policy, found %d.", aspCount)
}
prefix := "autoscaling_policy.0."
scaler.AutoscalingPolicy = &compute.AutoscalingPolicy{
MaxNumReplicas: int64(d.Get(prefix + "max_replicas").(int)),
MinNumReplicas: int64(d.Get(prefix + "min_replicas").(int)),
CoolDownPeriodSec: int64(d.Get(prefix + "cooldown_period").(int)),
}
// Check that only one autoscaling policy is defined
policyCounter := 0
if _, ok := d.GetOk(prefix + "cpu_utilization"); ok {
if d.Get(prefix+"cpu_utilization.0.target").(float64) != 0 {
cpuUtilCount := d.Get(prefix + "cpu_utilization.#").(int)
if cpuUtilCount != 1 {
return nil, fmt.Errorf("The autoscaling_policy must have exactly one cpu_utilization, found %d.", cpuUtilCount)
}
policyCounter++
scaler.AutoscalingPolicy.CpuUtilization = &compute.AutoscalingPolicyCpuUtilization{
UtilizationTarget: d.Get(prefix + "cpu_utilization.0.target").(float64),
}
}
}
if _, ok := d.GetOk("autoscaling_policy.0.metric"); ok {
if d.Get(prefix+"metric.0.name") != "" {
policyCounter++
metricCount := d.Get(prefix + "metric.#").(int)
if metricCount != 1 {
return nil, fmt.Errorf("The autoscaling_policy must have exactly one metric, found %d.", metricCount)
}
scaler.AutoscalingPolicy.CustomMetricUtilizations = []*compute.AutoscalingPolicyCustomMetricUtilization{
{
Metric: d.Get(prefix + "metric.0.name").(string),
UtilizationTarget: d.Get(prefix + "metric.0.target").(float64),
UtilizationTargetType: d.Get(prefix + "metric.0.type").(string),
},
}
}
}
if _, ok := d.GetOk("autoscaling_policy.0.load_balancing_utilization"); ok {
if d.Get(prefix+"load_balancing_utilization.0.target").(float64) != 0 {
policyCounter++
lbuCount := d.Get(prefix + "load_balancing_utilization.#").(int)
if lbuCount != 1 {
return nil, fmt.Errorf("The autoscaling_policy must have exactly one load_balancing_utilization, found %d.", lbuCount)
}
scaler.AutoscalingPolicy.LoadBalancingUtilization = &compute.AutoscalingPolicyLoadBalancingUtilization{
UtilizationTarget: d.Get(prefix + "load_balancing_utilization.0.target").(float64),
}
}
}
if policyCounter != 1 {
return nil, fmt.Errorf("One policy must be defined for an autoscaler.")
}
return scaler, nil
}
func resourceComputeAutoscalerCreate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
// Get the zone
log.Printf("[DEBUG] Loading zone: %s", d.Get("zone").(string))
zone, err := config.clientCompute.Zones.Get(
config.Project, d.Get("zone").(string)).Do()
if err != nil {
return fmt.Errorf(
"Error loading zone '%s': %s", d.Get("zone").(string), err)
}
scaler, err := buildAutoscaler(d)
if err != nil {
return err
}
op, err := config.clientCompute.Autoscalers.Insert(
config.Project, zone.Name, scaler).Do()
if err != nil {
return fmt.Errorf("Error creating Autoscaler: %s", err)
}
// It probably maybe worked, so store the ID now
d.SetId(scaler.Name)
// Wait for the operation to complete
w := &OperationWaiter{
Service: config.clientCompute,
Op: op,
Project: config.Project,
Type: OperationWaitZone,
Zone: zone.Name,
}
state := w.Conf()
state.Timeout = 2 * time.Minute
state.MinTimeout = 1 * time.Second
opRaw, err := state.WaitForState()
if err != nil {
return fmt.Errorf("Error waiting for Autoscaler to create: %s", err)
}
op = opRaw.(*compute.Operation)
if op.Error != nil {
// The resource didn't actually create
d.SetId("")
// Return the error
return OperationError(*op.Error)
}
return resourceComputeAutoscalerRead(d, meta)
}
func resourceComputeAutoscalerRead(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
zone := d.Get("zone").(string)
scaler, err := config.clientCompute.Autoscalers.Get(
config.Project, zone, d.Id()).Do()
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
// The resource doesn't exist anymore
d.SetId("")
return nil
}
return fmt.Errorf("Error reading Autoscaler: %s", err)
}
d.Set("self_link", scaler.SelfLink)
return nil
}
func resourceComputeAutoscalerUpdate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
zone := d.Get("zone").(string)
scaler, err := buildAutoscaler(d)
if err != nil {
return err
}
op, err := config.clientCompute.Autoscalers.Patch(
config.Project, zone, d.Id(), scaler).Do()
if err != nil {
return fmt.Errorf("Error updating Autoscaler: %s", err)
}
// It probably maybe worked, so store the ID now
d.SetId(scaler.Name)
// Wait for the operation to complete
w := &OperationWaiter{
Service: config.clientCompute,
Op: op,
Project: config.Project,
Type: OperationWaitZone,
Zone: zone,
}
state := w.Conf()
state.Timeout = 2 * time.Minute
state.MinTimeout = 1 * time.Second
opRaw, err := state.WaitForState()
if err != nil {
return fmt.Errorf("Error waiting for Autoscaler to update: %s", err)
}
op = opRaw.(*compute.Operation)
if op.Error != nil {
// Return the error
return OperationError(*op.Error)
}
return resourceComputeAutoscalerRead(d, meta)
}
func resourceComputeAutoscalerDelete(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
zone := d.Get("zone").(string)
op, err := config.clientCompute.Autoscalers.Delete(
config.Project, zone, d.Id()).Do()
if err != nil {
return fmt.Errorf("Error deleting autoscaler: %s", err)
}
// Wait for the operation to complete
w := &OperationWaiter{
Service: config.clientCompute,
Op: op,
Project: config.Project,
Type: OperationWaitZone,
Zone: zone,
}
state := w.Conf()
state.Timeout = 2 * time.Minute
state.MinTimeout = 1 * time.Second
opRaw, err := state.WaitForState()
if err != nil {
return fmt.Errorf("Error waiting for Autoscaler to delete: %s", err)
}
op = opRaw.(*compute.Operation)
if op.Error != nil {
// Return the error
return OperationError(*op.Error)
}
d.SetId("")
return nil
}


@ -0,0 +1,245 @@
package google
import (
"fmt"
"testing"
"google.golang.org/api/compute/v1"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccAutoscaler_basic(t *testing.T) {
var ascaler compute.Autoscaler
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAutoscalerDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAutoscaler_basic,
Check: resource.ComposeTestCheckFunc(
testAccCheckAutoscalerExists(
"google_compute_autoscaler.foobar", &ascaler),
),
},
},
})
}
func TestAccAutoscaler_update(t *testing.T) {
var ascaler compute.Autoscaler
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAutoscalerDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAutoscaler_basic,
Check: resource.ComposeTestCheckFunc(
testAccCheckAutoscalerExists(
"google_compute_autoscaler.foobar", &ascaler),
),
},
resource.TestStep{
Config: testAccAutoscaler_update,
Check: resource.ComposeTestCheckFunc(
testAccCheckAutoscalerExists(
"google_compute_autoscaler.foobar", &ascaler),
testAccCheckAutoscalerUpdated(
"google_compute_autoscaler.foobar", 10),
),
},
},
})
}
func testAccCheckAutoscalerDestroy(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
for _, rs := range s.RootModule().Resources {
if rs.Type != "google_compute_autoscaler" {
continue
}
_, err := config.clientCompute.Autoscalers.Get(
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
if err == nil {
return fmt.Errorf("Autoscaler still exists")
}
}
return nil
}
func testAccCheckAutoscalerExists(n string, ascaler *compute.Autoscaler) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No ID is set")
}
config := testAccProvider.Meta().(*Config)
found, err := config.clientCompute.Autoscalers.Get(
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
if err != nil {
return err
}
if found.Name != rs.Primary.ID {
return fmt.Errorf("Autoscaler not found")
}
*ascaler = *found
return nil
}
}
func testAccCheckAutoscalerUpdated(n string, max int64) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No ID is set")
}
config := testAccProvider.Meta().(*Config)
ascaler, err := config.clientCompute.Autoscalers.Get(
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
if err != nil {
return err
}
if ascaler.AutoscalingPolicy.MaxNumReplicas != max {
return fmt.Errorf("maximum replicas incorrect")
}
return nil
}
}
const testAccAutoscaler_basic = `
resource "google_compute_instance_template" "foobar" {
name = "terraform-test-template-foobar"
machine_type = "n1-standard-1"
can_ip_forward = false
tags = ["foo", "bar"]
disk {
source_image = "debian-cloud/debian-7-wheezy-v20140814"
auto_delete = true
boot = true
}
network_interface {
network = "default"
}
metadata {
foo = "bar"
}
service_account {
scopes = ["userinfo-email", "compute-ro", "storage-ro"]
}
}
resource "google_compute_target_pool" "foobar" {
description = "Resource created for Terraform acceptance testing"
name = "terraform-test-tpool-foobar"
session_affinity = "CLIENT_IP_PROTO"
}
resource "google_compute_instance_group_manager" "foobar" {
description = "Terraform test instance group manager"
name = "terraform-test-groupmanager"
instance_template = "${google_compute_instance_template.foobar.self_link}"
target_pools = ["${google_compute_target_pool.foobar.self_link}"]
base_instance_name = "foobar"
zone = "us-central1-a"
}
resource "google_compute_autoscaler" "foobar" {
description = "Resource created for Terraform acceptance testing"
name = "terraform-test-ascaler"
zone = "us-central1-a"
target = "${google_compute_instance_group_manager.foobar.self_link}"
autoscaling_policy = {
max_replicas = 5
min_replicas = 0
cooldown_period = 60
cpu_utilization = {
target = 0.5
}
}
}`
const testAccAutoscaler_update = `
resource "google_compute_instance_template" "foobar" {
name = "terraform-test-template-foobar"
machine_type = "n1-standard-1"
can_ip_forward = false
tags = ["foo", "bar"]
disk {
source_image = "debian-cloud/debian-7-wheezy-v20140814"
auto_delete = true
boot = true
}
network_interface {
network = "default"
}
metadata {
foo = "bar"
}
service_account {
scopes = ["userinfo-email", "compute-ro", "storage-ro"]
}
}
resource "google_compute_target_pool" "foobar" {
description = "Resource created for Terraform acceptance testing"
name = "terraform-test-tpool-foobar"
session_affinity = "CLIENT_IP_PROTO"
}
resource "google_compute_instance_group_manager" "foobar" {
description = "Terraform test instance group manager"
name = "terraform-test-groupmanager"
instance_template = "${google_compute_instance_template.foobar.self_link}"
target_pools = ["${google_compute_target_pool.foobar.self_link}"]
base_instance_name = "foobar"
zone = "us-central1-a"
}
resource "google_compute_autoscaler" "foobar" {
description = "Resource created for Terraform acceptance testing"
name = "terraform-test-ascaler"
zone = "us-central1-a"
target = "${google_compute_instance_group_manager.foobar.self_link}"
autoscaling_policy = {
max_replicas = 10
min_replicas = 0
cooldown_period = 60
cpu_utilization = {
target = 0.5
}
}
}`

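buildAutoscaler in the new resource accepts exactly one policy type per autoscaling_policy block (cpu_utilization, metric, or load_balancing_utilization), and the acceptance configs above exercise only cpu_utilization. A hypothetical variant targeting a custom metric, which could sit alongside the consts above, might look like the following; the metric name, target, and type are illustrative and not tested here:

const testAccAutoscaler_metric = `
resource "google_compute_autoscaler" "foobar" {
    description = "Resource created for Terraform acceptance testing"
    name = "terraform-test-ascaler"
    zone = "us-central1-a"
    target = "${google_compute_instance_group_manager.foobar.self_link}"
    autoscaling_policy = {
        max_replicas = 5
        min_replicas = 0
        cooldown_period = 60
        metric = {
            name = "compute.googleapis.com/instance/network/received_bytes_count"
            target = 200
            type = "GAUGE"
        }
    }
}`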

@ -0,0 +1,301 @@
package google
import (
"fmt"
"log"
"time"
"google.golang.org/api/googleapi"
"google.golang.org/api/compute/v1"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceComputeInstanceGroupManager() *schema.Resource {
return &schema.Resource{
Create: resourceComputeInstanceGroupManagerCreate,
Read: resourceComputeInstanceGroupManagerRead,
Update: resourceComputeInstanceGroupManagerUpdate,
Delete: resourceComputeInstanceGroupManagerDelete,
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"description": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"base_instance_name": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"fingerprint": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"instance_group": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"instance_template": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"target_pools": &schema.Schema{
Type: schema.TypeSet,
Required: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: func(v interface{}) int {
return hashcode.String(v.(string))
},
},
"target_size": &schema.Schema{
Type: schema.TypeInt,
Computed: true,
Optional: true,
},
"zone": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"self_link": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
},
}
}
func waitOpZone(config *Config, op *compute.Operation, zone string,
resource string, action string) (*compute.Operation, error) {
w := &OperationWaiter{
Service: config.clientCompute,
Op: op,
Project: config.Project,
Zone: zone,
Type: OperationWaitZone,
}
state := w.Conf()
state.Timeout = 8 * time.Minute
state.MinTimeout = 1 * time.Second
opRaw, err := state.WaitForState()
if err != nil {
return nil, fmt.Errorf("Error waiting for %s to %s: %s", resource, action, err)
}
return opRaw.(*compute.Operation), nil
}
func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
// Get group size, default to 1 if not given
var target_size int64 = 1
if v, ok := d.GetOk("target_size"); ok {
target_size = int64(v.(int))
}
// Build the parameter
manager := &compute.InstanceGroupManager{
Name: d.Get("name").(string),
BaseInstanceName: d.Get("base_instance_name").(string),
InstanceTemplate: d.Get("instance_template").(string),
TargetSize: target_size,
}
// Set optional fields
if v, ok := d.GetOk("description"); ok {
manager.Description = v.(string)
}
if attr := d.Get("target_pools").(*schema.Set); attr.Len() > 0 {
var s []string
for _, v := range attr.List() {
s = append(s, v.(string))
}
manager.TargetPools = s
}
log.Printf("[DEBUG] InstanceGroupManager insert request: %#v", manager)
op, err := config.clientCompute.InstanceGroupManagers.Insert(
config.Project, d.Get("zone").(string), manager).Do()
if err != nil {
return fmt.Errorf("Error creating InstanceGroupManager: %s", err)
}
// The create request was accepted; store the ID now so the resource stays tracked in state even if the wait below fails
d.SetId(manager.Name)
// Wait for the operation to complete
op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "create")
if err != nil {
return err
}
if op.Error != nil {
// The resource didn't actually create
d.SetId("")
// Return the error
return OperationError(*op.Error)
}
return resourceComputeInstanceGroupManagerRead(d, meta)
}
func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
manager, err := config.clientCompute.InstanceGroupManagers.Get(
config.Project, d.Get("zone").(string), d.Id()).Do()
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
// The resource doesn't exist anymore
d.SetId("")
return nil
}
return fmt.Errorf("Error reading instance group manager: %s", err)
}
// Set computed fields
d.Set("fingerprint", manager.Fingerprint)
d.Set("instance_group", manager.InstanceGroup)
d.Set("target_size", manager.TargetSize)
d.Set("self_link", manager.SelfLink)
return nil
}
func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
d.Partial(true)
// If target_pools changes then update
if d.HasChange("target_pools") {
var targetPools []string
if attr := d.Get("target_pools").(*schema.Set); attr.Len() > 0 {
for _, v := range attr.List() {
targetPools = append(targetPools, v.(string))
}
}
// Build the parameter
setTargetPools := &compute.InstanceGroupManagersSetTargetPoolsRequest{
Fingerprint: d.Get("fingerprint").(string),
TargetPools: targetPools,
}
op, err := config.clientCompute.InstanceGroupManagers.SetTargetPools(
config.Project, d.Get("zone").(string), d.Id(), setTargetPools).Do()
if err != nil {
return fmt.Errorf("Error updating InstanceGroupManager: %s", err)
}
// Wait for the operation to complete
op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "update TargetPools")
if err != nil {
return err
}
if op.Error != nil {
return OperationError(*op.Error)
}
d.SetPartial("target_pools")
}
// If instance_template changes then update
if d.HasChange("instance_template") {
// Build the parameter
setInstanceTemplate := &compute.InstanceGroupManagersSetInstanceTemplateRequest{
InstanceTemplate: d.Get("instance_template").(string),
}
op, err := config.clientCompute.InstanceGroupManagers.SetInstanceTemplate(
config.Project, d.Get("zone").(string), d.Id(), setInstanceTemplate).Do()
if err != nil {
return fmt.Errorf("Error updating InstanceGroupManager: %s", err)
}
// Wait for the operation to complete
op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "update instance template")
if err != nil {
return err
}
if op.Error != nil {
return OperationError(*op.Error)
}
d.SetPartial("instance_template")
}
// If size changes trigger a resize
if d.HasChange("target_size") {
if v, ok := d.GetOk("target_size"); ok {
// Only do anything if the new size is set
target_size := int64(v.(int))
op, err := config.clientCompute.InstanceGroupManagers.Resize(
config.Project, d.Get("zone").(string), d.Id(), target_size).Do()
if err != nil {
return fmt.Errorf("Error updating InstanceGroupManager: %s", err)
}
// Wait for the operation to complete
op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "update target_size")
if err != nil {
return err
}
if op.Error != nil {
return OperationError(*op.Error)
}
}
d.SetPartial("target_size")
}
d.Partial(false)
return resourceComputeInstanceGroupManagerRead(d, meta)
}
func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
zone := d.Get("zone").(string)
op, err := config.clientCompute.InstanceGroupManagers.Delete(config.Project, zone, d.Id()).Do()
if err != nil {
return fmt.Errorf("Error deleting instance group manager: %s", err)
}
// Wait for the operation to complete
op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "delete")
if err != nil {
return err
}
if op.Error != nil {
// The resource didn't actually delete
d.SetId("")
// Return the error
return OperationError(*op.Error)
}
d.SetId("")
return nil
}
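
The `target_pools` attribute above is declared as a `schema.TypeSet` hashed with `hashcode.String`, so duplicate pool URLs and ordering differences never produce spurious diffs. A minimal standalone sketch of that behaviour (illustrative only, not provider code; the pool names are made up):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/hashcode"
	"github.com/hashicorp/terraform/helper/schema"
)

func main() {
	// A schema.Set keyed by hashcode.String collapses duplicate elements and
	// ignores ordering, which is why reordering target_pools in config does
	// not trigger an update.
	pools := &schema.Set{F: func(v interface{}) int {
		return hashcode.String(v.(string))
	}}
	pools.Add("pool-a")
	pools.Add("pool-a") // duplicate, collapsed into one element
	pools.Add("pool-b")
	fmt.Println(pools.Len()) // 2
}
```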

View File

@ -0,0 +1,298 @@
package google
import (
"fmt"
"testing"
"google.golang.org/api/compute/v1"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccInstanceGroupManager_basic(t *testing.T) {
var manager compute.InstanceGroupManager
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckInstanceGroupManagerDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccInstanceGroupManager_basic,
Check: resource.ComposeTestCheckFunc(
testAccCheckInstanceGroupManagerExists(
"google_compute_instance_group_manager.igm-basic", &manager),
),
},
},
})
}
func TestAccInstanceGroupManager_update(t *testing.T) {
var manager compute.InstanceGroupManager
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckInstanceGroupManagerDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccInstanceGroupManager_update,
Check: resource.ComposeTestCheckFunc(
testAccCheckInstanceGroupManagerExists(
"google_compute_instance_group_manager.igm-update", &manager),
),
},
resource.TestStep{
Config: testAccInstanceGroupManager_update2,
Check: resource.ComposeTestCheckFunc(
testAccCheckInstanceGroupManagerExists(
"google_compute_instance_group_manager.igm-update", &manager),
testAccCheckInstanceGroupManagerUpdated(
"google_compute_instance_group_manager.igm-update", 3,
"google_compute_target_pool.igm-update", "terraform-test-igm-update2"),
),
},
},
})
}
func testAccCheckInstanceGroupManagerDestroy(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
for _, rs := range s.RootModule().Resources {
if rs.Type != "google_compute_instance_group_manager" {
continue
}
_, err := config.clientCompute.InstanceGroupManagers.Get(
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
if err != nil {
return fmt.Errorf("InstanceGroupManager still exists")
}
}
return nil
}
func testAccCheckInstanceGroupManagerExists(n string, manager *compute.InstanceGroupManager) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No ID is set")
}
config := testAccProvider.Meta().(*Config)
found, err := config.clientCompute.InstanceGroupManagers.Get(
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
if err != nil {
return err
}
if found.Name != rs.Primary.ID {
return fmt.Errorf("InstanceGroupManager not found")
}
*manager = *found
return nil
}
}
func testAccCheckInstanceGroupManagerUpdated(n string, size int64, targetPool string, template string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No ID is set")
}
config := testAccProvider.Meta().(*Config)
manager, err := config.clientCompute.InstanceGroupManagers.Get(
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
if err != nil {
return err
}
// The target pool cannot be checked because instance creation is asynchronous,
// but the target_size can be verified.
if manager.TargetSize != size {
return fmt.Errorf("instance count incorrect")
}
// check that the instance template updated
instanceTemplate, err := config.clientCompute.InstanceTemplates.Get(
config.Project, template).Do()
if err != nil {
return fmt.Errorf("Error reading instance template: %s", err)
}
if instanceTemplate.Name != template {
return fmt.Errorf("instance template not updated")
}
return nil
}
}
const testAccInstanceGroupManager_basic = `
resource "google_compute_instance_template" "igm-basic" {
name = "terraform-test-igm-basic"
machine_type = "n1-standard-1"
can_ip_forward = false
tags = ["foo", "bar"]
disk {
source_image = "debian-cloud/debian-7-wheezy-v20140814"
auto_delete = true
boot = true
}
network_interface {
network = "default"
}
metadata {
foo = "bar"
}
service_account {
scopes = ["userinfo-email", "compute-ro", "storage-ro"]
}
}
resource "google_compute_target_pool" "igm-basic" {
description = "Resource created for Terraform acceptance testing"
name = "terraform-test-igm-basic"
session_affinity = "CLIENT_IP_PROTO"
}
resource "google_compute_instance_group_manager" "igm-basic" {
description = "Terraform test instance group manager"
name = "terraform-test-igm-basic"
instance_template = "${google_compute_instance_template.igm-basic.self_link}"
target_pools = ["${google_compute_target_pool.igm-basic.self_link}"]
base_instance_name = "igm-basic"
zone = "us-central1-c"
target_size = 2
}`
const testAccInstanceGroupManager_update = `
resource "google_compute_instance_template" "igm-update" {
name = "terraform-test-igm-update"
machine_type = "n1-standard-1"
can_ip_forward = false
tags = ["foo", "bar"]
disk {
source_image = "debian-cloud/debian-7-wheezy-v20140814"
auto_delete = true
boot = true
}
network_interface {
network = "default"
}
metadata {
foo = "bar"
}
service_account {
scopes = ["userinfo-email", "compute-ro", "storage-ro"]
}
}
resource "google_compute_target_pool" "igm-update" {
description = "Resource created for Terraform acceptance testing"
name = "terraform-test-igm-update"
session_affinity = "CLIENT_IP_PROTO"
}
resource "google_compute_instance_group_manager" "igm-update" {
description = "Terraform test instance group manager"
name = "terraform-test-igm-update"
instance_template = "${google_compute_instance_template.igm-update.self_link}"
target_pools = ["${google_compute_target_pool.igm-update.self_link}"]
base_instance_name = "igm-update"
zone = "us-central1-c"
target_size = 2
}`
// Change IGM's instance template and target size
const testAccInstanceGroupManager_update2 = `
resource "google_compute_instance_template" "igm-update" {
name = "terraform-test-igm-update"
machine_type = "n1-standard-1"
can_ip_forward = false
tags = ["foo", "bar"]
disk {
source_image = "debian-cloud/debian-7-wheezy-v20140814"
auto_delete = true
boot = true
}
network_interface {
network = "default"
}
metadata {
foo = "bar"
}
service_account {
scopes = ["userinfo-email", "compute-ro", "storage-ro"]
}
}
resource "google_compute_target_pool" "igm-update" {
description = "Resource created for Terraform acceptance testing"
name = "terraform-test-igm-update"
session_affinity = "CLIENT_IP_PROTO"
}
resource "google_compute_instance_template" "igm-update2" {
name = "terraform-test-igm-update2"
machine_type = "n1-standard-1"
can_ip_forward = false
tags = ["foo", "bar"]
disk {
source_image = "debian-cloud/debian-7-wheezy-v20140814"
auto_delete = true
boot = true
}
network_interface {
network = "default"
}
metadata {
foo = "bar"
}
service_account {
scopes = ["userinfo-email", "compute-ro", "storage-ro"]
}
}
resource "google_compute_instance_group_manager" "igm-update" {
description = "Terraform test instance group manager"
name = "terraform-test-igm-update"
instance_template = "${google_compute_instance_template.igm-update2.self_link}"
target_pools = ["${google_compute_target_pool.igm-update.self_link}"]
base_instance_name = "igm-update"
zone = "us-central1-c"
target_size = 3
}`

View File

@ -227,7 +227,9 @@ func resourceComputeInstanceTemplate() *schema.Resource {
}
}
func buildDisks(d *schema.ResourceData, meta interface{}) []*compute.AttachedDisk {
func buildDisks(d *schema.ResourceData, meta interface{}) ([]*compute.AttachedDisk, error) {
config := meta.(*Config)
disksCount := d.Get("disk.#").(int)
disks := make([]*compute.AttachedDisk, 0, disksCount)
@ -267,7 +269,14 @@ func buildDisks(d *schema.ResourceData, meta interface{}) []*compute.AttachedDis
}
if v, ok := d.GetOk(prefix + ".source_image"); ok {
disk.InitializeParams.SourceImage = v.(string)
imageName := v.(string)
imageUrl, err := resolveImage(config, imageName)
if err != nil {
return nil, fmt.Errorf(
"Error resolving image name '%s': %s",
imageName, err)
}
disk.InitializeParams.SourceImage = imageUrl
}
}
@ -286,7 +295,7 @@ func buildDisks(d *schema.ResourceData, meta interface{}) []*compute.AttachedDis
disks = append(disks, &disk)
}
return disks
return disks, nil
}
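
The hunk above now routes `source_image` through `resolveImage`, whose implementation is not shown in this diff. As a rough illustration of the mapping it implies (the updated acceptance test below expects the full API URL), here is a hypothetical sketch, not the provider's actual helper:

```go
package main

import (
	"fmt"
	"strings"
)

// resolveImageSketch is a hypothetical stand-in for the provider's resolveImage
// helper: it turns a "<project>/<image>" shorthand into the full Compute API
// image URL that InitializeParams.SourceImage expects.
func resolveImageSketch(name string) (string, error) {
	parts := strings.SplitN(name, "/", 2)
	if len(parts) != 2 {
		return "", fmt.Errorf("expected <project>/<image>, got %q", name)
	}
	return fmt.Sprintf(
		"https://www.googleapis.com/compute/v1/projects/%s/global/images/%s",
		parts[0], parts[1]), nil
}

func main() {
	url, err := resolveImageSketch("debian-cloud/debian-7-wheezy-v20140814")
	if err != nil {
		panic(err)
	}
	fmt.Println(url)
}
```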
func buildNetworks(d *schema.ResourceData, meta interface{}) (error, []*compute.NetworkInterface) {
@ -330,7 +339,11 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac
instanceProperties.CanIpForward = d.Get("can_ip_forward").(bool)
instanceProperties.Description = d.Get("instance_description").(string)
instanceProperties.MachineType = d.Get("machine_type").(string)
instanceProperties.Disks = buildDisks(d, meta)
disks, err := buildDisks(d, meta)
if err != nil {
return err
}
instanceProperties.Disks = disks
metadata, err := resourceInstanceMetadata(d)
if err != nil {
return err

View File

@ -24,7 +24,7 @@ func TestAccComputeInstanceTemplate_basic(t *testing.T) {
"google_compute_instance_template.foobar", &instanceTemplate),
testAccCheckComputeInstanceTemplateTag(&instanceTemplate, "foo"),
testAccCheckComputeInstanceTemplateMetadata(&instanceTemplate, "foo", "bar"),
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "debian-7-wheezy-v20140814", true, true),
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140814", true, true),
),
},
},
@ -64,7 +64,7 @@ func TestAccComputeInstanceTemplate_disks(t *testing.T) {
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceTemplateExists(
"google_compute_instance_template.foobar", &instanceTemplate),
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "debian-7-wheezy-v20140814", true, true),
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140814", true, true),
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "terraform-test-foobar", false, false),
),
},

View File

@ -140,7 +140,8 @@ func (h *UiHook) PostApply(
}
if applyerr != nil {
msg = fmt.Sprintf("Error: %s", applyerr)
// Errors are collected and printed in ApplyCommand, no need to duplicate
return terraform.HookActionContinue, nil
}
h.ui.Output(h.Colorize.Color(fmt.Sprintf(

385
deps/v0-6-1.json vendored Normal file
View File

@ -0,0 +1,385 @@
{
"ImportPath": "github.com/hashicorp/terraform",
"GoVersion": "go1.4.2",
"Packages": [
"./..."
],
"Deps": [
{
"ImportPath": "code.google.com/p/go-uuid/uuid",
"Comment": "null-15",
"Rev": "35bc42037350f0078e3c974c6ea690f1926603ab"
},
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/core/http",
"Comment": "v1.2-216-g9197765",
"Rev": "91977650587a7bc48318c0430649d7fea886f111"
},
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/core/tls",
"Comment": "v1.2-216-g9197765",
"Rev": "91977650587a7bc48318c0430649d7fea886f111"
},
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/management",
"Comment": "v1.2-216-g9197765",
"Rev": "91977650587a7bc48318c0430649d7fea886f111"
},
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/storage",
"Comment": "v1.2-216-g9197765",
"Rev": "91977650587a7bc48318c0430649d7fea886f111"
},
{
"ImportPath": "github.com/Azure/go-pkcs12",
"Rev": "a635c0684cd517745ca5c9552a312627791d5ba0"
},
{
"ImportPath": "github.com/armon/circbuf",
"Rev": "f092b4f207b6e5cce0569056fba9e1a2735cb6cf"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/endpoints",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/ec2query",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/json/jsonutil",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/jsonrpc",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/query",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/rest",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/restjson",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/restxml",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/signer/v4",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/autoscaling",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/cloudwatch",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/dynamodb",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/ec2",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/ecs",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/elasticache",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/elb",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/iam",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/kinesis",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/lambda",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/rds",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/route53",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/s3",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/sns",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/sqs",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/awslabs/aws-sdk-go/aws/credentials",
"Comment": "v0.6.7-3-g2a6648c",
"Rev": "2a6648c479175ce005bca95780f948a196a46062"
},
{
"ImportPath": "github.com/cyberdelia/heroku-go/v3",
"Rev": "594d483b9b6a8ddc7cd2f1e3e7d1de92fa2de665"
},
{
"ImportPath": "github.com/dylanmei/iso8601",
"Rev": "2075bf119b58e5576c6ed9f867b8f3d17f2e54d4"
},
{
"ImportPath": "github.com/dylanmei/winrmtest",
"Rev": "3e9661c52c45dab9a8528966a23d421922fca9b9"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient",
"Rev": "f6e9f5396e0e8f34472efe443d0cb7f9af162b88"
},
{
"ImportPath": "github.com/hashicorp/atlas-go/archive",
"Comment": "20141209094003-71-g785958f",
"Rev": "785958ffcd6a8857890651f3f4d9a289ddc27633"
},
{
"ImportPath": "github.com/hashicorp/atlas-go/v1",
"Comment": "20141209094003-71-g785958f",
"Rev": "785958ffcd6a8857890651f3f4d9a289ddc27633"
},
{
"ImportPath": "github.com/hashicorp/consul/api",
"Comment": "v0.5.2-159-gc34bcb4",
"Rev": "c34bcb45c670af076846826ea72c436fbd0e2c35"
},
{
"ImportPath": "github.com/hashicorp/errwrap",
"Rev": "7554cd9344cec97297fa6649b055a8c98c2a1e55"
},
{
"ImportPath": "github.com/hashicorp/go-checkpoint",
"Rev": "88326f6851319068e7b34981032128c0b1a6524d"
},
{
"ImportPath": "github.com/hashicorp/go-multierror",
"Rev": "56912fb08d85084aa318edcf2bba735b97cf35c5"
},
{
"ImportPath": "github.com/hashicorp/go-version",
"Rev": "999359b6b7a041ce16e695d51e92145b83f01087"
},
{
"ImportPath": "github.com/hashicorp/hcl",
"Rev": "54864211433d45cb780682431585b3e573b49e4a"
},
{
"ImportPath": "github.com/hashicorp/yamux",
"Rev": "8e00b30457b1486b012f204b82ec92ae6b547de8"
},
{
"ImportPath": "github.com/imdario/mergo",
"Comment": "0.2.0-5-g61a5285",
"Rev": "61a52852277811e93e06d28e0d0c396284a7730b"
},
{
"ImportPath": "github.com/masterzen/simplexml/dom",
"Rev": "95ba30457eb1121fa27753627c774c7cd4e90083"
},
{
"ImportPath": "github.com/masterzen/winrm/soap",
"Rev": "23128e7b3dc1f8091aeff7aae82cb2112ce53c75"
},
{
"ImportPath": "github.com/masterzen/winrm/winrm",
"Rev": "23128e7b3dc1f8091aeff7aae82cb2112ce53c75"
},
{
"ImportPath": "github.com/masterzen/xmlpath",
"Rev": "13f4951698adc0fa9c1dda3e275d489a24201161"
},
{
"ImportPath": "github.com/mitchellh/cli",
"Rev": "8102d0ed5ea2709ade1243798785888175f6e415"
},
{
"ImportPath": "github.com/mitchellh/colorstring",
"Rev": "61164e49940b423ba1f12ddbdf01632ac793e5e9"
},
{
"ImportPath": "github.com/mitchellh/copystructure",
"Rev": "6fc66267e9da7d155a9d3bd489e00dad02666dc6"
},
{
"ImportPath": "github.com/mitchellh/go-homedir",
"Rev": "1f6da4a72e57d4e7edd4a7295a585e0a3999a2d4"
},
{
"ImportPath": "github.com/mitchellh/go-linereader",
"Rev": "07bab5fdd9580500aea6ada0e09df4aa28e68abd"
},
{
"ImportPath": "github.com/mitchellh/mapstructure",
"Rev": "281073eb9eb092240d33ef253c404f1cca550309"
},
{
"ImportPath": "github.com/mitchellh/osext",
"Rev": "0dd3f918b21bec95ace9dc86c7e70266cfc5c702"
},
{
"ImportPath": "github.com/mitchellh/packer/common/uuid",
"Comment": "v0.8.2-4-g2010a0c",
"Rev": "2010a0c966175b3c0fa8d158a879c10acbba0d76"
},
{
"ImportPath": "github.com/mitchellh/panicwrap",
"Rev": "45cbfd3bae250c7676c077fb275be1a2968e066a"
},
{
"ImportPath": "github.com/mitchellh/prefixedio",
"Rev": "89d9b535996bf0a185f85b59578f2e245f9e1724"
},
{
"ImportPath": "github.com/mitchellh/reflectwalk",
"Rev": "eecf4c70c626c7cfbb95c90195bc34d386c74ac6"
},
{
"ImportPath": "github.com/nu7hatch/gouuid",
"Rev": "179d4d0c4d8d407a32af483c2354df1d2c91e6c3"
},
{
"ImportPath": "github.com/packer-community/winrmcp/winrmcp",
"Rev": "743b1afe5ee3f6d5ba71a0d50673fa0ba2123d6b"
},
{
"ImportPath": "github.com/pearkes/cloudflare",
"Rev": "19e280b056f3742e535ea12ae92a37ea7767ea82"
},
{
"ImportPath": "github.com/pearkes/digitalocean",
"Rev": "e966f00c2d9de5743e87697ab77c7278f5998914"
},
{
"ImportPath": "github.com/pearkes/dnsimple",
"Rev": "2a807d118c9e52e94819f414a6ec0293b45cad01"
},
{
"ImportPath": "github.com/pearkes/mailgun",
"Rev": "5b02e7e9ffee9869f81393e80db138f6ff726260"
},
{
"ImportPath": "github.com/rackspace/gophercloud",
"Comment": "v1.0.0-623-ge83aa01",
"Rev": "e83aa011e019917c7bd951444d61c42431b4d21d"
},
{
"ImportPath": "github.com/satori/go.uuid",
"Rev": "afe1e2ddf0f05b7c29d388a3f8e76cb15c2231ca"
},
{
"ImportPath": "github.com/soniah/dnsmadeeasy",
"Comment": "v1.1-2-g5578a8c",
"Rev": "5578a8c15e33958c61cf7db720b6181af65f4a9e"
},
{
"ImportPath": "github.com/vaughan0/go-ini",
"Rev": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"
},
{
"ImportPath": "github.com/xanzy/go-cloudstack/cloudstack",
"Comment": "v1.2.0-36-g0031956",
"Rev": "00319560eeca5e6ffef3ba048c97c126a465854f"
},
{
"ImportPath": "golang.org/x/crypto/ssh",
"Rev": "7d5b0be716b9d6d4269afdaae10032bb296d3cdf"
},
{
"ImportPath": "golang.org/x/net/context",
"Rev": "f0cf018861e2b54077eced91659e255072b5f215"
},
{
"ImportPath": "golang.org/x/oauth2",
"Rev": "8914e5017ca260f2a3a1575b1e6868874050d95e"
},
{
"ImportPath": "google.golang.org/api/compute/v1",
"Rev": "18450f4e95c7e76ce3a5dc3a8cb7178ab6d56121"
},
{
"ImportPath": "google.golang.org/api/container/v1",
"Rev": "18450f4e95c7e76ce3a5dc3a8cb7178ab6d56121"
},
{
"ImportPath": "google.golang.org/api/dns/v1",
"Rev": "18450f4e95c7e76ce3a5dc3a8cb7178ab6d56121"
},
{
"ImportPath": "google.golang.org/api/googleapi",
"Rev": "18450f4e95c7e76ce3a5dc3a8cb7178ab6d56121"
},
{
"ImportPath": "google.golang.org/api/storage/v1",
"Rev": "18450f4e95c7e76ce3a5dc3a8cb7178ab6d56121"
},
{
"ImportPath": "google.golang.org/cloud/compute/metadata",
"Rev": "522a8ceb4bb83c2def27baccf31d646bce11a4b2"
},
{
"ImportPath": "google.golang.org/cloud/internal",
"Rev": "522a8ceb4bb83c2def27baccf31d646bce11a4b2"
}
]
}

View File

@ -9,13 +9,18 @@ import (
const UniqueIdPrefix = `terraform-`
// Helper for a resource to generate a unique identifier
// Helper for a resource to generate a unique identifier w/ default prefix
func UniqueId() string {
return PrefixedUniqueId(UniqueIdPrefix)
}
// Helper for a resource to generate a unique identifier w/ given prefix
//
// This uses a simple RFC 4122 v4 UUID with some basic cosmetic filters
// applied (base32, remove padding, downcase) to make visually distinguishing
// identifiers easier.
func UniqueId() string {
return fmt.Sprintf("%s%s", UniqueIdPrefix,
func PrefixedUniqueId(prefix string) string {
return fmt.Sprintf("%s%s", prefix,
strings.ToLower(
strings.Replace(
base32.StdEncoding.EncodeToString(uuidV4()),

View File

@ -9,4 +9,4 @@ DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"
cd $DIR
# Push the subtree (force)
git push heroku `git subtree split --prefix website master`:master --force
git push heroku `git subtree split --prefix website HEAD`:master --force

View File

@ -67,7 +67,7 @@ func s3Factory(conf map[string]string) (Client, error) {
awsConfig := &aws.Config{
Credentials: credentialsProvider,
Region: regionName,
Region: aws.String(regionName),
}
nativeClient := s3.New(awsConfig)
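
This change reflects the AWS SDK's `Config.Region` becoming a `*string`, which is also why the test in the next file reads it back through a dereference. A tiny self-contained illustration of the pointer-wrapping helper (the region value is illustrative):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Config.Region is a *string in this SDK version, so it is built with
	// aws.String and dereferenced when read back.
	cfg := &aws.Config{Region: aws.String("us-west-1")}
	fmt.Println(*cfg.Region) // us-west-1
}
```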

View File

@ -43,7 +43,7 @@ func TestS3Factory(t *testing.T) {
s3Client := client.(*S3Client)
if s3Client.nativeClient.Config.Region != "us-west-1" {
if *s3Client.nativeClient.Config.Region != "us-west-1" {
t.Fatalf("Incorrect region was populated")
}
if s3Client.bucketName != "foo" {

View File

@ -78,6 +78,46 @@ func TestContext2Apply_providerAlias(t *testing.T) {
}
}
// GH-2870
func TestContext2Apply_providerWarning(t *testing.T) {
m := testModule(t, "apply-provider-warning")
p := testProvider("aws")
p.ApplyFn = testApplyFn
p.DiffFn = testDiffFn
p.ValidateFn = func(c *ResourceConfig) (ws []string, es []error) {
ws = append(ws, "Just a warning")
return
}
ctx := testContext2(t, &ContextOpts{
Module: m,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
})
if _, err := ctx.Plan(); err != nil {
t.Fatalf("err: %s", err)
}
state, err := ctx.Apply()
if err != nil {
t.Fatalf("err: %s", err)
}
actual := strings.TrimSpace(state.String())
expected := strings.TrimSpace(`
aws_instance.foo:
ID = foo
`)
if actual != expected {
t.Fatalf("got: \n%s\n\nexpected:\n%s", actual, expected)
}
if !p.ConfigureCalled {
t.Fatalf("provider Configure() was never called!")
}
}
func TestContext2Apply_emptyModule(t *testing.T) {
m := testModule(t, "apply-empty-module")
p := testProvider("aws")

View File

@ -94,7 +94,8 @@ func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) {
// if we have one, otherwise we just output it.
if err != nil {
if n.Error != nil {
*n.Error = multierror.Append(*n.Error, err)
helpfulErr := fmt.Errorf("%s: %s", n.Info.Id, err.Error())
*n.Error = multierror.Append(*n.Error, helpfulErr)
} else {
return nil, err
}
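
A small standalone sketch of the pattern introduced above, using `github.com/hashicorp/go-multierror` (the resource IDs and error messages are made up): prefixing each error with its resource ID before appending keeps the aggregated output attributable to a specific resource.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func main() {
	var result error
	for _, id := range []string{"aws_instance.foo", "aws_instance.bar"} {
		err := errors.New("apply failed") // placeholder error
		// Prefix with the resource ID before appending, mirroring the EvalApply change.
		result = multierror.Append(result, fmt.Errorf("%s: %s", id, err))
	}
	fmt.Println(result)
}
```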

View File

@ -1,6 +1,7 @@
package terraform
import (
"fmt"
"log"
)
@ -35,7 +36,7 @@ func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) {
// Refresh!
state, err = provider.Refresh(n.Info, state)
if err != nil {
return nil, err
return nil, fmt.Errorf("%s: %s", n.Info.Id, err.Error())
}
// Call post-refresh hook

View File

@ -40,9 +40,8 @@ func ProviderEvalTree(n string, config *config.RawConfig) EvalNode {
},
})
// Apply stuff
seq = append(seq, &EvalOpFilter{
Ops: []walkOperation{walkValidate, walkRefresh, walkPlan, walkApply},
Ops: []walkOperation{walkValidate},
Node: &EvalSequence{
Nodes: []EvalNode{
&EvalGetProvider{
@ -70,6 +69,32 @@ func ProviderEvalTree(n string, config *config.RawConfig) EvalNode {
},
})
// Apply stuff
seq = append(seq, &EvalOpFilter{
Ops: []walkOperation{walkRefresh, walkPlan, walkApply},
Node: &EvalSequence{
Nodes: []EvalNode{
&EvalGetProvider{
Name: n,
Output: &provider,
},
&EvalInterpolate{
Config: config,
Output: &resourceConfig,
},
&EvalBuildProviderConfig{
Provider: n,
Config: &resourceConfig,
Output: &resourceConfig,
},
&EvalSetProviderConfig{
Provider: n,
Config: &resourceConfig,
},
},
},
})
// We configure on everything but validate, since validate may
// not have access to all the variables.
seq = append(seq, &EvalOpFilter{

View File

@ -0,0 +1 @@
resource "aws_instance" "foo" {}

View File

@ -1,7 +1,7 @@
package terraform
// The main version number that is being run at the moment.
const Version = "0.6.1"
const Version = "0.6.2"
// A pre-release marker for the version. If this is "" (empty string)
// then it means that it is a final release. Otherwise, this is a pre-release

View File

@ -57,7 +57,7 @@ resource "aws_elb" "bar" {
The following arguments are supported:
* `name` - (Required) The name of the ELB
* `name` - (Optional) The name of the ELB. By default generated by Terraform.
* `availability_zones` - (Required for an EC2-classic ELB) The AZ's to serve traffic in.
* `security_groups` - (Optional) A list of security group IDs to assign to the ELB.
* `subnets` - (Required for a VPC ELB) A list of subnet IDs to attach to the ELB.

View File

@ -32,6 +32,7 @@ The following arguments are supported:
* `private_ips` - (Optional) List of private IPs to assign to the ENI.
* `security_groups` - (Optional) List of security group IDs to assign to the ENI.
* `attachment` - (Required) Block to define the attachment of the ENI. Documented below.
* `source_dest_check` - (Optional) Whether to enable source destination checking for the ENI. Default true.
* `tags` - (Optional) A mapping of tags to assign to the resource.
The `attachment` block supports:
@ -47,5 +48,6 @@ The following attributes are exported:
* `private_ips` - List of private IPs assigned to the ENI.
* `security_groups` - List of security groups attached to the ENI.
* `attachment` - Block defining the attachment of the ENI.
* `source_dest_check` - Whether source destination checking is enabled.
* `tags` - Tags assigned to the ENI.

View File

@ -67,3 +67,4 @@ The following attributes are exported:
* `hosted_zone_id` - The [Route 53 Hosted Zone ID](http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region.
* `region` - The AWS region this bucket resides in.
* `website_endpoint` - The website endpoint, if the bucket is configured with a website. If not, this will be an empty string.
* `website_domain` - The domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records.

View File

@ -25,10 +25,10 @@ resource "aws_sqs_queue" "terraform_queue" {
The following arguments are supported:
* `name` - (Required) This is the human-readable name of the queue
* `visibility_timeout_seconds` - (Optional) The time in seconds that the delivery of all messages in the queue will be delayed. An integer from 0 to 900 (15 minutes). The default for this attribute is 30 seconds
* `visibility_timeout_seconds` - (Optional) The visibility timeout for the queue. An integer from 0 to 43200 (12 hours). The default for this attribute is 30. For more information about visibility timeout see AWS docs.
* `message_retention_seconds` - (Optional) The number of seconds Amazon SQS retains a message. Integer representing seconds, from 60 (1 minute) to 1209600 (14 days). The default for this attribute is 345600 (4 days).
* `max_message_size` - (Optional) The limit of how many bytes a message can contain before Amazon SQS rejects it. An integer from 1024 bytes (1 KiB) up to 262144 bytes (256 KiB). The default for this attribute is 262144 (256 KiB).
* `delay_seconds` - (Optional) The visibility timeout for the queue. An integer from 0 to 43200 (12 hours). The default for this attribute is 30. For more information about visibility timeout.
* `delay_seconds` - (Optional) The time in seconds that the delivery of all messages in the queue will be delayed. An integer from 0 to 900 (15 minutes). The default for this attribute is 0 seconds.
* `receive_wait_time_seconds` - (Optional) The time for which a ReceiveMessage call will wait for a message to arrive (long polling) before returning. An integer from 0 to 20 (seconds). The default for this attribute is 0, meaning that the call will return immediately.
* `policy` - (Optional) The JSON policy for the SQS queue

View File

@ -72,7 +72,8 @@ The following arguments are supported:
* `storage_service_name` - (Optional) The name of an existing storage account
within the subscription which will be used to store the VHDs of this
instance. Changing this forces a new resource to be created.
instance. Changing this forces a new resource to be created. **A Storage
Service is required if you are using a Platform Image**
* `reverse_dns` - (Optional) The DNS address to which the IP address of the
hosted service resolves when queried using a reverse DNS query. Changing

View File

@ -26,7 +26,7 @@ resource "azure_storage_service" "tfstor" {
The following arguments are supported:
* `name` - (Required) The name of the storage service. Must be between 4 and 24
lowercase-only characters or digits Must be unique on Azure.
lowercase-only characters or digits. Must be unique on Azure.
* `location` - (Required) The location where the storage service should be created.
For a list of all Azure locations, please consult [this link](http://azure.microsoft.com/en-us/regions/).

View File

@ -19,7 +19,7 @@ Use the navigation to the left to read about the available resources.
```
# Configure the Google Cloud provider
provider "google" {
account_file = "account.json"
account_file = "${file("account.json")}"
project = "my-gce-project"
region = "us-central1"
}
@ -34,12 +34,12 @@ resource "google_compute_instance" "default" {
The following keys can be used to configure the provider.
* `account_file` - (Required) Path to the JSON file used to describe your
* `account_file` - (Required) Contents of the JSON file used to describe your
account credentials, downloaded from Google Cloud Console. More details on
retrieving this file are below. The _account file_ can be "" if you
are running terraform from a GCE instance with a properly-configured [Compute
Engine Service Account](https://cloud.google.com/compute/docs/authentication).
This can also be specified with the `GOOGLE_ACCOUNT_FILE` shell environment
retrieving this file are below. The `account file` can be "" if you are running
terraform from a GCE instance with a properly-configured [Compute Engine
Service Account](https://cloud.google.com/compute/docs/authentication). This
can also be specified with the `GOOGLE_ACCOUNT_FILE` shell environment
variable.
* `project` - (Required) The ID of the project to apply any resources to. This

View File

@ -0,0 +1,135 @@
---
layout: "google"
page_title: "Google: google_compute_autoscaler"
sidebar_current: "docs-google-resource-compute-autoscaler"
description: |-
Manages an Autoscaler within GCE.
---
# google\_compute\_autoscaler
A Compute Engine Autoscaler automatically adds or removes virtual machines from
a managed instance group based on increases or decreases in load. This allows
your applications to gracefully handle increases in traffic and reduces cost
when the need for resources is lower. You just define the autoscaling policy and
the autoscaler performs automatic scaling based on the measured load. For more
information, see [the official
documentation](https://cloud.google.com/compute/docs/autoscaler/) and
[API](https://cloud.google.com/compute/docs/autoscaler/v1beta2/autoscalers).
## Example Usage
```
resource "google_compute_instance_template" "foobar" {
name = "foobar"
machine_type = "n1-standard-1"
can_ip_forward = false
tags = ["foo", "bar"]
disk {
source_image = "debian-cloud/debian-7-wheezy-v20140814"
}
network_interface {
network = "default"
}
metadata {
foo = "bar"
}
service_account {
scopes = ["userinfo-email", "compute-ro", "storage-ro"]
}
}
resource "google_compute_target_pool" "foobar" {
name = "foobar"
}
resource "google_compute_instance_group_manager" "foobar" {
name = "foobar"
instance_template = "${google_compute_instance_template.foobar.self_link}"
target_pools = ["${google_compute_target_pool.foobar.self_link}"]
base_instance_name = "foobar"
zone = "us-central1-f"
}
resource "google_compute_autoscaler" "foobar" {
name = "foobar"
zone = "us-central1-f"
target = "${google_compute_instance_group_manager.foobar.self_link}"
autoscaling_policy = {
max_replicas = 5
min_replicas = 1
cooldown_period = 60
cpu_utilization = {
target = 0.5
}
}
}
```
## Argument Reference
The following arguments are supported:
* `description` - (Optional) An optional textual description of the autoscaler.
* `target` - (Required) The full URL to the instance group manager whose size we
control.
* `autoscaling_policy` - (Required) The parameters of the autoscaling
algorithm. Structure is documented below.
* `zone` - (Required) The zone of the target.
The `autoscaling_policy` block contains:
* `max_replicas` - (Required) The group will never be larger than this.
* `min_replicas` - (Required) The group will never be smaller than this.
* `cooldown_period` - (Optional) Period to wait between changes. This should be
at least double the time your instances take to start up.
* `cpu_utilization` - (Optional) A policy that scales when the cluster's average
CPU is above or below a given threshold. Structure is documented below.
* `metric` - (Optional) A policy that scales according to Google Cloud
Monitoring metrics. Structure is documented below.
* `load_balancing_utilization` - (Optional) A policy that scales when the load
reaches a proportion of a limit defined in the HTTP load balancer. Structure
is documented below.
The `cpu_utilization` block contains:
* `target` - The floating point threshold where CPU utilization should be. E.g.
for 50% one would specify 0.5.
The `metric` block contains (more documentation
[here](https://cloud.google.com/monitoring/api/metrics)):
* `name` - The name of the Google Cloud Monitoring metric to follow, e.g.
compute.googleapis.com/instance/network/received_bytes_count
* `type` - Either "cumulative", "delta", or "gauge".
* `target` - The desired metric value per instance. Must be a positive value.
The `load_balancing_utilization` block contains:
* `target` - The floating point threshold where load balancing utilization
should be. E.g. if the load balancer's `maxRatePerInstance` is 10 requests
per second (RPS) then setting this to 0.5 would cause the group to be scaled
such that each instance receives 5 RPS.
## Attributes Reference
The following attributes are exported:
* `self_link` - The URL of the created resource.

View File

@ -0,0 +1,65 @@
---
layout: "google"
page_title: "Google: google_compute_instance_group_manager"
sidebar_current: "docs-google-resource-compute-instance_group_manager"
description: |-
Manages an Instance Group within GCE.
---
# google\_compute\_instance\_group\_manager
The Google Compute Engine Instance Group Manager API creates and manages pools
of homogeneous Compute Engine virtual machine instances from a common instance
template. For more information, see [the official documentation](https://cloud.google.com/compute/docs/instance-groups/manager)
and [API](https://cloud.google.com/compute/docs/instance-groups/manager/v1beta2/instanceGroupManagers).
## Example Usage
```
resource "google_compute_instance_group_manager" "foobar" {
description = "Terraform test instance group manager"
name = "terraform-test"
instance_template = "${google_compute_instance_template.foobar.self_link}"
target_pools = ["${google_compute_target_pool.foobar.self_link}"]
base_instance_name = "foobar"
zone = "us-central1-a"
target_size = 2
}
```
## Argument Reference
The following arguments are supported:
* `base_instance_name` - (Required) The base instance name to use for
instances in this group. The value must be a valid [RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name.
Supported characters are lowercase letters, numbers, and hyphens (-). Instances
are named by appending a hyphen and a random four-character string to the base
instance name.
* `description` - (Optional) An optional textual description of the instance
group manager.
* `instance_template` - (Required) The full URL to an instance template from
which all new instances will be created.
* `name` - (Required) The name of the instance group manager. Must be 1-63
characters long and comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt).
Supported characters include lowercase letters, numbers, and hyphens.
* `target_size` - (Optional) If not given at creation time, this defaults to 1. Do not specify this
if you are managing the group with an autoscaler, as this will cause fighting.
* `target_pools` - (Required) The full URL of all target pools to which new
instances in the group are added. Updating the target pool values does not
affect existing instances.
* `zone` - (Required) The zone that instances in this group should be created in.
## Attributes Reference
The following attributes are exported:
* `instance_group` - The full URL of the instance group created by the manager.
* `self_link` - The URL of the created resource.

View File

@ -32,6 +32,24 @@
ga('send', 'pageview');
</script>
<script type="text/javascript">
adroll_adv_id = "6QAAFJDIWBG3DJBDRJ7BEX";
adroll_pix_id = "PYT5HSNKNRDS7LMUR5B6YG";
(function () {
var oldonload = window.onload;
window.onload = function(){
__adroll_loaded=true;
var scr = document.createElement("script");
var host = (("https:" == document.location.protocol) ? "https://s.adroll.com" : "http://a.adroll.com");
scr.setAttribute('async', 'true');
scr.type = "text/javascript";
scr.src = host + "/j/roundtrip.js";
((document.getElementsByTagName('head') || [null])[0] ||
document.getElementsByTagName('script')[0].parentNode).appendChild(scr);
if(oldonload){oldonload()}};
}());
</script>
<%= javascript_include_tag "application" %>
</body>

View File

@ -10,12 +10,39 @@
<a href="/docs/providers/aws/index.html">AWS Provider</a>
</li>
<li<%= sidebar_current(/^docs-aws-resource/) %>>
<a href="#">Resources</a>
<li<%= sidebar_current(/^docs-aws-resource-cloudwatch/) %>>
<a href="#">CloudWatch Resources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-aws-app-cookie-stickiness-policy") %>>
<li<%= sidebar_current("docs-aws-resource-cloudwatch-metric-alarm") %>>
<a href="/docs/providers/aws/r/cloudwatch_metric_alarm.html">aws_cloudwatch_metric_alarm</a>
</li>
</ul>
</li>
<li<%= sidebar_current(/^docs-aws-resource-dynamodb/) %>>
<a href="#">DynamoDB Resources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-aws-resource-dynamodb-table") %>>
<a href="/docs/providers/aws/r/dynamodb_table.html">aws_dynamodb_table</a>
</li>
</ul>
</li>
<li<%= sidebar_current(/^docs-aws-resource-(app|autoscaling|ebs|elb|eip|instance|launch|lb|proxy|spot|volume)/) %>>
<a href="#">EC2 Resources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-aws-resource-app-cookie-stickiness-policy") %>>
<a href="/docs/providers/aws/r/app_cookie_stickiness_policy.html">aws_app_cookie_stickiness_policy</a>
</li>
<li<%= sidebar_current("docs-aws-resource-autoscaling-group") %>>
<a href="/docs/providers/aws/r/autoscaling_group.html">aws_autoscaling_group</a>
</li>
@ -28,38 +55,51 @@
<a href="/docs/providers/aws/r/autoscaling_policy.html">aws_autoscaling_policy</a>
</li>
<li<%= sidebar_current("docs-aws-resource-cloudwatch-metric-alarm") %>>
<a href="/docs/providers/aws/r/cloudwatch_metric_alarm.html">aws_cloudwatch_metric_alarm</a>
</li>
<li<%= sidebar_current("docs-aws-resource-customer-gateway") %>>
<a href="/docs/providers/aws/r/customer_gateway.html">aws_customer_gateway</a>
</li>
<li<%= sidebar_current("docs-aws-resource-db-instance") %>>
<a href="/docs/providers/aws/r/db_instance.html">aws_db_instance</a>
</li>
<li<%= sidebar_current("docs-aws-resource-db-parameter-group") %>>
<a href="/docs/providers/aws/r/db_parameter_group.html">aws_db_parameter_group</a>
</li>
<li<%= sidebar_current("docs-aws-resource-db-security-group") %>>
<a href="/docs/providers/aws/r/db_security_group.html">aws_db_security_group</a>
</li>
<li<%= sidebar_current("docs-aws-resource-db-subnet-group") %>>
<a href="/docs/providers/aws/r/db_subnet_group.html">aws_db_subnet_group</a>
</li>
<li<%= sidebar_current("docs-aws-resource-dynamodb-table") %>>
<a href="/docs/providers/aws/r/dynamodb_table.html">aws_dynamodb_table</a>
</li>
<li<%= sidebar_current("docs-aws-resource-ebs-volume") %>>
<a href="/docs/providers/aws/r/ebs_volume.html">aws_ebs_volume</a>
</li>
<li<%= sidebar_current("docs-aws-resource-eip") %>>
<a href="/docs/providers/aws/r/eip.html">aws_eip</a>
</li>
<li<%= sidebar_current("docs-aws-resource-elb") %>>
<a href="/docs/providers/aws/r/elb.html">aws_elb</a>
</li>
<li<%= sidebar_current("docs-aws-resource-instance") %>>
<a href="/docs/providers/aws/r/instance.html">aws_instance</a>
</li>
<li<%= sidebar_current("docs-aws-resource-launch-configuration") %>>
<a href="/docs/providers/aws/r/launch_configuration.html">aws_launch_configuration</a>
</li>
<li<%= sidebar_current("docs-aws-resource-lb-cookie-stickiness-policy") %>>
<a href="/docs/providers/aws/r/lb_cookie_stickiness_policy.html">aws_lb_cookie_stickiness_policy</a>
</li>
<li<%= sidebar_current("docs-aws-resource-proxy-protocol-policy") %>>
<a href="/docs/providers/aws/r/proxy_protocol_policy.html">aws_proxy_protocol_policy</a>
</li>
<li<%= sidebar_current("docs-aws-resource-spot-instance-request") %>>
<a href="/docs/providers/aws/r/spot_instance_request.html">aws_spot_instance_request</a>
</li>
<li<%= sidebar_current("docs-aws-resource-volume-attachment") %>>
<a href="/docs/providers/aws/r/volume_attachment.html">aws_volume_attachment</a>
</li>
</ul>
</li>
<li<%= sidebar_current(/^docs-aws-resource-ecs/) %>>
<a href="#">ECS Resources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-aws-resource-ecs-cluster") %>>
<a href="/docs/providers/aws/r/ecs_cluster.html">aws_ecs_cluster</a>
</li>
@ -72,10 +112,14 @@
<a href="/docs/providers/aws/r/ecs_task_definition.html">aws_ecs_task_definition</a>
</li>
<li<%= sidebar_current("docs-aws-resource-eip") %>>
<a href="/docs/providers/aws/r/eip.html">aws_eip</a>
</ul>
</li>
<li<%= sidebar_current(/^docs-aws-resource-elasticache/) %>>
<a href="#">ElastiCache Resources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-aws-resource-elasticache-cluster") %>>
<a href="/docs/providers/aws/r/elasticache_cluster.html">aws_elasticache_cluster</a>
</li>
@ -92,13 +136,13 @@
<a href="/docs/providers/aws/r/elasticache_subnet_group.html">aws_elasticache_subnet_group</a>
</li>
<li<%= sidebar_current("docs-aws-resource-elb") %>>
<a href="/docs/providers/aws/r/elb.html">aws_elb</a>
</ul>
</li>
<li<%= sidebar_current("docs-aws-resource-flow-log") %>>
<a href="/docs/providers/aws/r/flow_log.html">aws_flow_log</a>
</li>
<li<%= sidebar_current(/^docs-aws-resource-iam/) %>>
<a href="#">IAM Resources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-aws-resource-iam-access-key") %>>
<a href="/docs/providers/aws/r/iam_access_key.html">aws_iam_access_key</a>
@ -147,50 +191,62 @@
<a href="/docs/providers/aws/r/iam_user_policy.html">aws_iam_user_policy</a>
</li>
<li<%= sidebar_current("docs-aws-resource-instance") %>>
<a href="/docs/providers/aws/r/instance.html">aws_instance</a>
</ul>
</li>
<li<%= sidebar_current("docs-aws-resource-internet-gateway") %>>
<a href="/docs/providers/aws/r/internet_gateway.html">aws_internet_gateway</a>
</li>
<li<%= sidebar_current("docs-aws-resource-key-pair") %>>
<a href="/docs/providers/aws/r/key_pair.html">aws_key_pair</a>
</li>
<li<%= sidebar_current(/^docs-aws-resource-kinesis/) %>>
<a href="#">Kinesis Resources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-aws-resource-kinesis-stream") %>>
<a href="/docs/providers/aws/r/kinesis_stream.html">aws_kinesis_stream</a>
</li>
</ul>
</li>
<li<%= sidebar_current(/^docs-aws-resource-lambda/) %>>
<a href="#">Lambda Resources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-aws-resource-lambda-function") %>>
<a href="/docs/providers/aws/r/lambda_function.html">aws_lambda_function</a>
</li>
<li<%= sidebar_current("docs-aws-resource-launch-configuration") %>>
<a href="/docs/providers/aws/r/launch_configuration.html">aws_launch_configuration</a>
</ul>
</li>
<li<%= sidebar_current("docs-aws-resource-lb-cookie-stickiness-policy") %>>
<a href="/docs/providers/aws/r/lb_cookie_stickiness_policy.html">aws_lb_cookie_stickiness_policy</a>
<li<%= sidebar_current(/^docs-aws-resource-db/) %>>
<a href="#">RDS Resources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-aws-resource-db-instance") %>>
<a href="/docs/providers/aws/r/db_instance.html">aws_db_instance</a>
</li>
<li<%= sidebar_current("docs-aws-resource-main-route-table-assoc") %>>
<a href="/docs/providers/aws/r/main_route_table_assoc.html">aws_main_route_table_association</a>
<li<%= sidebar_current("docs-aws-resource-db-parameter-group") %>>
<a href="/docs/providers/aws/r/db_parameter_group.html">aws_db_parameter_group</a>
</li>
<li<%= sidebar_current("docs-aws-resource-network-acl") %>>
<a href="/docs/providers/aws/r/network_acl.html">aws_network_acl</a>
<li<%= sidebar_current("docs-aws-resource-db-security-group") %>>
<a href="/docs/providers/aws/r/db_security_group.html">aws_db_security_group</a>
</li>
<li<%= sidebar_current("docs-aws-resource-network-interface") %>>
<a href="/docs/providers/aws/r/network_interface.html">aws_network_interface</a>
<li<%= sidebar_current("docs-aws-resource-db-subnet-group") %>>
<a href="/docs/providers/aws/r/db_subnet_group.html">aws_db_subnet_group</a>
</li>
<li<%= sidebar_current("docs-aws-resource-proxy-protocol-policy") %>>
<a href="/docs/providers/aws/r/proxy_protocol_policy.html">aws_proxy_protocol_policy</a>
</ul>
</li>
<li<%= sidebar_current(/^docs-aws-resource-route53/) %>>
<a href="#">Route53 Resources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-aws-resource-route53-delegation-set") %>>
<a href="/docs/providers/aws/r/route53_delegation_set.html">aws_route53_delegation_set</a>
</li>
@ -211,6 +267,82 @@
<a href="/docs/providers/aws/r/route53_zone_association.html">aws_route53_zone_association</a>
</li>
</ul>
</li>
<li<%= sidebar_current(/^docs-aws-resource-s3/) %>>
<a href="#">S3 Resources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-aws-resource-s3-bucket") %>>
<a href="/docs/providers/aws/r/s3_bucket.html">aws_s3_bucket</a>
</li>
</ul>
</li>
<li<%= sidebar_current(/^docs-aws-resource-sns/) %>>
<a href="#">SNS Resources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-aws-resource-sns-topic") %>>
<a href="/docs/providers/aws/r/sns_topic.html">aws_sns_topic</a>
</li>
<li<%= sidebar_current("docs-aws-resource-sns-topic-subscription") %>>
<a href="/docs/providers/aws/r/sns_topic_subscription.html">aws_sns_topic_subscription</a>
</li>
</ul>
</li>
<li<%= sidebar_current(/^docs-aws-resource-sqs/) %>>
<a href="#">SQS Resources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-aws-resource-sqs-queue") %>>
<a href="/docs/providers/aws/r/sqs_queue.html">aws_sqs_queue</a>
</li>
</ul>
</li>
<li<%= sidebar_current(/^docs-aws-resource-(customer|flow|internet-gateway|key-pair|main-route|network|route-|security-group|subnet|vpc|vpn)/) %>>
<a href="#">VPC Resources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-aws-resource-customer-gateway") %>>
<a href="/docs/providers/aws/r/customer_gateway.html">aws_customer_gateway</a>
</li>
<li<%= sidebar_current("docs-aws-resource-flow-log") %>>
<a href="/docs/providers/aws/r/flow_log.html">aws_flow_log</a>
</li>
<li<%= sidebar_current("docs-aws-resource-internet-gateway") %>>
<a href="/docs/providers/aws/r/internet_gateway.html">aws_internet_gateway</a>
</li>
<li<%= sidebar_current("docs-aws-resource-key-pair") %>>
<a href="/docs/providers/aws/r/key_pair.html">aws_key_pair</a>
</li>
<li<%= sidebar_current("docs-aws-resource-main-route-table-assoc") %>>
<a href="/docs/providers/aws/r/main_route_table_assoc.html">aws_main_route_table_association</a>
</li>
<li<%= sidebar_current("docs-aws-resource-network-acl") %>>
<a href="/docs/providers/aws/r/network_acl.html">aws_network_acl</a>
</li>
<li<%= sidebar_current("docs-aws-resource-network-interface") %>>
<a href="/docs/providers/aws/r/network_interface.html">aws_network_interface</a>
</li>
<li<%= sidebar_current("docs-aws-resource-route-table|") %>>
<a href="/docs/providers/aws/r/route_table.html">aws_route_table</a>
</li>
@ -219,10 +351,6 @@
<a href="/docs/providers/aws/r/route_table_association.html">aws_route_table_association</a>
</li>
<li<%= sidebar_current("docs-aws-resource-s3-bucket") %>>
<a href="/docs/providers/aws/r/s3_bucket.html">aws_s3_bucket</a>
</li>
<li<%= sidebar_current("docs-aws-resource-security-group") %>>
<a href="/docs/providers/aws/r/security_group.html">aws_security_group</a>
</li>
@ -231,30 +359,10 @@
<a href="/docs/providers/aws/r/security_group_rule.html">aws_security_group_rule</a>
</li>
<li<%= sidebar_current("docs-aws-resource-sns-topic") %>>
<a href="/docs/providers/aws/r/sns_topic.html">aws_sns_topic</a>
</li>
<li<%= sidebar_current("docs-aws-resource-sns-topic-subscription") %>>
<a href="/docs/providers/aws/r/sns_topic_subscription.html">aws_sns_topic_subscription</a>
</li>
<li<%= sidebar_current("docs-aws-resource-spot-instance-request") %>>
<a href="/docs/providers/aws/r/spot_instance_request.html">aws_spot_instance_request</a>
</li>
<li<%= sidebar_current("docs-aws-resource-sqs-queue") %>>
<a href="/docs/providers/aws/r/sqs_queue.html">aws_sqs_queue</a>
</li>
<li<%= sidebar_current("docs-aws-resource-subnet") %>>
<a href="/docs/providers/aws/r/subnet.html">aws_subnet</a>
</li>
<li<%= sidebar_current("docs-aws-resource-volume-attachment") %>>
<a href="/docs/providers/aws/r/volume_attachment.html">aws_volume_attachment</a>
</li>
<li<%= sidebar_current("docs-aws-resource-vpc") %>>
<a href="/docs/providers/aws/r/vpc.html">aws_vpc</a>
</li>
@ -286,8 +394,10 @@
<li<%= sidebar_current("docs-aws-resource-vpn-gateway") %>>
<a href="/docs/providers/aws/r/vpn_gateway.html">aws_vpn_gateway</a>
</li>
</ul>
</li>
</ul>
</div>
<% end %>

Some files were not shown because too many files have changed in this diff.