Merge pull request #2874 from hashicorp/b-aws-upstream-fixes

provider/aws: Update source to comply with upstream breaking change
Commit 7de7a406f3 by Clint, 2015-07-28 15:59:16 -05:00
46 changed files with 236 additions and 247 deletions
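
The upstream breaking change is aws-sdk-go renaming its scalar-to-pointer helpers (aws.Boolean, aws.Long, aws.Double become aws.Bool, aws.Int64, aws.Float64) and switching aws.Config fields such as Region and MaxRetries to pointer types; the provider also drops its awsutil.StringValue debug calls in favor of logging the values directly. A minimal before/after sketch of the helper usage, assuming the SDK version this PR targets (the surrounding program and the MaxRetries value are illustrative only):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        // Renamed scalar-to-pointer helpers:
        //   aws.Boolean(true) -> aws.Bool(true)
        //   aws.Long(5)       -> aws.Int64(5)
        //   aws.Double(1.5)   -> aws.Float64(1.5)
        enabled := aws.Bool(true)
        size := aws.Int64(5)
        threshold := aws.Float64(1.5)

        // aws.Config fields now take pointers, so plain values are wrapped
        // with aws.String / aws.Int (the MaxRetries value here is made up).
        cfg := &aws.Config{
            Region:     aws.String("us-east-1"),
            MaxRetries: aws.Int(5),
        }

        fmt.Println(*enabled, *size, *threshold, *cfg.Region, *cfg.MaxRetries)
    }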

View File

@@ -122,7 +122,7 @@ func autoscalingTagsFromMap(m map[string]interface{}, resourceID string) []*auto
  result = append(result, &autoscaling.Tag{
    Key: aws.String(k),
    Value: aws.String(attr["value"].(string)),
-   PropagateAtLaunch: aws.Boolean(attr["propagate_at_launch"].(bool)),
+   PropagateAtLaunch: aws.Bool(attr["propagate_at_launch"].(bool)),
    ResourceID: aws.String(resourceID),
    ResourceType: aws.String("auto-scaling-group"),
  })

View File

@@ -82,8 +82,8 @@ func (c *Config) Client() (interface{}, error) {
  creds := credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token)
  awsConfig := &aws.Config{
    Credentials: creds,
-   Region: c.Region,
-   MaxRetries: c.MaxRetries,
+   Region: aws.String(c.Region),
+   MaxRetries: aws.Int(c.MaxRetries),
  }
  log.Println("[INFO] Initializing IAM Connection")
@@ -135,8 +135,8 @@ func (c *Config) Client() (interface{}, error) {
  log.Println("[INFO] Initializing Route 53 connection")
  client.r53conn = route53.New(&aws.Config{
    Credentials: creds,
-   Region: "us-east-1",
-   MaxRetries: c.MaxRetries,
+   Region: aws.String("us-east-1"),
+   MaxRetries: aws.Int(c.MaxRetries),
  })
  log.Println("[INFO] Initializing Elasticache Connection")

View File

@@ -26,12 +26,12 @@ func expandNetworkAclEntries(configured []interface{}, entryType string) ([]*ec2
  e := &ec2.NetworkACLEntry{
    Protocol: aws.String(strconv.Itoa(p)),
    PortRange: &ec2.PortRange{
-     From: aws.Long(int64(data["from_port"].(int))),
-     To: aws.Long(int64(data["to_port"].(int))),
+     From: aws.Int64(int64(data["from_port"].(int))),
+     To: aws.Int64(int64(data["to_port"].(int))),
    },
-   Egress: aws.Boolean((entryType == "egress")),
+   Egress: aws.Bool((entryType == "egress")),
    RuleAction: aws.String(data["action"].(string)),
-   RuleNumber: aws.Long(int64(data["rule_no"].(int))),
+   RuleNumber: aws.Int64(int64(data["rule_no"].(int))),
    CIDRBlock: aws.String(data["cidr_block"].(string)),
  }
@@ -39,10 +39,10 @@ func expandNetworkAclEntries(configured []interface{}, entryType string) ([]*ec2
  if p == 1 {
    e.ICMPTypeCode = &ec2.ICMPTypeCode{}
    if v, ok := data["icmp_code"]; ok {
-     e.ICMPTypeCode.Code = aws.Long(int64(v.(int)))
+     e.ICMPTypeCode.Code = aws.Int64(int64(v.(int)))
    }
    if v, ok := data["icmp_type"]; ok {
-     e.ICMPTypeCode.Type = aws.Long(int64(v.(int)))
+     e.ICMPTypeCode.Type = aws.Int64(int64(v.(int)))
    }
  }

View File

@@ -41,35 +41,35 @@ func Test_expandNetworkACLEntry(t *testing.T) {
  &ec2.NetworkACLEntry{
    Protocol: aws.String("6"),
    PortRange: &ec2.PortRange{
-     From: aws.Long(22),
-     To: aws.Long(22),
+     From: aws.Int64(22),
+     To: aws.Int64(22),
    },
    RuleAction: aws.String("deny"),
-   RuleNumber: aws.Long(1),
+   RuleNumber: aws.Int64(1),
    CIDRBlock: aws.String("0.0.0.0/0"),
-   Egress: aws.Boolean(true),
+   Egress: aws.Bool(true),
  },
  &ec2.NetworkACLEntry{
    Protocol: aws.String("6"),
    PortRange: &ec2.PortRange{
-     From: aws.Long(443),
-     To: aws.Long(443),
+     From: aws.Int64(443),
+     To: aws.Int64(443),
    },
    RuleAction: aws.String("deny"),
-   RuleNumber: aws.Long(2),
+   RuleNumber: aws.Int64(2),
    CIDRBlock: aws.String("0.0.0.0/0"),
-   Egress: aws.Boolean(true),
+   Egress: aws.Bool(true),
  },
  &ec2.NetworkACLEntry{
    Protocol: aws.String("-1"),
    PortRange: &ec2.PortRange{
-     From: aws.Long(443),
-     To: aws.Long(443),
+     From: aws.Int64(443),
+     To: aws.Int64(443),
    },
    RuleAction: aws.String("deny"),
-   RuleNumber: aws.Long(2),
+   RuleNumber: aws.Int64(2),
    CIDRBlock: aws.String("0.0.0.0/0"),
-   Egress: aws.Boolean(true),
+   Egress: aws.Bool(true),
  },
  }
@@ -88,21 +88,21 @@ func Test_flattenNetworkACLEntry(t *testing.T) {
  &ec2.NetworkACLEntry{
    Protocol: aws.String("tcp"),
    PortRange: &ec2.PortRange{
-     From: aws.Long(22),
-     To: aws.Long(22),
+     From: aws.Int64(22),
+     To: aws.Int64(22),
    },
    RuleAction: aws.String("deny"),
-   RuleNumber: aws.Long(1),
+   RuleNumber: aws.Int64(1),
    CIDRBlock: aws.String("0.0.0.0/0"),
  },
  &ec2.NetworkACLEntry{
    Protocol: aws.String("tcp"),
    PortRange: &ec2.PortRange{
-     From: aws.Long(443),
-     To: aws.Long(443),
+     From: aws.Int64(443),
+     To: aws.Int64(443),
    },
    RuleAction: aws.String("deny"),
-   RuleNumber: aws.Long(2),
+   RuleNumber: aws.Int64(2),
    CIDRBlock: aws.String("0.0.0.0/0"),
  },
  }

View File

@@ -64,7 +64,7 @@ func resourceAwsAppCookieStickinessPolicyCreate(d *schema.ResourceData, meta int
  setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{
    LoadBalancerName: aws.String(d.Get("load_balancer").(string)),
-   LoadBalancerPort: aws.Long(int64(d.Get("lb_port").(int))),
+   LoadBalancerPort: aws.Int64(int64(d.Get("lb_port").(int))),
    PolicyNames: []*string{aws.String(d.Get("name").(string))},
  }
@@ -129,7 +129,7 @@ func resourceAwsAppCookieStickinessPolicyDelete(d *schema.ResourceData, meta int
  // policy itself.
  setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{
    LoadBalancerName: aws.String(d.Get("load_balancer").(string)),
-   LoadBalancerPort: aws.Long(int64(d.Get("lb_port").(int))),
+   LoadBalancerPort: aws.Int64(int64(d.Get("lb_port").(int))),
    PolicyNames: []*string{},
  }

View File

@@ -131,8 +131,8 @@ func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{})
  var autoScalingGroupOpts autoscaling.CreateAutoScalingGroupInput
  autoScalingGroupOpts.AutoScalingGroupName = aws.String(d.Get("name").(string))
  autoScalingGroupOpts.LaunchConfigurationName = aws.String(d.Get("launch_configuration").(string))
- autoScalingGroupOpts.MinSize = aws.Long(int64(d.Get("min_size").(int)))
- autoScalingGroupOpts.MaxSize = aws.Long(int64(d.Get("max_size").(int)))
+ autoScalingGroupOpts.MinSize = aws.Int64(int64(d.Get("min_size").(int)))
+ autoScalingGroupOpts.MaxSize = aws.Int64(int64(d.Get("max_size").(int)))
  // Availability Zones are optional if VPC Zone Identifer(s) are specified
  if v, ok := d.GetOk("availability_zones"); ok && v.(*schema.Set).Len() > 0 {
@@ -145,7 +145,7 @@ func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{})
  }
  if v, ok := d.GetOk("default_cooldown"); ok {
-   autoScalingGroupOpts.DefaultCooldown = aws.Long(int64(v.(int)))
+   autoScalingGroupOpts.DefaultCooldown = aws.Int64(int64(v.(int)))
  }
  if v, ok := d.GetOk("health_check_type"); ok && v.(string) != "" {
@@ -153,11 +153,11 @@ func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{})
  }
  if v, ok := d.GetOk("desired_capacity"); ok {
-   autoScalingGroupOpts.DesiredCapacity = aws.Long(int64(v.(int)))
+   autoScalingGroupOpts.DesiredCapacity = aws.Int64(int64(v.(int)))
  }
  if v, ok := d.GetOk("health_check_grace_period"); ok {
-   autoScalingGroupOpts.HealthCheckGracePeriod = aws.Long(int64(v.(int)))
+   autoScalingGroupOpts.HealthCheckGracePeriod = aws.Int64(int64(v.(int)))
  }
  if v, ok := d.GetOk("load_balancers"); ok && v.(*schema.Set).Len() > 0 {
@@ -224,11 +224,11 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{})
  }
  if d.HasChange("default_cooldown") {
-   opts.DefaultCooldown = aws.Long(int64(d.Get("default_cooldown").(int)))
+   opts.DefaultCooldown = aws.Int64(int64(d.Get("default_cooldown").(int)))
  }
  if d.HasChange("desired_capacity") {
-   opts.DesiredCapacity = aws.Long(int64(d.Get("desired_capacity").(int)))
+   opts.DesiredCapacity = aws.Int64(int64(d.Get("desired_capacity").(int)))
  }
  if d.HasChange("launch_configuration") {
@@ -236,19 +236,19 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{})
  }
  if d.HasChange("min_size") {
-   opts.MinSize = aws.Long(int64(d.Get("min_size").(int)))
+   opts.MinSize = aws.Int64(int64(d.Get("min_size").(int)))
  }
  if d.HasChange("max_size") {
-   opts.MaxSize = aws.Long(int64(d.Get("max_size").(int)))
+   opts.MaxSize = aws.Int64(int64(d.Get("max_size").(int)))
  }
  if d.HasChange("health_check_grace_period") {
-   opts.HealthCheckGracePeriod = aws.Long(int64(d.Get("health_check_grace_period").(int)))
+   opts.HealthCheckGracePeriod = aws.Int64(int64(d.Get("health_check_grace_period").(int)))
  }
  if d.HasChange("health_check_type") {
-   opts.HealthCheckGracePeriod = aws.Long(int64(d.Get("health_check_grace_period").(int)))
+   opts.HealthCheckGracePeriod = aws.Int64(int64(d.Get("health_check_grace_period").(int)))
    opts.HealthCheckType = aws.String(d.Get("health_check_type").(string))
  }
@@ -342,7 +342,7 @@ func resourceAwsAutoscalingGroupDelete(d *schema.ResourceData, meta interface{})
  // and then delete the group. This bypasses that and leaves
  // resources potentially dangling.
  if d.Get("force_delete").(bool) {
-   deleteopts.ForceDelete = aws.Boolean(true)
+   deleteopts.ForceDelete = aws.Bool(true)
  }
  // We retry the delete operation to handle InUse/InProgress errors coming
@@ -418,9 +418,9 @@ func resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{})
  log.Printf("[DEBUG] Reducing autoscaling group capacity to zero")
  opts := autoscaling.UpdateAutoScalingGroupInput{
    AutoScalingGroupName: aws.String(d.Id()),
-   DesiredCapacity: aws.Long(0),
-   MinSize: aws.Long(0),
-   MaxSize: aws.Long(0),
+   DesiredCapacity: aws.Int64(0),
+   MinSize: aws.Int64(0),
+   MaxSize: aws.Int64(0),
  }
  if _, err := conn.UpdateAutoScalingGroup(&opts); err != nil {
    return fmt.Errorf("Error setting capacity to zero to drain: %s", err)

View File

@@ -217,7 +217,7 @@ func testAccCheckAWSAutoScalingGroupAttributes(group *autoscaling.Group) resourc
  t := &autoscaling.TagDescription{
    Key: aws.String("Foo"),
    Value: aws.String("foo-bar"),
-   PropagateAtLaunch: aws.Boolean(true),
+   PropagateAtLaunch: aws.Bool(true),
    ResourceType: aws.String("auto-scaling-group"),
    ResourceID: group.AutoScalingGroupName,
  }

View File

@@ -140,15 +140,15 @@ func getAwsAutoscalingPutScalingPolicyInput(d *schema.ResourceData) autoscaling.
  }
  if v, ok := d.GetOk("cooldown"); ok {
-   params.Cooldown = aws.Long(int64(v.(int)))
+   params.Cooldown = aws.Int64(int64(v.(int)))
  }
  if v, ok := d.GetOk("scaling_adjustment"); ok {
-   params.ScalingAdjustment = aws.Long(int64(v.(int)))
+   params.ScalingAdjustment = aws.Int64(int64(v.(int)))
  }
  if v, ok := d.GetOk("min_adjustment_step"); ok {
-   params.MinAdjustmentStep = aws.Long(int64(v.(int)))
+   params.MinAdjustmentStep = aws.Int64(int64(v.(int)))
  }
  return params

View File

@@ -197,16 +197,16 @@ func getAwsCloudWatchPutMetricAlarmInput(d *schema.ResourceData) cloudwatch.PutM
  params := cloudwatch.PutMetricAlarmInput{
    AlarmName: aws.String(d.Get("alarm_name").(string)),
    ComparisonOperator: aws.String(d.Get("comparison_operator").(string)),
-   EvaluationPeriods: aws.Long(int64(d.Get("evaluation_periods").(int))),
+   EvaluationPeriods: aws.Int64(int64(d.Get("evaluation_periods").(int))),
    MetricName: aws.String(d.Get("metric_name").(string)),
    Namespace: aws.String(d.Get("namespace").(string)),
-   Period: aws.Long(int64(d.Get("period").(int))),
+   Period: aws.Int64(int64(d.Get("period").(int))),
    Statistic: aws.String(d.Get("statistic").(string)),
-   Threshold: aws.Double(d.Get("threshold").(float64)),
+   Threshold: aws.Float64(d.Get("threshold").(float64)),
  }
  if v := d.Get("actions_enabled"); v != nil {
-   params.ActionsEnabled = aws.Boolean(v.(bool))
+   params.ActionsEnabled = aws.Bool(v.(bool))
  }
  if v, ok := d.GetOk("alarm_description"); ok {

View File

@@ -48,7 +48,7 @@ func resourceAwsCustomerGatewayCreate(d *schema.ResourceData, meta interface{})
  conn := meta.(*AWSClient).ec2conn
  createOpts := &ec2.CreateCustomerGatewayInput{
-   BGPASN: aws.Long(int64(d.Get("bgp_asn").(int))),
+   BGPASN: aws.Int64(int64(d.Get("bgp_asn").(int))),
    PublicIP: aws.String(d.Get("ip_address").(string)),
    Type: aws.String(d.Get("type").(string)),
  }

View File

@@ -272,11 +272,11 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
    Tags: tags,
  }
  if attr, ok := d.GetOk("iops"); ok {
-   opts.IOPS = aws.Long(int64(attr.(int)))
+   opts.IOPS = aws.Int64(int64(attr.(int)))
  }
  if attr, ok := d.GetOk("port"); ok {
-   opts.Port = aws.Long(int64(attr.(int)))
+   opts.Port = aws.Int64(int64(attr.(int)))
  }
  if attr, ok := d.GetOk("availability_zone"); ok {
@@ -284,7 +284,7 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
  }
  if attr, ok := d.GetOk("publicly_accessible"); ok {
-   opts.PubliclyAccessible = aws.Boolean(attr.(bool))
+   opts.PubliclyAccessible = aws.Bool(attr.(bool))
  }
  _, err := conn.CreateDBInstanceReadReplica(&opts)
  if err != nil {
@@ -299,7 +299,7 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
  }
  if attr, ok := d.GetOk("auto_minor_version_upgrade"); ok {
-   opts.AutoMinorVersionUpgrade = aws.Boolean(attr.(bool))
+   opts.AutoMinorVersionUpgrade = aws.Bool(attr.(bool))
  }
  if attr, ok := d.GetOk("availability_zone"); ok {
@@ -315,7 +315,7 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
  }
  if attr, ok := d.GetOk("iops"); ok {
-   opts.IOPS = aws.Long(int64(attr.(int)))
+   opts.IOPS = aws.Int64(int64(attr.(int)))
  }
  if attr, ok := d.GetOk("license_model"); ok {
@@ -323,7 +323,7 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
  }
  if attr, ok := d.GetOk("multi_az"); ok {
-   opts.MultiAZ = aws.Boolean(attr.(bool))
+   opts.MultiAZ = aws.Bool(attr.(bool))
  }
  if attr, ok := d.GetOk("option_group_name"); ok {
@@ -331,11 +331,11 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
  }
  if attr, ok := d.GetOk("port"); ok {
-   opts.Port = aws.Long(int64(attr.(int)))
+   opts.Port = aws.Int64(int64(attr.(int)))
  }
  if attr, ok := d.GetOk("publicly_accessible"); ok {
-   opts.PubliclyAccessible = aws.Boolean(attr.(bool))
+   opts.PubliclyAccessible = aws.Bool(attr.(bool))
  }
  if attr, ok := d.GetOk("tde_credential_arn"); ok {
@@ -352,7 +352,7 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
  }
  } else {
    opts := rds.CreateDBInstanceInput{
-     AllocatedStorage: aws.Long(int64(d.Get("allocated_storage").(int))),
+     AllocatedStorage: aws.Int64(int64(d.Get("allocated_storage").(int))),
      DBName: aws.String(d.Get("name").(string)),
      DBInstanceClass: aws.String(d.Get("instance_class").(string)),
      DBInstanceIdentifier: aws.String(d.Get("identifier").(string)),
@@ -360,14 +360,14 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
      MasterUserPassword: aws.String(d.Get("password").(string)),
      Engine: aws.String(d.Get("engine").(string)),
      EngineVersion: aws.String(d.Get("engine_version").(string)),
-     StorageEncrypted: aws.Boolean(d.Get("storage_encrypted").(bool)),
+     StorageEncrypted: aws.Bool(d.Get("storage_encrypted").(bool)),
      Tags: tags,
    }
    attr := d.Get("backup_retention_period")
-   opts.BackupRetentionPeriod = aws.Long(int64(attr.(int)))
+   opts.BackupRetentionPeriod = aws.Int64(int64(attr.(int)))
    if attr, ok := d.GetOk("multi_az"); ok {
-     opts.MultiAZ = aws.Boolean(attr.(bool))
+     opts.MultiAZ = aws.Bool(attr.(bool))
    }
    if attr, ok := d.GetOk("maintenance_window"); ok {
@@ -409,11 +409,11 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
    }
    if attr, ok := d.GetOk("iops"); ok {
-     opts.IOPS = aws.Long(int64(attr.(int)))
+     opts.IOPS = aws.Int64(int64(attr.(int)))
    }
    if attr, ok := d.GetOk("port"); ok {
-     opts.Port = aws.Long(int64(attr.(int)))
+     opts.Port = aws.Int64(int64(attr.(int)))
    }
    if attr, ok := d.GetOk("availability_zone"); ok {
@@ -421,7 +421,7 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
    }
    if attr, ok := d.GetOk("publicly_accessible"); ok {
-     opts.PubliclyAccessible = aws.Boolean(attr.(bool))
+     opts.PubliclyAccessible = aws.Bool(attr.(bool))
    }
  log.Printf("[DEBUG] DB Instance create configuration: %#v", opts)
@@ -571,7 +571,7 @@ func resourceAwsDbInstanceDelete(d *schema.ResourceData, meta interface{}) error
  finalSnapshot := d.Get("final_snapshot_identifier").(string)
  if finalSnapshot == "" {
-   opts.SkipFinalSnapshot = aws.Boolean(true)
+   opts.SkipFinalSnapshot = aws.Bool(true)
  } else {
    opts.FinalDBSnapshotIdentifier = aws.String(finalSnapshot)
  }
@@ -605,7 +605,7 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error
  d.Partial(true)
  req := &rds.ModifyDBInstanceInput{
-   ApplyImmediately: aws.Boolean(d.Get("apply_immediately").(bool)),
+   ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)),
    DBInstanceIdentifier: aws.String(d.Id()),
  }
  d.SetPartial("apply_immediately")
@@ -613,12 +613,12 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error
  requestUpdate := false
  if d.HasChange("allocated_storage") {
    d.SetPartial("allocated_storage")
-   req.AllocatedStorage = aws.Long(int64(d.Get("allocated_storage").(int)))
+   req.AllocatedStorage = aws.Int64(int64(d.Get("allocated_storage").(int)))
    requestUpdate = true
  }
  if d.HasChange("backup_retention_period") {
    d.SetPartial("backup_retention_period")
-   req.BackupRetentionPeriod = aws.Long(int64(d.Get("backup_retention_period").(int)))
+   req.BackupRetentionPeriod = aws.Int64(int64(d.Get("backup_retention_period").(int)))
    requestUpdate = true
  }
  if d.HasChange("instance_class") {
@@ -638,7 +638,7 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error
  }
  if d.HasChange("iops") {
    d.SetPartial("iops")
-   req.IOPS = aws.Long(int64(d.Get("iops").(int)))
+   req.IOPS = aws.Int64(int64(d.Get("iops").(int)))
    requestUpdate = true
  }
  if d.HasChange("backup_window") {
@@ -658,7 +658,7 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error
  }
  if d.HasChange("multi_az") {
    d.SetPartial("multi_az")
-   req.MultiAZ = aws.Boolean(d.Get("multi_az").(bool))
+   req.MultiAZ = aws.Bool(d.Get("multi_az").(bool))
    requestUpdate = true
  }
  if d.HasChange("storage_type") {
@@ -706,7 +706,7 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error
    DBInstanceIdentifier: aws.String(d.Id()),
  }
  attr := d.Get("backup_retention_period")
- opts.BackupRetentionPeriod = aws.Long(int64(attr.(int)))
+ opts.BackupRetentionPeriod = aws.Int64(int64(attr.(int)))
  if attr, ok := d.GetOk("backup_window"); ok {
    opts.PreferredBackupWindow = aws.String(attr.(string))
  }

View File

@@ -166,8 +166,8 @@ func resourceAwsDynamoDbTableCreate(d *schema.ResourceData, meta interface{}) er
  log.Printf("[DEBUG] DynamoDB table create: %s", name)
  throughput := &dynamodb.ProvisionedThroughput{
-   ReadCapacityUnits: aws.Long(int64(d.Get("read_capacity").(int))),
-   WriteCapacityUnits: aws.Long(int64(d.Get("write_capacity").(int))),
+   ReadCapacityUnits: aws.Int64(int64(d.Get("read_capacity").(int))),
+   WriteCapacityUnits: aws.Int64(int64(d.Get("write_capacity").(int))),
  }
  hash_key_name := d.Get("hash_key").(string)
@@ -318,8 +318,8 @@ func resourceAwsDynamoDbTableUpdate(d *schema.ResourceData, meta interface{}) er
  }
  throughput := &dynamodb.ProvisionedThroughput{
-   ReadCapacityUnits: aws.Long(int64(d.Get("read_capacity").(int))),
-   WriteCapacityUnits: aws.Long(int64(d.Get("write_capacity").(int))),
+   ReadCapacityUnits: aws.Int64(int64(d.Get("read_capacity").(int))),
+   WriteCapacityUnits: aws.Int64(int64(d.Get("write_capacity").(int))),
  }
  req.ProvisionedThroughput = throughput
@@ -486,8 +486,8 @@ func resourceAwsDynamoDbTableUpdate(d *schema.ResourceData, meta interface{}) er
  Update: &dynamodb.UpdateGlobalSecondaryIndexAction{
    IndexName: aws.String(gsidata["name"].(string)),
    ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
-     WriteCapacityUnits: aws.Long(int64(gsiWriteCapacity)),
-     ReadCapacityUnits: aws.Long(int64(gsiReadCapacity)),
+     WriteCapacityUnits: aws.Int64(int64(gsiWriteCapacity)),
+     ReadCapacityUnits: aws.Int64(int64(gsiReadCapacity)),
    },
  },
  }
@@ -634,8 +634,8 @@ func createGSIFromData(data *map[string]interface{}) dynamodb.GlobalSecondaryInd
  KeySchema: key_schema,
  Projection: projection,
  ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
-   WriteCapacityUnits: aws.Long(int64(writeCapacity)),
-   ReadCapacityUnits: aws.Long(int64(readCapacity)),
+   WriteCapacityUnits: aws.Int64(int64(writeCapacity)),
+   ReadCapacityUnits: aws.Int64(int64(readCapacity)),
  },
  }
  }

View File

@@ -74,16 +74,16 @@ func resourceAwsEbsVolumeCreate(d *schema.ResourceData, meta interface{}) error
    AvailabilityZone: aws.String(d.Get("availability_zone").(string)),
  }
  if value, ok := d.GetOk("encrypted"); ok {
-   request.Encrypted = aws.Boolean(value.(bool))
+   request.Encrypted = aws.Bool(value.(bool))
  }
  if value, ok := d.GetOk("iops"); ok {
-   request.IOPS = aws.Long(int64(value.(int)))
+   request.IOPS = aws.Int64(int64(value.(int)))
  }
  if value, ok := d.GetOk("kms_key_id"); ok {
    request.KMSKeyID = aws.String(value.(string))
  }
  if value, ok := d.GetOk("size"); ok {
-   request.Size = aws.Long(int64(value.(int)))
+   request.Size = aws.Int64(int64(value.(int)))
  }
  if value, ok := d.GetOk("snapshot_id"); ok {
    request.SnapshotID = aws.String(value.(string))

View File

@@ -6,7 +6,6 @@ import (
  "github.com/aws/aws-sdk-go/aws"
  "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/awsutil"
  "github.com/aws/aws-sdk-go/service/ecs"
  "github.com/hashicorp/terraform/helper/resource"
  "github.com/hashicorp/terraform/helper/schema"
@@ -58,7 +57,7 @@ func resourceAwsEcsClusterRead(d *schema.ResourceData, meta interface{}) error {
  if err != nil {
    return err
  }
- log.Printf("[DEBUG] Received ECS clusters: %s", awsutil.StringValue(out.Clusters))
+ log.Printf("[DEBUG] Received ECS clusters: %s", out.Clusters)
  d.SetId(*out.Clusters[0].ClusterARN)
  d.Set("name", *out.Clusters[0].ClusterName)
@@ -77,7 +76,7 @@ func resourceAwsEcsClusterDelete(d *schema.ResourceData, meta interface{}) error
  })
  if err == nil {
-   log.Printf("[DEBUG] ECS cluster %s deleted: %s", d.Id(), awsutil.StringValue(out))
+   log.Printf("[DEBUG] ECS cluster %s deleted: %s", d.Id(), out)
    return nil
  }

View File

@@ -9,7 +9,6 @@ import (
  "time"
  "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awsutil"
  "github.com/aws/aws-sdk-go/service/ecs"
  "github.com/aws/aws-sdk-go/service/iam"
  "github.com/hashicorp/terraform/helper/hashcode"
@@ -88,7 +87,7 @@ func resourceAwsEcsServiceCreate(d *schema.ResourceData, meta interface{}) error
  input := ecs.CreateServiceInput{
    ServiceName: aws.String(d.Get("name").(string)),
    TaskDefinition: aws.String(d.Get("task_definition").(string)),
-   DesiredCount: aws.Long(int64(d.Get("desired_count").(int))),
+   DesiredCount: aws.Int64(int64(d.Get("desired_count").(int))),
    ClientToken: aws.String(resource.UniqueId()),
  }
@@ -98,14 +97,14 @@ func resourceAwsEcsServiceCreate(d *schema.ResourceData, meta interface{}) error
  loadBalancers := expandEcsLoadBalancers(d.Get("load_balancer").(*schema.Set).List())
  if len(loadBalancers) > 0 {
-   log.Printf("[DEBUG] Adding ECS load balancers: %s", awsutil.StringValue(loadBalancers))
+   log.Printf("[DEBUG] Adding ECS load balancers: %s", loadBalancers)
    input.LoadBalancers = loadBalancers
  }
  if v, ok := d.GetOk("iam_role"); ok {
    input.Role = aws.String(v.(string))
  }
- log.Printf("[DEBUG] Creating ECS service: %s", awsutil.StringValue(input))
+ log.Printf("[DEBUG] Creating ECS service: %s", input)
  out, err := conn.CreateService(&input)
  if err != nil {
    return err
@@ -139,7 +138,7 @@ func resourceAwsEcsServiceRead(d *schema.ResourceData, meta interface{}) error {
  }
  service := out.Services[0]
- log.Printf("[DEBUG] Received ECS service %s", awsutil.StringValue(service))
+ log.Printf("[DEBUG] Received ECS service %s", service)
  d.SetId(*service.ServiceARN)
  d.Set("name", *service.ServiceName)
@@ -177,7 +176,7 @@ func resourceAwsEcsServiceUpdate(d *schema.ResourceData, meta interface{}) error
  if d.HasChange("desired_count") {
    _, n := d.GetChange("desired_count")
-   input.DesiredCount = aws.Long(int64(n.(int)))
+   input.DesiredCount = aws.Int64(int64(n.(int)))
  }
  if d.HasChange("task_definition") {
    _, n := d.GetChange("task_definition")
@@ -189,7 +188,7 @@ func resourceAwsEcsServiceUpdate(d *schema.ResourceData, meta interface{}) error
    return err
  }
  service := out.Service
- log.Printf("[DEBUG] Updated ECS service %s", awsutil.StringValue(service))
+ log.Printf("[DEBUG] Updated ECS service %s", service)
  return resourceAwsEcsServiceRead(d, meta)
  }
@@ -217,7 +216,7 @@ func resourceAwsEcsServiceDelete(d *schema.ResourceData, meta interface{}) error
  _, err = conn.UpdateService(&ecs.UpdateServiceInput{
    Service: aws.String(d.Id()),
    Cluster: aws.String(d.Get("cluster").(string)),
-   DesiredCount: aws.Long(int64(0)),
+   DesiredCount: aws.Int64(int64(0)),
  })
  if err != nil {
    return err
@@ -229,7 +228,7 @@ func resourceAwsEcsServiceDelete(d *schema.ResourceData, meta interface{}) error
    Cluster: aws.String(d.Get("cluster").(string)),
  }
- log.Printf("[DEBUG] Deleting ECS service %s", awsutil.StringValue(input))
+ log.Printf("[DEBUG] Deleting ECS service %s", input)
  out, err := conn.DeleteService(&input)
  if err != nil {
    return err

View File

@@ -8,7 +8,6 @@ import (
  "log"
  "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awsutil"
  "github.com/aws/aws-sdk-go/service/ecs"
  "github.com/hashicorp/terraform/helper/hashcode"
  "github.com/hashicorp/terraform/helper/schema"
@@ -91,7 +90,7 @@ func resourceAwsEcsTaskDefinitionCreate(d *schema.ResourceData, meta interface{}
    input.Volumes = volumes
  }
- log.Printf("[DEBUG] Registering ECS task definition: %s", awsutil.StringValue(input))
+ log.Printf("[DEBUG] Registering ECS task definition: %s", input)
  out, err := conn.RegisterTaskDefinition(&input)
  if err != nil {
    return err
@@ -118,7 +117,7 @@ func resourceAwsEcsTaskDefinitionRead(d *schema.ResourceData, meta interface{})
  if err != nil {
    return err
  }
- log.Printf("[DEBUG] Received task definition %s", awsutil.StringValue(out))
+ log.Printf("[DEBUG] Received task definition %s", out)
  taskDefinition := out.TaskDefinition

View File

@@ -9,7 +9,6 @@ import (
  "github.com/aws/aws-sdk-go/aws"
  "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/awsutil"
  "github.com/aws/aws-sdk-go/service/elasticache"
  "github.com/aws/aws-sdk-go/service/iam"
  "github.com/hashicorp/terraform/helper/hashcode"
@@ -160,10 +159,10 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{
  req := &elasticache.CreateCacheClusterInput{
    CacheClusterID: aws.String(clusterId),
    CacheNodeType: aws.String(nodeType),
-   NumCacheNodes: aws.Long(numNodes),
+   NumCacheNodes: aws.Int64(numNodes),
    Engine: aws.String(engine),
    EngineVersion: aws.String(engineVersion),
-   Port: aws.Long(port),
+   Port: aws.Int64(port),
    CacheSubnetGroupName: aws.String(subnetGroupName),
    CacheSecurityGroupNames: securityNames,
    SecurityGroupIDs: securityIds,
@@ -216,7 +215,7 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{})
  conn := meta.(*AWSClient).elasticacheconn
  req := &elasticache.DescribeCacheClustersInput{
    CacheClusterID: aws.String(d.Id()),
-   ShowCacheNodeInfo: aws.Boolean(true),
+   ShowCacheNodeInfo: aws.Bool(true),
  }
  res, err := conn.DescribeCacheClusters(req)
@@ -281,7 +280,7 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{
  req := &elasticache.ModifyCacheClusterInput{
    CacheClusterID: aws.String(d.Id()),
-   ApplyImmediately: aws.Boolean(d.Get("apply_immediately").(bool)),
+   ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)),
  }
  requestUpdate := false
@@ -308,12 +307,12 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{
  }
  if d.HasChange("num_cache_nodes") {
-   req.NumCacheNodes = aws.Long(int64(d.Get("num_cache_nodes").(int)))
+   req.NumCacheNodes = aws.Int64(int64(d.Get("num_cache_nodes").(int)))
    requestUpdate = true
  }
  if requestUpdate {
-   log.Printf("[DEBUG] Modifying ElastiCache Cluster (%s), opts:\n%s", d.Id(), awsutil.StringValue(req))
+   log.Printf("[DEBUG] Modifying ElastiCache Cluster (%s), opts:\n%s", d.Id(), req)
    _, err := conn.ModifyCacheCluster(req)
    if err != nil {
      return fmt.Errorf("[WARN] Error updating ElastiCache cluster (%s), error: %s", d.Id(), err)
@@ -348,7 +347,7 @@ func setCacheNodeData(d *schema.ResourceData, c *elasticache.CacheCluster) error
  for _, node := range sortedCacheNodes {
    if node.CacheNodeID == nil || node.Endpoint == nil || node.Endpoint.Address == nil || node.Endpoint.Port == nil {
-     return fmt.Errorf("Unexpected nil pointer in: %s", awsutil.StringValue(node))
+     return fmt.Errorf("Unexpected nil pointer in: %s", node)
    }
    cacheNodeData = append(cacheNodeData, map[string]interface{}{
      "id": *node.CacheNodeID,
@@ -404,7 +403,7 @@ func cacheClusterStateRefreshFunc(conn *elasticache.ElastiCache, clusterID, give
  return func() (interface{}, string, error) {
    resp, err := conn.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{
      CacheClusterID: aws.String(clusterID),
-     ShowCacheNodeInfo: aws.Boolean(true),
+     ShowCacheNodeInfo: aws.Bool(true),
    })
    if err != nil {
      apierr := err.(awserr.Error)

View File

@@ -429,10 +429,10 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error {
  LoadBalancerName: aws.String(d.Get("name").(string)),
  LoadBalancerAttributes: &elb.LoadBalancerAttributes{
    CrossZoneLoadBalancing: &elb.CrossZoneLoadBalancing{
-     Enabled: aws.Boolean(d.Get("cross_zone_load_balancing").(bool)),
+     Enabled: aws.Bool(d.Get("cross_zone_load_balancing").(bool)),
    },
    ConnectionSettings: &elb.ConnectionSettings{
-     IdleTimeout: aws.Long(int64(d.Get("idle_timeout").(int))),
+     IdleTimeout: aws.Int64(int64(d.Get("idle_timeout").(int))),
    },
  },
  }
@@ -459,8 +459,8 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error {
  LoadBalancerName: aws.String(d.Get("name").(string)),
  LoadBalancerAttributes: &elb.LoadBalancerAttributes{
    ConnectionDraining: &elb.ConnectionDraining{
-     Enabled: aws.Boolean(true),
-     Timeout: aws.Long(int64(d.Get("connection_draining_timeout").(int))),
+     Enabled: aws.Bool(true),
+     Timeout: aws.Int64(int64(d.Get("connection_draining_timeout").(int))),
    },
  },
  }
@@ -480,7 +480,7 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error {
  LoadBalancerName: aws.String(d.Get("name").(string)),
  LoadBalancerAttributes: &elb.LoadBalancerAttributes{
    ConnectionDraining: &elb.ConnectionDraining{
-     Enabled: aws.Boolean(d.Get("connection_draining").(bool)),
+     Enabled: aws.Bool(d.Get("connection_draining").(bool)),
    },
  },
  }
@@ -500,11 +500,11 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error {
  configureHealthCheckOpts := elb.ConfigureHealthCheckInput{
    LoadBalancerName: aws.String(d.Id()),
    HealthCheck: &elb.HealthCheck{
-     HealthyThreshold: aws.Long(int64(check["healthy_threshold"].(int))),
-     UnhealthyThreshold: aws.Long(int64(check["unhealthy_threshold"].(int))),
-     Interval: aws.Long(int64(check["interval"].(int))),
+     HealthyThreshold: aws.Int64(int64(check["healthy_threshold"].(int))),
+     UnhealthyThreshold: aws.Int64(int64(check["unhealthy_threshold"].(int))),
+     Interval: aws.Int64(int64(check["interval"].(int))),
      Target: aws.String(check["target"].(string)),
-     Timeout: aws.Long(int64(check["timeout"].(int))),
+     Timeout: aws.Int64(int64(check["timeout"].(int))),
    },
  }
  _, err := elbconn.ConfigureHealthCheck(&configureHealthCheckOpts)

View File

@@ -465,9 +465,9 @@ func testAccCheckAWSELBAttributes(conf *elb.LoadBalancerDescription) resource.Te
  }
  l := elb.Listener{
-   InstancePort: aws.Long(int64(8000)),
+   InstancePort: aws.Int64(int64(8000)),
    InstanceProtocol: aws.String("HTTP"),
-   LoadBalancerPort: aws.Long(int64(80)),
+   LoadBalancerPort: aws.Int64(int64(80)),
    Protocol: aws.String("HTTP"),
  }
@@ -503,10 +503,10 @@ func testAccCheckAWSELBAttributesHealthCheck(conf *elb.LoadBalancerDescription)
  }
  check := &elb.HealthCheck{
-   Timeout: aws.Long(int64(30)),
-   UnhealthyThreshold: aws.Long(int64(5)),
-   HealthyThreshold: aws.Long(int64(5)),
-   Interval: aws.Long(int64(60)),
+   Timeout: aws.Int64(int64(30)),
+   UnhealthyThreshold: aws.Int64(int64(5)),
+   HealthyThreshold: aws.Int64(int64(5)),
+   Interval: aws.Int64(int64(60)),
    Target: aws.String("HTTP:8000/"),
  }

View File

@@ -5,7 +5,6 @@ import (
  "log"
  "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awsutil"
  "github.com/aws/aws-sdk-go/service/ec2"
  "github.com/hashicorp/terraform/helper/schema"
  )
@@ -94,7 +93,7 @@ func resourceAwsLogFlowCreate(d *schema.ResourceData, meta interface{}) error {
  }
  log.Printf(
-   "[DEBUG] Flow Log Create configuration: %s", awsutil.StringValue(opts))
+   "[DEBUG] Flow Log Create configuration: %s", opts)
  resp, err := conn.CreateFlowLogs(opts)
  if err != nil {
    return fmt.Errorf("Error creating Flow Log for (%s), error: %s", resourceId, err)

View File

@@ -97,7 +97,7 @@ func resourceAwsIamPolicyUpdate(d *schema.ResourceData, meta interface{}) error
  request := &iam.CreatePolicyVersionInput{
    PolicyARN: aws.String(d.Id()),
    PolicyDocument: aws.String(d.Get("policy").(string)),
-   SetAsDefault: aws.Boolean(true),
+   SetAsDefault: aws.Bool(true),
  }
  if _, err := iamconn.CreatePolicyVersion(request); err != nil {

View File

@@ -12,7 +12,6 @@ import (
  "github.com/aws/aws-sdk-go/aws"
  "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/awsutil"
  "github.com/aws/aws-sdk-go/service/ec2"
  "github.com/hashicorp/terraform/helper/hashcode"
  "github.com/hashicorp/terraform/helper/resource"
@@ -334,8 +333,8 @@ func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error {
  ImageID: instanceOpts.ImageID,
  InstanceType: instanceOpts.InstanceType,
  KeyName: instanceOpts.KeyName,
- MaxCount: aws.Long(int64(1)),
- MinCount: aws.Long(int64(1)),
+ MaxCount: aws.Int64(int64(1)),
+ MinCount: aws.Int64(int64(1)),
  NetworkInterfaces: instanceOpts.NetworkInterfaces,
  Placement: instanceOpts.Placement,
  PrivateIPAddress: instanceOpts.PrivateIPAddress,
@@ -346,7 +345,7 @@ func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error {
  }
  // Create the instance
- log.Printf("[DEBUG] Run configuration: %s", awsutil.StringValue(runOpts))
+ log.Printf("[DEBUG] Run configuration: %s", runOpts)
  var runResp *ec2.Reservation
  for i := 0; i < 5; i++ {
@@ -543,7 +542,7 @@ func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
  _, err := conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
    InstanceID: aws.String(d.Id()),
    SourceDestCheck: &ec2.AttributeBooleanValue{
-     Value: aws.Boolean(d.Get("source_dest_check").(bool)),
+     Value: aws.Bool(d.Get("source_dest_check").(bool)),
    },
  })
  if err != nil {
@@ -571,7 +570,7 @@ func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
  _, err := conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
    InstanceID: aws.String(d.Id()),
    DisableAPITermination: &ec2.AttributeBooleanValue{
-     Value: aws.Boolean(d.Get("disable_api_termination").(bool)),
+     Value: aws.Bool(d.Get("disable_api_termination").(bool)),
    },
  })
  if err != nil {
@@ -794,7 +793,7 @@ func readBlockDeviceMappingsFromConfig(
  for _, v := range vL {
    bd := v.(map[string]interface{})
    ebs := &ec2.EBSBlockDevice{
-     DeleteOnTermination: aws.Boolean(bd["delete_on_termination"].(bool)),
+     DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)),
    }
    if v, ok := bd["snapshot_id"].(string); ok && v != "" {
@@ -802,11 +801,11 @@ func readBlockDeviceMappingsFromConfig(
    }
    if v, ok := bd["encrypted"].(bool); ok && v {
-     ebs.Encrypted = aws.Boolean(v)
+     ebs.Encrypted = aws.Bool(v)
    }
    if v, ok := bd["volume_size"].(int); ok && v != 0 {
-     ebs.VolumeSize = aws.Long(int64(v))
+     ebs.VolumeSize = aws.Int64(int64(v))
    }
    if v, ok := bd["volume_type"].(string); ok && v != "" {
@@ -814,7 +813,7 @@ func readBlockDeviceMappingsFromConfig(
    }
    if v, ok := bd["iops"].(int); ok && v > 0 {
-     ebs.IOPS = aws.Long(int64(v))
+     ebs.IOPS = aws.Int64(int64(v))
    }
    blockDevices = append(blockDevices, &ec2.BlockDeviceMapping{
@@ -843,11 +842,11 @@ func readBlockDeviceMappingsFromConfig(
  for _, v := range vL {
    bd := v.(map[string]interface{})
    ebs := &ec2.EBSBlockDevice{
-     DeleteOnTermination: aws.Boolean(bd["delete_on_termination"].(bool)),
+     DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)),
    }
    if v, ok := bd["volume_size"].(int); ok && v != 0 {
-     ebs.VolumeSize = aws.Long(int64(v))
+     ebs.VolumeSize = aws.Int64(int64(v))
    }
    if v, ok := bd["volume_type"].(string); ok && v != "" {
@@ -855,7 +854,7 @@ func readBlockDeviceMappingsFromConfig(
    }
    if v, ok := bd["iops"].(int); ok && v > 0 {
-     ebs.IOPS = aws.Long(int64(v))
+     ebs.IOPS = aws.Int64(int64(v))
    }
    if dn, err := fetchRootDeviceName(d.Get("ami").(string), conn); err == nil {
@@ -902,14 +901,14 @@ func buildAwsInstanceOpts(
  conn := meta.(*AWSClient).ec2conn
  opts := &awsInstanceOpts{
-   DisableAPITermination: aws.Boolean(d.Get("disable_api_termination").(bool)),
-   EBSOptimized: aws.Boolean(d.Get("ebs_optimized").(bool)),
+   DisableAPITermination: aws.Bool(d.Get("disable_api_termination").(bool)),
+   EBSOptimized: aws.Bool(d.Get("ebs_optimized").(bool)),
    ImageID: aws.String(d.Get("ami").(string)),
    InstanceType: aws.String(d.Get("instance_type").(string)),
  }
  opts.Monitoring = &ec2.RunInstancesMonitoringEnabled{
-   Enabled: aws.Boolean(d.Get("monitoring").(bool)),
+   Enabled: aws.Bool(d.Get("monitoring").(bool)),
  }
  opts.IAMInstanceProfile = &ec2.IAMInstanceProfileSpecification{
@@ -965,8 +964,8 @@ func buildAwsInstanceOpts(
  // to avoid: Network interfaces and an instance-level security groups may not be specified on
  // the same request
  ni := &ec2.InstanceNetworkInterfaceSpecification{
-   AssociatePublicIPAddress: aws.Boolean(associatePublicIPAddress),
-   DeviceIndex: aws.Long(int64(0)),
+   AssociatePublicIPAddress: aws.Bool(associatePublicIPAddress),
+   DeviceIndex: aws.Int64(int64(0)),
    SubnetID: aws.String(subnetID),
    Groups: groups,
  }

View File

@@ -7,7 +7,6 @@ import (
  "github.com/aws/aws-sdk-go/aws"
  "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/awsutil"
  "github.com/aws/aws-sdk-go/service/ec2"
  "github.com/hashicorp/terraform/helper/resource"
  "github.com/hashicorp/terraform/helper/schema"
@@ -47,7 +46,7 @@ func TestAccAWSInstance_basic(t *testing.T) {
  var err error
  vol, err = conn.CreateVolume(&ec2.CreateVolumeInput{
    AvailabilityZone: aws.String("us-west-2a"),
-   Size: aws.Long(int64(5)),
+   Size: aws.Int64(int64(5)),
  })
  return err
  },
@@ -467,8 +466,8 @@ func TestAccAWSInstance_keyPairCheck(t *testing.T) {
  if v.KeyName == nil {
    return fmt.Errorf("No Key Pair found, expected(%s)", keyName)
  }
- if *v.KeyName != keyName {
-   return fmt.Errorf("Bad key name, expected (%s), got (%s)", keyName, awsutil.StringValue(v.KeyName))
+ if v.KeyName != nil && *v.KeyName != keyName {
+   return fmt.Errorf("Bad key name, expected (%s), got (%s)", keyName, *v.KeyName)
  }
  return nil

View File

@@ -43,7 +43,7 @@ func resourceAwsKinesisStreamCreate(d *schema.ResourceData, meta interface{}) er
  conn := meta.(*AWSClient).kinesisconn
  sn := d.Get("name").(string)
  createOpts := &kinesis.CreateStreamInput{
-   ShardCount: aws.Long(int64(d.Get("shard_count").(int))),
+   ShardCount: aws.Int64(int64(d.Get("shard_count").(int))),
    StreamName: aws.String(sn),
  }
@@ -82,7 +82,7 @@ func resourceAwsKinesisStreamRead(d *schema.ResourceData, meta interface{}) erro
  conn := meta.(*AWSClient).kinesisconn
  describeOpts := &kinesis.DescribeStreamInput{
    StreamName: aws.String(d.Get("name").(string)),
-   Limit: aws.Long(1),
+   Limit: aws.Int64(1),
  }
  resp, err := conn.DescribeStream(describeOpts)
  if err != nil {
@@ -138,7 +138,7 @@ func streamStateRefreshFunc(conn *kinesis.Kinesis, sn string) resource.StateRefr
  return func() (interface{}, string, error) {
    describeOpts := &kinesis.DescribeStreamInput{
      StreamName: aws.String(sn),
-     Limit: aws.Long(1),
+     Limit: aws.Int64(1),
    }
    resp, err := conn.DescribeStream(describeOpts)
    if err != nil {

View File

@@ -46,7 +46,7 @@ func testAccCheckKinesisStreamExists(n string, stream *kinesis.StreamDescription
  conn := testAccProvider.Meta().(*AWSClient).kinesisconn
  describeOpts := &kinesis.DescribeStreamInput{
    StreamName: aws.String(rs.Primary.Attributes["name"]),
-   Limit: aws.Long(1),
+   Limit: aws.Int64(1),
  }
  resp, err := conn.DescribeStream(describeOpts)
  if err != nil {
@@ -84,7 +84,7 @@ func testAccCheckKinesisStreamDestroy(s *terraform.State) error {
  conn := testAccProvider.Meta().(*AWSClient).kinesisconn
  describeOpts := &kinesis.DescribeStreamInput{
    StreamName: aws.String(rs.Primary.Attributes["name"]),
-   Limit: aws.Long(1),
+   Limit: aws.Int64(1),
  }
  resp, err := conn.DescribeStream(describeOpts)
  if err == nil {

View File

@@ -112,10 +112,10 @@ func resourceAwsLambdaFunctionCreate(d *schema.ResourceData, meta interface{}) e
  Description: aws.String(d.Get("description").(string)),
  FunctionName: aws.String(functionName),
  Handler: aws.String(d.Get("handler").(string)),
- MemorySize: aws.Long(int64(d.Get("memory_size").(int))),
+ MemorySize: aws.Int64(int64(d.Get("memory_size").(int))),
  Role: aws.String(iamRole),
  Runtime: aws.String(d.Get("runtime").(string)),
- Timeout: aws.Long(int64(d.Get("timeout").(int))),
+ Timeout: aws.Int64(int64(d.Get("timeout").(int))),
  }
  for i := 0; i < 5; i++ {

View File

@ -264,7 +264,7 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface
LaunchConfigurationName: aws.String(d.Get("name").(string)), LaunchConfigurationName: aws.String(d.Get("name").(string)),
ImageID: aws.String(d.Get("image_id").(string)), ImageID: aws.String(d.Get("image_id").(string)),
InstanceType: aws.String(d.Get("instance_type").(string)), InstanceType: aws.String(d.Get("instance_type").(string)),
EBSOptimized: aws.Boolean(d.Get("ebs_optimized").(bool)), EBSOptimized: aws.Bool(d.Get("ebs_optimized").(bool)),
} }
if v, ok := d.GetOk("user_data"); ok { if v, ok := d.GetOk("user_data"); ok {
@ -273,7 +273,7 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface
} }
createLaunchConfigurationOpts.InstanceMonitoring = &autoscaling.InstanceMonitoring{ createLaunchConfigurationOpts.InstanceMonitoring = &autoscaling.InstanceMonitoring{
Enabled: aws.Boolean(d.Get("enable_monitoring").(bool)), Enabled: aws.Bool(d.Get("enable_monitoring").(bool)),
} }
if v, ok := d.GetOk("iam_instance_profile"); ok { if v, ok := d.GetOk("iam_instance_profile"); ok {
@ -285,7 +285,7 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface
} }
if v, ok := d.GetOk("associate_public_ip_address"); ok { if v, ok := d.GetOk("associate_public_ip_address"); ok {
createLaunchConfigurationOpts.AssociatePublicIPAddress = aws.Boolean(v.(bool)) createLaunchConfigurationOpts.AssociatePublicIPAddress = aws.Bool(v.(bool))
} }
if v, ok := d.GetOk("key_name"); ok { if v, ok := d.GetOk("key_name"); ok {
@ -308,7 +308,7 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface
for _, v := range vL { for _, v := range vL {
bd := v.(map[string]interface{}) bd := v.(map[string]interface{})
ebs := &autoscaling.EBS{ ebs := &autoscaling.EBS{
DeleteOnTermination: aws.Boolean(bd["delete_on_termination"].(bool)), DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)),
} }
if v, ok := bd["snapshot_id"].(string); ok && v != "" { if v, ok := bd["snapshot_id"].(string); ok && v != "" {
@ -316,7 +316,7 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface
} }
if v, ok := bd["volume_size"].(int); ok && v != 0 { if v, ok := bd["volume_size"].(int); ok && v != 0 {
ebs.VolumeSize = aws.Long(int64(v)) ebs.VolumeSize = aws.Int64(int64(v))
} }
if v, ok := bd["volume_type"].(string); ok && v != "" { if v, ok := bd["volume_type"].(string); ok && v != "" {
@ -324,7 +324,7 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface
} }
if v, ok := bd["iops"].(int); ok && v > 0 { if v, ok := bd["iops"].(int); ok && v > 0 {
ebs.IOPS = aws.Long(int64(v)) ebs.IOPS = aws.Int64(int64(v))
} }
blockDevices = append(blockDevices, &autoscaling.BlockDeviceMapping{ blockDevices = append(blockDevices, &autoscaling.BlockDeviceMapping{
@ -353,11 +353,11 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface
for _, v := range vL { for _, v := range vL {
bd := v.(map[string]interface{}) bd := v.(map[string]interface{})
ebs := &autoscaling.EBS{ ebs := &autoscaling.EBS{
DeleteOnTermination: aws.Boolean(bd["delete_on_termination"].(bool)), DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)),
} }
if v, ok := bd["volume_size"].(int); ok && v != 0 { if v, ok := bd["volume_size"].(int); ok && v != 0 {
ebs.VolumeSize = aws.Long(int64(v)) ebs.VolumeSize = aws.Int64(int64(v))
} }
if v, ok := bd["volume_type"].(string); ok && v != "" { if v, ok := bd["volume_type"].(string); ok && v != "" {
@ -365,7 +365,7 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface
} }
if v, ok := bd["iops"].(int); ok && v > 0 { if v, ok := bd["iops"].(int); ok && v > 0 {
ebs.IOPS = aws.Long(int64(v)) ebs.IOPS = aws.Int64(int64(v))
} }
if dn, err := fetchRootDeviceName(d.Get("image_id").(string), ec2conn); err == nil { if dn, err := fetchRootDeviceName(d.Get("image_id").(string), ec2conn); err == nil {
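
The launch-configuration hunks combine the two renames, aws.Boolean becoming aws.Bool and aws.Long becoming aws.Int64. A hedged sketch of the resulting EBS construction follows; the values are placeholders, the autoscaling import path is assumed to mirror the ec2 one used elsewhere in this change, and the field names (DeleteOnTermination, VolumeSize, IOPS) match only the SDK version this change targets:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	// Values are placeholders; the field names are the ones the hunks
	// above populate with the renamed aws.Bool / aws.Int64 helpers.
	ebs := &autoscaling.EBS{
		DeleteOnTermination: aws.Bool(true),
		VolumeSize:          aws.Int64(8),
		IOPS:                aws.Int64(100),
	}
	fmt.Println(*ebs.VolumeSize, *ebs.DeleteOnTermination)
}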

View File

@ -53,7 +53,7 @@ func resourceAwsLBCookieStickinessPolicyCreate(d *schema.ResourceData, meta inte
// Provision the LBStickinessPolicy // Provision the LBStickinessPolicy
lbspOpts := &elb.CreateLBCookieStickinessPolicyInput{ lbspOpts := &elb.CreateLBCookieStickinessPolicyInput{
CookieExpirationPeriod: aws.Long(int64(d.Get("cookie_expiration_period").(int))), CookieExpirationPeriod: aws.Int64(int64(d.Get("cookie_expiration_period").(int))),
LoadBalancerName: aws.String(d.Get("load_balancer").(string)), LoadBalancerName: aws.String(d.Get("load_balancer").(string)),
PolicyName: aws.String(d.Get("name").(string)), PolicyName: aws.String(d.Get("name").(string)),
} }
@ -64,7 +64,7 @@ func resourceAwsLBCookieStickinessPolicyCreate(d *schema.ResourceData, meta inte
setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{
LoadBalancerName: aws.String(d.Get("load_balancer").(string)), LoadBalancerName: aws.String(d.Get("load_balancer").(string)),
LoadBalancerPort: aws.Long(int64(d.Get("lb_port").(int))), LoadBalancerPort: aws.Int64(int64(d.Get("lb_port").(int))),
PolicyNames: []*string{aws.String(d.Get("name").(string))}, PolicyNames: []*string{aws.String(d.Get("name").(string))},
} }
@ -129,7 +129,7 @@ func resourceAwsLBCookieStickinessPolicyDelete(d *schema.ResourceData, meta inte
// policy itself. // policy itself.
setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{
LoadBalancerName: aws.String(d.Get("load_balancer").(string)), LoadBalancerName: aws.String(d.Get("load_balancer").(string)),
LoadBalancerPort: aws.Long(int64(d.Get("lb_port").(int))), LoadBalancerPort: aws.Int64(int64(d.Get("lb_port").(int))),
PolicyNames: []*string{}, PolicyNames: []*string{},
} }

View File

@ -175,7 +175,7 @@ func resourceAwsNetworkInterfaceDetach(oa *schema.Set, meta interface{}, eniId s
old_attachment := oa.List()[0].(map[string]interface{}) old_attachment := oa.List()[0].(map[string]interface{})
detach_request := &ec2.DetachNetworkInterfaceInput{ detach_request := &ec2.DetachNetworkInterfaceInput{
AttachmentID: aws.String(old_attachment["attachment_id"].(string)), AttachmentID: aws.String(old_attachment["attachment_id"].(string)),
Force: aws.Boolean(true), Force: aws.Bool(true),
} }
conn := meta.(*AWSClient).ec2conn conn := meta.(*AWSClient).ec2conn
_, detach_err := conn.DetachNetworkInterface(detach_request) _, detach_err := conn.DetachNetworkInterface(detach_request)
@ -216,7 +216,7 @@ func resourceAwsNetworkInterfaceUpdate(d *schema.ResourceData, meta interface{})
new_attachment := na.(*schema.Set).List()[0].(map[string]interface{}) new_attachment := na.(*schema.Set).List()[0].(map[string]interface{})
di := new_attachment["device_index"].(int) di := new_attachment["device_index"].(int)
attach_request := &ec2.AttachNetworkInterfaceInput{ attach_request := &ec2.AttachNetworkInterfaceInput{
DeviceIndex: aws.Long(int64(di)), DeviceIndex: aws.Int64(int64(di)),
InstanceID: aws.String(new_attachment["instance"].(string)), InstanceID: aws.String(new_attachment["instance"].(string)),
NetworkInterfaceID: aws.String(d.Id()), NetworkInterfaceID: aws.String(d.Id()),
} }
@ -231,7 +231,7 @@ func resourceAwsNetworkInterfaceUpdate(d *schema.ResourceData, meta interface{})
request := &ec2.ModifyNetworkInterfaceAttributeInput{ request := &ec2.ModifyNetworkInterfaceAttributeInput{
NetworkInterfaceID: aws.String(d.Id()), NetworkInterfaceID: aws.String(d.Id()),
SourceDestCheck: &ec2.AttributeBooleanValue{Value: aws.Boolean(d.Get("source_dest_check").(bool))}, SourceDestCheck: &ec2.AttributeBooleanValue{Value: aws.Bool(d.Get("source_dest_check").(bool))},
} }
_, err := conn.ModifyNetworkInterfaceAttribute(request) _, err := conn.ModifyNetworkInterfaceAttribute(request)

View File

@ -68,7 +68,7 @@ func resourceAwsRoute53HealthCheckUpdate(d *schema.ResourceData, meta interface{
} }
if d.HasChange("failure_threshold") { if d.HasChange("failure_threshold") {
updateHealthCheck.FailureThreshold = aws.Long(int64(d.Get("failure_threshold").(int))) updateHealthCheck.FailureThreshold = aws.Int64(int64(d.Get("failure_threshold").(int)))
} }
if d.HasChange("fqdn") { if d.HasChange("fqdn") {
@ -76,7 +76,7 @@ func resourceAwsRoute53HealthCheckUpdate(d *schema.ResourceData, meta interface{
} }
if d.HasChange("port") { if d.HasChange("port") {
updateHealthCheck.Port = aws.Long(int64(d.Get("port").(int))) updateHealthCheck.Port = aws.Int64(int64(d.Get("port").(int)))
} }
if d.HasChange("resource_path") { if d.HasChange("resource_path") {
@ -104,8 +104,8 @@ func resourceAwsRoute53HealthCheckCreate(d *schema.ResourceData, meta interface{
healthConfig := &route53.HealthCheckConfig{ healthConfig := &route53.HealthCheckConfig{
Type: aws.String(d.Get("type").(string)), Type: aws.String(d.Get("type").(string)),
FailureThreshold: aws.Long(int64(d.Get("failure_threshold").(int))), FailureThreshold: aws.Int64(int64(d.Get("failure_threshold").(int))),
RequestInterval: aws.Long(int64(d.Get("request_interval").(int))), RequestInterval: aws.Int64(int64(d.Get("request_interval").(int))),
} }
if v, ok := d.GetOk("fqdn"); ok { if v, ok := d.GetOk("fqdn"); ok {
@ -121,7 +121,7 @@ func resourceAwsRoute53HealthCheckCreate(d *schema.ResourceData, meta interface{
} }
if v, ok := d.GetOk("port"); ok { if v, ok := d.GetOk("port"); ok {
healthConfig.Port = aws.Long(int64(v.(int))) healthConfig.Port = aws.Int64(int64(v.(int)))
} }
if v, ok := d.GetOk("resource_path"); ok { if v, ok := d.GetOk("resource_path"); ok {

View File

@ -367,7 +367,7 @@ func resourceAwsRoute53RecordBuildSet(d *schema.ResourceData, zoneName string) (
} }
if v, ok := d.GetOk("ttl"); ok { if v, ok := d.GetOk("ttl"); ok {
rec.TTL = aws.Long(int64(v.(int))) rec.TTL = aws.Int64(int64(v.(int)))
} }
// Resource records // Resource records
@ -385,7 +385,7 @@ func resourceAwsRoute53RecordBuildSet(d *schema.ResourceData, zoneName string) (
alias := aliases[0].(map[string]interface{}) alias := aliases[0].(map[string]interface{})
rec.AliasTarget = &route53.AliasTarget{ rec.AliasTarget = &route53.AliasTarget{
DNSName: aws.String(alias["name"].(string)), DNSName: aws.String(alias["name"].(string)),
EvaluateTargetHealth: aws.Boolean(alias["evaluate_target_health"].(bool)), EvaluateTargetHealth: aws.Bool(alias["evaluate_target_health"].(bool)),
HostedZoneID: aws.String(alias["zone_id"].(string)), HostedZoneID: aws.String(alias["zone_id"].(string)),
} }
log.Printf("[DEBUG] Creating alias: %#v", alias) log.Printf("[DEBUG] Creating alias: %#v", alias)
@ -408,7 +408,7 @@ func resourceAwsRoute53RecordBuildSet(d *schema.ResourceData, zoneName string) (
} }
if v, ok := d.GetOk("weight"); ok { if v, ok := d.GetOk("weight"); ok {
rec.Weight = aws.Long(int64(v.(int))) rec.Weight = aws.Int64(int64(v.(int)))
} }
if v, ok := d.GetOk("set_identifier"); ok { if v, ok := d.GetOk("set_identifier"); ok {

View File

@ -223,8 +223,8 @@ func resourceAwsSecurityGroupCreate(d *schema.ResourceData, meta interface{}) er
GroupID: createResp.GroupID, GroupID: createResp.GroupID,
IPPermissions: []*ec2.IPPermission{ IPPermissions: []*ec2.IPPermission{
&ec2.IPPermission{ &ec2.IPPermission{
FromPort: aws.Long(int64(0)), FromPort: aws.Int64(int64(0)),
ToPort: aws.Long(int64(0)), ToPort: aws.Int64(int64(0)),
IPRanges: []*ec2.IPRange{ IPRanges: []*ec2.IPRange{
&ec2.IPRange{ &ec2.IPRange{
CIDRIP: aws.String("0.0.0.0/0"), CIDRIP: aws.String("0.0.0.0/0"),

View File

@ -9,7 +9,6 @@ import (
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/schema"
@ -98,7 +97,7 @@ func resourceAwsSecurityGroupRuleCreate(d *schema.ResourceData, meta interface{}
switch ruleType { switch ruleType {
case "ingress": case "ingress":
log.Printf("[DEBUG] Authorizing security group %s %s rule: %s", log.Printf("[DEBUG] Authorizing security group %s %s rule: %s",
sg_id, "Ingress", awsutil.StringValue(perm)) sg_id, "Ingress", perm)
req := &ec2.AuthorizeSecurityGroupIngressInput{ req := &ec2.AuthorizeSecurityGroupIngressInput{
GroupID: sg.GroupID, GroupID: sg.GroupID,
@ -213,7 +212,7 @@ func resourceAwsSecurityGroupRuleDelete(d *schema.ResourceData, meta interface{}
switch ruleType { switch ruleType {
case "ingress": case "ingress":
log.Printf("[DEBUG] Revoking rule (%s) from security group %s:\n%s", log.Printf("[DEBUG] Revoking rule (%s) from security group %s:\n%s",
"ingress", sg_id, awsutil.StringValue(perm)) "ingress", sg_id, perm)
req := &ec2.RevokeSecurityGroupIngressInput{ req := &ec2.RevokeSecurityGroupIngressInput{
GroupID: sg.GroupID, GroupID: sg.GroupID,
IPPermissions: []*ec2.IPPermission{perm}, IPPermissions: []*ec2.IPPermission{perm},
@ -330,8 +329,8 @@ func ipPermissionIDHash(ruleType string, ip *ec2.IPPermission) string {
func expandIPPerm(d *schema.ResourceData, sg *ec2.SecurityGroup) *ec2.IPPermission { func expandIPPerm(d *schema.ResourceData, sg *ec2.SecurityGroup) *ec2.IPPermission {
var perm ec2.IPPermission var perm ec2.IPPermission
perm.FromPort = aws.Long(int64(d.Get("from_port").(int))) perm.FromPort = aws.Int64(int64(d.Get("from_port").(int)))
perm.ToPort = aws.Long(int64(d.Get("to_port").(int))) perm.ToPort = aws.Int64(int64(d.Get("to_port").(int)))
perm.IPProtocol = aws.String(d.Get("protocol").(string)) perm.IPProtocol = aws.String(d.Get("protocol").(string))
// build a group map that behaves like a set // build a group map that behaves like a set
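
Besides the helper renames, this file drops the github.com/aws/aws-sdk-go/aws/awsutil import and hands the permission structs straight to log.Printf, presumably because the generated SDK types now format themselves. A stand-alone sketch of that pattern, using a hypothetical perm type rather than the real *ec2.IPPermission:

package main

import (
	"fmt"
	"log"
)

// perm is a stand-in for an SDK struct such as *ec2.IPPermission; the
// generated SDK types carry their own String formatting, which is why the
// explicit awsutil.StringValue calls could be dropped above.
type perm struct{ FromPort, ToPort int64 }

func (p perm) String() string {
	return fmt.Sprintf("{FromPort:%d ToPort:%d}", p.FromPort, p.ToPort)
}

func main() {
	p := perm{FromPort: 80, ToPort: 8000}
	log.Printf("[DEBUG] Authorizing rule: %s", p) // no awsutil helper needed
}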

View File

@ -56,8 +56,8 @@ func migrateExpandIPPerm(attrs map[string]string) (*ec2.IPPermission, error) {
return nil, fmt.Errorf("Error converting from_port in Security Group migration") return nil, fmt.Errorf("Error converting from_port in Security Group migration")
} }
perm.ToPort = aws.Long(int64(tp)) perm.ToPort = aws.Int64(int64(tp))
perm.FromPort = aws.Long(int64(fp)) perm.FromPort = aws.Int64(int64(fp))
perm.IPProtocol = aws.String(attrs["protocol"]) perm.IPProtocol = aws.String(attrs["protocol"])
groups := make(map[string]bool) groups := make(map[string]bool)

View File

@ -7,7 +7,6 @@ import (
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
@ -16,8 +15,8 @@ import (
func TestIpPermissionIDHash(t *testing.T) { func TestIpPermissionIDHash(t *testing.T) {
simple := &ec2.IPPermission{ simple := &ec2.IPPermission{
IPProtocol: aws.String("tcp"), IPProtocol: aws.String("tcp"),
FromPort: aws.Long(int64(80)), FromPort: aws.Int64(int64(80)),
ToPort: aws.Long(int64(8000)), ToPort: aws.Int64(int64(8000)),
IPRanges: []*ec2.IPRange{ IPRanges: []*ec2.IPRange{
&ec2.IPRange{ &ec2.IPRange{
CIDRIP: aws.String("10.0.0.0/8"), CIDRIP: aws.String("10.0.0.0/8"),
@ -27,8 +26,8 @@ func TestIpPermissionIDHash(t *testing.T) {
egress := &ec2.IPPermission{ egress := &ec2.IPPermission{
IPProtocol: aws.String("tcp"), IPProtocol: aws.String("tcp"),
FromPort: aws.Long(int64(80)), FromPort: aws.Int64(int64(80)),
ToPort: aws.Long(int64(8000)), ToPort: aws.Int64(int64(8000)),
IPRanges: []*ec2.IPRange{ IPRanges: []*ec2.IPRange{
&ec2.IPRange{ &ec2.IPRange{
CIDRIP: aws.String("10.0.0.0/8"), CIDRIP: aws.String("10.0.0.0/8"),
@ -47,8 +46,8 @@ func TestIpPermissionIDHash(t *testing.T) {
vpc_security_group_source := &ec2.IPPermission{ vpc_security_group_source := &ec2.IPPermission{
IPProtocol: aws.String("tcp"), IPProtocol: aws.String("tcp"),
FromPort: aws.Long(int64(80)), FromPort: aws.Int64(int64(80)),
ToPort: aws.Long(int64(8000)), ToPort: aws.Int64(int64(8000)),
UserIDGroupPairs: []*ec2.UserIDGroupPair{ UserIDGroupPairs: []*ec2.UserIDGroupPair{
&ec2.UserIDGroupPair{ &ec2.UserIDGroupPair{
UserID: aws.String("987654321"), UserID: aws.String("987654321"),
@ -67,8 +66,8 @@ func TestIpPermissionIDHash(t *testing.T) {
security_group_source := &ec2.IPPermission{ security_group_source := &ec2.IPPermission{
IPProtocol: aws.String("tcp"), IPProtocol: aws.String("tcp"),
FromPort: aws.Long(int64(80)), FromPort: aws.Int64(int64(80)),
ToPort: aws.Long(int64(8000)), ToPort: aws.Int64(int64(8000)),
UserIDGroupPairs: []*ec2.UserIDGroupPair{ UserIDGroupPairs: []*ec2.UserIDGroupPair{
&ec2.UserIDGroupPair{ &ec2.UserIDGroupPair{
UserID: aws.String("987654321"), UserID: aws.String("987654321"),
@ -101,7 +100,7 @@ func TestIpPermissionIDHash(t *testing.T) {
for _, tc := range cases { for _, tc := range cases {
actual := ipPermissionIDHash(tc.Type, tc.Input) actual := ipPermissionIDHash(tc.Type, tc.Input)
if actual != tc.Output { if actual != tc.Output {
t.Errorf("input: %s - %s\noutput: %s", tc.Type, awsutil.StringValue(tc.Input), actual) t.Errorf("input: %s - %s\noutput: %s", tc.Type, tc.Input, actual)
} }
} }
} }
@ -323,8 +322,8 @@ func testAccCheckAWSSecurityGroupRuleExists(n string, group *ec2.SecurityGroup)
func testAccCheckAWSSecurityGroupRuleAttributes(group *ec2.SecurityGroup, ruleType string) resource.TestCheckFunc { func testAccCheckAWSSecurityGroupRuleAttributes(group *ec2.SecurityGroup, ruleType string) resource.TestCheckFunc {
return func(s *terraform.State) error { return func(s *terraform.State) error {
p := &ec2.IPPermission{ p := &ec2.IPPermission{
FromPort: aws.Long(80), FromPort: aws.Int64(80),
ToPort: aws.Long(8000), ToPort: aws.Int64(8000),
IPProtocol: aws.String("tcp"), IPProtocol: aws.String("tcp"),
IPRanges: []*ec2.IPRange{&ec2.IPRange{CIDRIP: aws.String("10.0.0.0/8")}}, IPRanges: []*ec2.IPRange{&ec2.IPRange{CIDRIP: aws.String("10.0.0.0/8")}},
} }
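
One detail the test hunks make visible: aws.Int64(80) needs no conversion because 80 is an untyped constant, while values pulled out of the schema as int (as in the resource code earlier) still require an explicit int64(...) first. A minimal sketch, assuming only a helper with the aws.Int64 signature:

package main

import "fmt"

// Int64 has the same shape as aws.Int64.
func Int64(v int64) *int64 { return &v }

func main() {
	fromSchema := 80              // plays the role of d.Get("from_port").(int)
	p := Int64(int64(fromSchema)) // a typed int needs an explicit conversion
	q := Int64(8000)              // an untyped constant converts implicitly
	fmt.Println(*p, *q)
}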

View File

@ -356,8 +356,8 @@ func testAccCheckAWSSecurityGroupExists(n string, group *ec2.SecurityGroup) reso
func testAccCheckAWSSecurityGroupAttributes(group *ec2.SecurityGroup) resource.TestCheckFunc { func testAccCheckAWSSecurityGroupAttributes(group *ec2.SecurityGroup) resource.TestCheckFunc {
return func(s *terraform.State) error { return func(s *terraform.State) error {
p := &ec2.IPPermission{ p := &ec2.IPPermission{
FromPort: aws.Long(80), FromPort: aws.Int64(80),
ToPort: aws.Long(8000), ToPort: aws.Int64(8000),
IPProtocol: aws.String("tcp"), IPProtocol: aws.String("tcp"),
IPRanges: []*ec2.IPRange{&ec2.IPRange{CIDRIP: aws.String("10.0.0.0/8")}}, IPRanges: []*ec2.IPRange{&ec2.IPRange{CIDRIP: aws.String("10.0.0.0/8")}},
} }
@ -449,14 +449,14 @@ func testAccCheckAWSSecurityGroupAttributesChanged(group *ec2.SecurityGroup) res
return func(s *terraform.State) error { return func(s *terraform.State) error {
p := []*ec2.IPPermission{ p := []*ec2.IPPermission{
&ec2.IPPermission{ &ec2.IPPermission{
FromPort: aws.Long(80), FromPort: aws.Int64(80),
ToPort: aws.Long(9000), ToPort: aws.Int64(9000),
IPProtocol: aws.String("tcp"), IPProtocol: aws.String("tcp"),
IPRanges: []*ec2.IPRange{&ec2.IPRange{CIDRIP: aws.String("10.0.0.0/8")}}, IPRanges: []*ec2.IPRange{&ec2.IPRange{CIDRIP: aws.String("10.0.0.0/8")}},
}, },
&ec2.IPPermission{ &ec2.IPPermission{
FromPort: aws.Long(80), FromPort: aws.Int64(80),
ToPort: aws.Long(8000), ToPort: aws.Int64(8000),
IPProtocol: aws.String("tcp"), IPProtocol: aws.String("tcp"),
IPRanges: []*ec2.IPRange{ IPRanges: []*ec2.IPRange{
&ec2.IPRange{ &ec2.IPRange{

View File

@ -7,7 +7,6 @@ import (
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/schema"
@ -78,7 +77,7 @@ func resourceAwsSpotInstanceRequestCreate(d *schema.ResourceData, meta interface
// Though the AWS API supports creating spot instance requests for multiple // Though the AWS API supports creating spot instance requests for multiple
// instances, for TF purposes we fix this to one instance per request. // instances, for TF purposes we fix this to one instance per request.
// Users can get equivalent behavior out of TF's "count" meta-parameter. // Users can get equivalent behavior out of TF's "count" meta-parameter.
InstanceCount: aws.Long(1), InstanceCount: aws.Int64(1),
LaunchSpecification: &ec2.RequestSpotLaunchSpecification{ LaunchSpecification: &ec2.RequestSpotLaunchSpecification{
BlockDeviceMappings: instanceOpts.BlockDeviceMappings, BlockDeviceMappings: instanceOpts.BlockDeviceMappings,
@ -95,14 +94,14 @@ func resourceAwsSpotInstanceRequestCreate(d *schema.ResourceData, meta interface
} }
// Make the spot instance request // Make the spot instance request
log.Printf("[DEBUG] Requesting spot bid opts: %s", awsutil.StringValue(spotOpts)) log.Printf("[DEBUG] Requesting spot bid opts: %s", spotOpts)
resp, err := conn.RequestSpotInstances(spotOpts) resp, err := conn.RequestSpotInstances(spotOpts)
if err != nil { if err != nil {
return fmt.Errorf("Error requesting spot instances: %s", err) return fmt.Errorf("Error requesting spot instances: %s", err)
} }
if len(resp.SpotInstanceRequests) != 1 { if len(resp.SpotInstanceRequests) != 1 {
return fmt.Errorf( return fmt.Errorf(
"Expected response with length 1, got: %s", awsutil.StringValue(resp)) "Expected response with length 1, got: %s", resp)
} }
sir := *resp.SpotInstanceRequests[0] sir := *resp.SpotInstanceRequests[0]
@ -123,7 +122,7 @@ func resourceAwsSpotInstanceRequestCreate(d *schema.ResourceData, meta interface
_, err = spotStateConf.WaitForState() _, err = spotStateConf.WaitForState()
if err != nil { if err != nil {
return fmt.Errorf("Error while waiting for spot request (%s) to resolve: %s", awsutil.StringValue(sir), err) return fmt.Errorf("Error while waiting for spot request (%s) to resolve: %s", sir, err)
} }
} }

View File

@ -137,7 +137,7 @@ func resourceAwsSubnetUpdate(d *schema.ResourceData, meta interface{}) error {
modifyOpts := &ec2.ModifySubnetAttributeInput{ modifyOpts := &ec2.ModifySubnetAttributeInput{
SubnetID: aws.String(d.Id()), SubnetID: aws.String(d.Id()),
MapPublicIPOnLaunch: &ec2.AttributeBooleanValue{ MapPublicIPOnLaunch: &ec2.AttributeBooleanValue{
Value: aws.Boolean(d.Get("map_public_ip_on_launch").(bool)), Value: aws.Bool(d.Get("map_public_ip_on_launch").(bool)),
}, },
} }

View File

@ -157,7 +157,7 @@ func resourceAwsVolumeAttachmentDelete(d *schema.ResourceData, meta interface{})
Device: aws.String(d.Get("device_name").(string)), Device: aws.String(d.Get("device_name").(string)),
InstanceID: aws.String(iID), InstanceID: aws.String(iID),
VolumeID: aws.String(vID), VolumeID: aws.String(vID),
Force: aws.Boolean(d.Get("force_detach").(bool)), Force: aws.Bool(d.Get("force_detach").(bool)),
} }
_, err := conn.DetachVolume(opts) _, err := conn.DetachVolume(opts)

View File

@ -143,7 +143,7 @@ func resourceAwsVpnConnectionCreate(d *schema.ResourceData, meta interface{}) er
conn := meta.(*AWSClient).ec2conn conn := meta.(*AWSClient).ec2conn
connectOpts := &ec2.VPNConnectionOptionsSpecification{ connectOpts := &ec2.VPNConnectionOptionsSpecification{
StaticRoutesOnly: aws.Boolean(d.Get("static_routes_only").(bool)), StaticRoutesOnly: aws.Bool(d.Get("static_routes_only").(bool)),
} }
createOpts := &ec2.CreateVPNConnectionInput{ createOpts := &ec2.CreateVPNConnectionInput{

View File

@ -94,7 +94,7 @@ func expandEcsLoadBalancers(configured []interface{}) []*ecs.LoadBalancer {
l := &ecs.LoadBalancer{ l := &ecs.LoadBalancer{
ContainerName: aws.String(data["container_name"].(string)), ContainerName: aws.String(data["container_name"].(string)),
ContainerPort: aws.Long(int64(data["container_port"].(int))), ContainerPort: aws.Int64(int64(data["container_port"].(int))),
LoadBalancerName: aws.String(data["elb_name"].(string)), LoadBalancerName: aws.String(data["elb_name"].(string)),
} }
@ -117,8 +117,8 @@ func expandIPPerms(
var perm ec2.IPPermission var perm ec2.IPPermission
m := mRaw.(map[string]interface{}) m := mRaw.(map[string]interface{})
perm.FromPort = aws.Long(int64(m["from_port"].(int))) perm.FromPort = aws.Int64(int64(m["from_port"].(int)))
perm.ToPort = aws.Long(int64(m["to_port"].(int))) perm.ToPort = aws.Int64(int64(m["to_port"].(int)))
perm.IPProtocol = aws.String(m["protocol"].(string)) perm.IPProtocol = aws.String(m["protocol"].(string))
// When protocol is "-1", AWS won't store any ports for the // When protocol is "-1", AWS won't store any ports for the
@ -405,7 +405,7 @@ func expandPrivateIPAddesses(ips []interface{}) []*ec2.PrivateIPAddressSpecifica
PrivateIPAddress: aws.String(v.(string)), PrivateIPAddress: aws.String(v.(string)),
} }
new_private_ip.Primary = aws.Boolean(i == 0) new_private_ip.Primary = aws.Bool(i == 0)
dtos = append(dtos, new_private_ip) dtos = append(dtos, new_private_ip)
} }

View File

@ -70,8 +70,8 @@ func TestexpandIPPerms(t *testing.T) {
expected := []ec2.IPPermission{ expected := []ec2.IPPermission{
ec2.IPPermission{ ec2.IPPermission{
IPProtocol: aws.String("icmp"), IPProtocol: aws.String("icmp"),
FromPort: aws.Long(int64(1)), FromPort: aws.Int64(int64(1)),
ToPort: aws.Long(int64(-1)), ToPort: aws.Int64(int64(-1)),
IPRanges: []*ec2.IPRange{&ec2.IPRange{CIDRIP: aws.String("0.0.0.0/0")}}, IPRanges: []*ec2.IPRange{&ec2.IPRange{CIDRIP: aws.String("0.0.0.0/0")}},
UserIDGroupPairs: []*ec2.UserIDGroupPair{ UserIDGroupPairs: []*ec2.UserIDGroupPair{
&ec2.UserIDGroupPair{ &ec2.UserIDGroupPair{
@ -85,8 +85,8 @@ func TestexpandIPPerms(t *testing.T) {
}, },
ec2.IPPermission{ ec2.IPPermission{
IPProtocol: aws.String("icmp"), IPProtocol: aws.String("icmp"),
FromPort: aws.Long(int64(1)), FromPort: aws.Int64(int64(1)),
ToPort: aws.Long(int64(-1)), ToPort: aws.Int64(int64(-1)),
UserIDGroupPairs: []*ec2.UserIDGroupPair{ UserIDGroupPairs: []*ec2.UserIDGroupPair{
&ec2.UserIDGroupPair{ &ec2.UserIDGroupPair{
UserID: aws.String("foo"), UserID: aws.String("foo"),
@ -149,8 +149,8 @@ func TestExpandIPPerms_NegOneProtocol(t *testing.T) {
expected := []ec2.IPPermission{ expected := []ec2.IPPermission{
ec2.IPPermission{ ec2.IPPermission{
IPProtocol: aws.String("-1"), IPProtocol: aws.String("-1"),
FromPort: aws.Long(int64(0)), FromPort: aws.Int64(int64(0)),
ToPort: aws.Long(int64(0)), ToPort: aws.Int64(int64(0)),
IPRanges: []*ec2.IPRange{&ec2.IPRange{CIDRIP: aws.String("0.0.0.0/0")}}, IPRanges: []*ec2.IPRange{&ec2.IPRange{CIDRIP: aws.String("0.0.0.0/0")}},
UserIDGroupPairs: []*ec2.UserIDGroupPair{ UserIDGroupPairs: []*ec2.UserIDGroupPair{
&ec2.UserIDGroupPair{ &ec2.UserIDGroupPair{
@ -245,8 +245,8 @@ func TestExpandIPPerms_nonVPC(t *testing.T) {
expected := []ec2.IPPermission{ expected := []ec2.IPPermission{
ec2.IPPermission{ ec2.IPPermission{
IPProtocol: aws.String("icmp"), IPProtocol: aws.String("icmp"),
FromPort: aws.Long(int64(1)), FromPort: aws.Int64(int64(1)),
ToPort: aws.Long(int64(-1)), ToPort: aws.Int64(int64(-1)),
IPRanges: []*ec2.IPRange{&ec2.IPRange{CIDRIP: aws.String("0.0.0.0/0")}}, IPRanges: []*ec2.IPRange{&ec2.IPRange{CIDRIP: aws.String("0.0.0.0/0")}},
UserIDGroupPairs: []*ec2.UserIDGroupPair{ UserIDGroupPairs: []*ec2.UserIDGroupPair{
&ec2.UserIDGroupPair{ &ec2.UserIDGroupPair{
@ -259,8 +259,8 @@ func TestExpandIPPerms_nonVPC(t *testing.T) {
}, },
ec2.IPPermission{ ec2.IPPermission{
IPProtocol: aws.String("icmp"), IPProtocol: aws.String("icmp"),
FromPort: aws.Long(int64(1)), FromPort: aws.Int64(int64(1)),
ToPort: aws.Long(int64(-1)), ToPort: aws.Int64(int64(-1)),
UserIDGroupPairs: []*ec2.UserIDGroupPair{ UserIDGroupPairs: []*ec2.UserIDGroupPair{
&ec2.UserIDGroupPair{ &ec2.UserIDGroupPair{
GroupName: aws.String("foo"), GroupName: aws.String("foo"),
@ -302,8 +302,8 @@ func TestexpandListeners(t *testing.T) {
} }
expected := &elb.Listener{ expected := &elb.Listener{
InstancePort: aws.Long(int64(8000)), InstancePort: aws.Int64(int64(8000)),
LoadBalancerPort: aws.Long(int64(80)), LoadBalancerPort: aws.Int64(int64(80)),
InstanceProtocol: aws.String("http"), InstanceProtocol: aws.String("http"),
Protocol: aws.String("http"), Protocol: aws.String("http"),
} }
@ -324,11 +324,11 @@ func TestflattenHealthCheck(t *testing.T) {
}{ }{
{ {
Input: &elb.HealthCheck{ Input: &elb.HealthCheck{
UnhealthyThreshold: aws.Long(int64(10)), UnhealthyThreshold: aws.Int64(int64(10)),
HealthyThreshold: aws.Long(int64(10)), HealthyThreshold: aws.Int64(int64(10)),
Target: aws.String("HTTP:80/"), Target: aws.String("HTTP:80/"),
Timeout: aws.Long(int64(30)), Timeout: aws.Int64(int64(30)),
Interval: aws.Long(int64(30)), Interval: aws.Int64(int64(30)),
}, },
Output: []map[string]interface{}{ Output: []map[string]interface{}{
map[string]interface{}{ map[string]interface{}{
@ -570,7 +570,7 @@ func TestexpandPrivateIPAddesses(t *testing.T) {
func TestflattenAttachment(t *testing.T) { func TestflattenAttachment(t *testing.T) {
expanded := &ec2.NetworkInterfaceAttachment{ expanded := &ec2.NetworkInterfaceAttachment{
InstanceID: aws.String("i-00001"), InstanceID: aws.String("i-00001"),
DeviceIndex: aws.Long(int64(1)), DeviceIndex: aws.Int64(int64(1)),
AttachmentID: aws.String("at-002"), AttachmentID: aws.String("at-002"),
} }

View File

@ -4,7 +4,6 @@ import (
"log" "log"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/schema"
) )
@ -39,7 +38,7 @@ func setTags(conn *ec2.EC2, d *schema.ResourceData) error {
} }
} }
if len(create) > 0 { if len(create) > 0 {
log.Printf("[DEBUG] Creating tags: %s for %s", awsutil.StringValue(create), d.Id()) log.Printf("[DEBUG] Creating tags: %s for %s", create, d.Id())
_, err := conn.CreateTags(&ec2.CreateTagsInput{ _, err := conn.CreateTags(&ec2.CreateTagsInput{
Resources: []*string{aws.String(d.Id())}, Resources: []*string{aws.String(d.Id())},
Tags: create, Tags: create,

View File

@ -67,7 +67,7 @@ func s3Factory(conf map[string]string) (Client, error) {
awsConfig := &aws.Config{ awsConfig := &aws.Config{
Credentials: credentialsProvider, Credentials: credentialsProvider,
Region: regionName, Region: aws.String(regionName),
} }
nativeClient := s3.New(awsConfig) nativeClient := s3.New(awsConfig)
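
The remote-state backend picks up the same upstream change to aws.Config: Region is now a *string, so it is set with aws.String and dereferenced when read back, which is exactly what the TestS3Factory hunk below does. A small runnable sketch, with "us-west-1" used only because that is the value the test checks:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Region is now *string: set it with aws.String and dereference it to
	// read it back, as TestS3Factory below does.
	cfg := &aws.Config{Region: aws.String("us-west-1")}
	fmt.Println(*cfg.Region == "us-west-1") // prints true
}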

View File

@ -43,7 +43,7 @@ func TestS3Factory(t *testing.T) {
s3Client := client.(*S3Client) s3Client := client.(*S3Client)
if s3Client.nativeClient.Config.Region != "us-west-1" { if *s3Client.nativeClient.Config.Region != "us-west-1" {
t.Fatalf("Incorrect region was populated") t.Fatalf("Incorrect region was populated")
} }
if s3Client.bucketName != "foo" { if s3Client.bucketName != "foo" {