Merge branch 'master' into f-aws-rds-tags

* master: (66 commits)
  provider/aws: Fix dependency violation when deleting Internet Gateways
  command/remote-config: failing tests
  update CHANGELOG
  command/remote-config: do a pull with `terraform remote config`
  command/remote-{pull,push}: colorize and show success output
  command/remote-config: lowercase the type so that Atlas works, for example
  command/remote-config: show flag parse errors
  command/remote-config: remove weird error case that shows no error message
  command: when setting up state, only write back if local is newer
  minor code cleanups to get acceptance tests passing
  update CHANGELOG
  providers/digitalocean: add dot in GET response
  providers/digitalocean: force fqdn in dns rr value
  update CHANGELOG
  small code cleanup
  Add proper reading/updating of tags for S3
  provider/aws: Add tags to S3
  Documentation for ASG Tags added
  Tags support added for AWS ASG
  command/output: don't panic if no root module in state [GH-1263]
  ...
This commit is contained in:
Clint Shryock 2015-03-27 13:49:05 -05:00
commit 38c386487b
90 changed files with 2617 additions and 215 deletions

View File

@ -40,6 +40,9 @@ IMPROVEMENTS:
like refresh. like refresh.
* core: Autoload `terraform.tfvars.json` as well as `terraform.tfvars` [GH-1030] * core: Autoload `terraform.tfvars.json` as well as `terraform.tfvars` [GH-1030]
* core: `.tf` files that start with a period are now ignored. [GH-1227] * core: `.tf` files that start with a period are now ignored. [GH-1227]
* command/remote-config: After enabling remote state, a `pull` is
automatically done initially.
* providers/google: Add `size` option to disk blocks for instances. [GH-1284]
BUG FIXES: BUG FIXES:
@ -58,6 +61,8 @@ BUG FIXES:
* providers/aws: Longer wait times for route53 records (30 mins). [GH-1164] * providers/aws: Longer wait times for route53 records (30 mins). [GH-1164]
* providers/digitalocean: Waits until droplet is ready to be destroyed [GH-1057] * providers/digitalocean: Waits until droplet is ready to be destroyed [GH-1057]
* providers/digitalocean: More lenient about 404's while waiting [GH-1062] * providers/digitalocean: More lenient about 404's while waiting [GH-1062]
* providers/digitalocean: FQDN for domain records in CNAME, MX, NS, etc.
Also fixes invalid updates in plans. [GH-863]
* providers/google: Network data in state was not being stored. [GH-1095] * providers/google: Network data in state was not being stored. [GH-1095]
PLUGIN CHANGES: PLUGIN CHANGES:

View File

@ -0,0 +1,170 @@
package aws
import (
"bytes"
"fmt"
"log"
"github.com/hashicorp/aws-sdk-go/aws"
"github.com/hashicorp/aws-sdk-go/gen/autoscaling"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
)
// tagsSchema returns the schema to use for tags.
// autoscalingTagsSchema returns the schema to use for the "tag" attribute on
// autoscaling resources: an optional set of {key, value, propagate_at_launch}
// objects, hashed by autoscalingTagsToHash.
func autoscalingTagsSchema() *schema.Schema {
	// Each element of the set is a small resource with three required fields.
	elem := &schema.Resource{
		Schema: map[string]*schema.Schema{
			"key": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},

			"value": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},

			"propagate_at_launch": &schema.Schema{
				Type:     schema.TypeBool,
				Required: true,
			},
		},
	}

	return &schema.Schema{
		Type:     schema.TypeSet,
		Optional: true,
		Elem:     elem,
		Set:      autoscalingTagsToHash,
	}
}
func autoscalingTagsToHash(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})
buf.WriteString(fmt.Sprintf("%s-", m["key"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["value"].(string)))
buf.WriteString(fmt.Sprintf("%t-", m["propagate_at_launch"].(bool)))
return hashcode.String(buf.String())
}
// setAutoscalingTags is a helper to set the tags for a resource. It expects
// the tags field to be named "tag". It diffs the old and new tag sets,
// deleting removed tags first and then creating new/changed ones.
func setAutoscalingTags(conn *autoscaling.AutoScaling, d *schema.ResourceData) error {
	if !d.HasChange("tag") {
		return nil
	}

	oldRaw, newRaw := d.GetChange("tag")
	oldByKey := setToMapByKey(oldRaw.(*schema.Set), "key")
	newByKey := setToMapByKey(newRaw.(*schema.Set), "key")
	resourceID := d.Get("name").(string)

	toCreate, toRemove := diffAutoscalingTags(
		autoscalingTagsFromMap(oldByKey, resourceID),
		autoscalingTagsFromMap(newByKey, resourceID),
		resourceID)

	// Remove stale tags before creating the new/updated ones.
	if len(toRemove) > 0 {
		log.Printf("[DEBUG] Removing autoscaling tags: %#v", toRemove)
		req := autoscaling.DeleteTagsType{Tags: toRemove}
		if err := conn.DeleteTags(&req); err != nil {
			return err
		}
	}

	if len(toCreate) > 0 {
		log.Printf("[DEBUG] Creating autoscaling tags: %#v", toCreate)
		req := autoscaling.CreateOrUpdateTagsType{Tags: toCreate}
		if err := conn.CreateOrUpdateTags(&req); err != nil {
			return err
		}
	}

	return nil
}
// diffAutoscalingTags takes our tags locally and the ones remotely and
// returns the set of tags that must be created and the set of tags that
// must be destroyed.
func diffAutoscalingTags(oldTags, newTags []autoscaling.Tag, resourceID string) ([]autoscaling.Tag, []autoscaling.Tag) {
	// Index the desired tags by key; every desired tag is (re)created.
	desired := make(map[string]interface{}, len(newTags))
	for _, t := range newTags {
		desired[*t.Key] = map[string]interface{}{
			"value":               *t.Value,
			"propagate_at_launch": *t.PropagateAtLaunch,
		}
	}

	// Any existing tag that is absent from, or differs within, the desired
	// set must be removed.
	var remove []autoscaling.Tag
	for _, t := range oldTags {
		cur, ok := desired[*t.Key].(map[string]interface{})
		if !ok || cur["value"] != *t.Value || cur["propagate_at_launch"] != *t.PropagateAtLaunch {
			remove = append(remove, t)
		}
	}

	return autoscalingTagsFromMap(desired, resourceID), remove
}
// autoscalingTagsFromMap returns the autoscaling tags for the given map of
// data, stamping each tag with the resource ID and the fixed
// "auto-scaling-group" resource type.
func autoscalingTagsFromMap(m map[string]interface{}, resourceID string) []autoscaling.Tag {
	result := make([]autoscaling.Tag, 0, len(m))
	for key, raw := range m {
		data := raw.(map[string]interface{})
		tag := autoscaling.Tag{
			Key:               aws.String(key),
			Value:             aws.String(data["value"].(string)),
			PropagateAtLaunch: aws.Boolean(data["propagate_at_launch"].(bool)),
			ResourceID:        aws.String(resourceID),
			ResourceType:      aws.String("auto-scaling-group"),
		}
		result = append(result, tag)
	}
	return result
}
// autoscalingTagsToMap turns the list of tags into a map keyed by tag key;
// each value holds the tag's value and propagate_at_launch flag.
func autoscalingTagsToMap(ts []autoscaling.Tag) map[string]interface{} {
	out := make(map[string]interface{}, len(ts))
	for _, t := range ts {
		out[*t.Key] = map[string]interface{}{
			"value":               *t.Value,
			"propagate_at_launch": *t.PropagateAtLaunch,
		}
	}
	return out
}
// autoscalingTagDescriptionsToMap turns the list of tag descriptions into a
// map keyed by tag key; each value holds the tag's value and
// propagate_at_launch flag.
func autoscalingTagDescriptionsToMap(ts []autoscaling.TagDescription) map[string]map[string]interface{} {
	out := make(map[string]map[string]interface{}, len(ts))
	for _, t := range ts {
		out[*t.Key] = map[string]interface{}{
			"value":               *t.Value,
			"propagate_at_launch": *t.PropagateAtLaunch,
		}
	}
	return out
}
// setToMapByKey converts a schema.Set of tag objects into a map indexed by
// the (string) value of the given field name.
func setToMapByKey(s *schema.Set, key string) map[string]interface{} {
	out := make(map[string]interface{})
	for _, raw := range s.List() {
		entry := raw.(map[string]interface{})
		out[entry[key].(string)] = entry
	}
	return out
}

View File

@ -0,0 +1,122 @@
package aws
import (
"fmt"
"reflect"
"testing"
"github.com/hashicorp/aws-sdk-go/gen/autoscaling"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
// TestDiffAutoscalingTags verifies that diffAutoscalingTags computes the
// correct create/remove sets for two scenarios: replacing one tag key with
// another, and modifying an existing key's value and propagate flag.
func TestDiffAutoscalingTags(t *testing.T) {
cases := []struct {
// Old/New are the local and desired tag maps (key -> {value, propagate_at_launch}).
Old, New map[string]interface{}
// Create/Remove are the expected outputs of the diff.
Create, Remove map[string]interface{}
}{
// Basic add/remove: "Name" disappears, "DifferentTag" is added.
{
Old: map[string]interface{}{
"Name": map[string]interface{}{
"value": "bar",
"propagate_at_launch": true,
},
},
New: map[string]interface{}{
"DifferentTag": map[string]interface{}{
"value": "baz",
"propagate_at_launch": true,
},
},
Create: map[string]interface{}{
"DifferentTag": map[string]interface{}{
"value": "baz",
"propagate_at_launch": true,
},
},
Remove: map[string]interface{}{
"Name": map[string]interface{}{
"value": "bar",
"propagate_at_launch": true,
},
},
},
// Modify: same key, changed value and propagate flag — the old tag is
// removed and the new version is created.
{
Old: map[string]interface{}{
"Name": map[string]interface{}{
"value": "bar",
"propagate_at_launch": true,
},
},
New: map[string]interface{}{
"Name": map[string]interface{}{
"value": "baz",
"propagate_at_launch": false,
},
},
Create: map[string]interface{}{
"Name": map[string]interface{}{
"value": "baz",
"propagate_at_launch": false,
},
},
Remove: map[string]interface{}{
"Name": map[string]interface{}{
"value": "bar",
"propagate_at_launch": true,
},
},
},
}
var resourceID = "sample"
for i, tc := range cases {
// Round-trip through autoscalingTagsFromMap / autoscalingTagsToMap so the
// comparison happens on plain maps rather than SDK structs.
awsTagsOld := autoscalingTagsFromMap(tc.Old, resourceID)
awsTagsNew := autoscalingTagsFromMap(tc.New, resourceID)
c, r := diffAutoscalingTags(awsTagsOld, awsTagsNew, resourceID)
cm := autoscalingTagsToMap(c)
rm := autoscalingTagsToMap(r)
if !reflect.DeepEqual(cm, tc.Create) {
t.Fatalf("%d: bad create: \n%#v\n%#v", i, cm, tc.Create)
}
if !reflect.DeepEqual(rm, tc.Remove) {
t.Fatalf("%d: bad remove: \n%#v\n%#v", i, rm, tc.Remove)
}
}
}
// testAccCheckAutoscalingTags can be used to check the tags on a resource.
// It asserts that the tag with the given key exists in ts and carries the
// expected value and propagate_at_launch setting.
func testAccCheckAutoscalingTags(
	ts *[]autoscaling.TagDescription, key string, expected map[string]interface{}) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		tags := autoscalingTagDescriptionsToMap(*ts)
		got, ok := tags[key]
		if !ok {
			return fmt.Errorf("Missing tag: %s", key)
		}

		badValue := got["value"] != expected["value"].(string)
		badPropagate := got["propagate_at_launch"] != expected["propagate_at_launch"].(bool)
		if badValue || badPropagate {
			return fmt.Errorf("%s: bad value: %s", key, got)
		}

		return nil
	}
}
// testAccCheckAutoscalingTagNotExists asserts that no tag with the given key
// is present in ts.
func testAccCheckAutoscalingTagNotExists(ts *[]autoscaling.TagDescription, key string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if _, found := autoscalingTagDescriptionsToMap(*ts)[key]; found {
			return fmt.Errorf("Tag exists when it should not: %s", key)
		}
		return nil
	}
}

View File

@ -118,6 +118,8 @@ func resourceAwsAutoscalingGroup() *schema.Resource {
return hashcode.String(v.(string)) return hashcode.String(v.(string))
}, },
}, },
"tag": autoscalingTagsSchema(),
}, },
} }
} }
@ -133,6 +135,11 @@ func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{})
autoScalingGroupOpts.AvailabilityZones = expandStringList( autoScalingGroupOpts.AvailabilityZones = expandStringList(
d.Get("availability_zones").(*schema.Set).List()) d.Get("availability_zones").(*schema.Set).List())
if v, ok := d.GetOk("tag"); ok {
autoScalingGroupOpts.Tags = autoscalingTagsFromMap(
setToMapByKey(v.(*schema.Set), "key"), d.Get("name").(string))
}
if v, ok := d.GetOk("default_cooldown"); ok { if v, ok := d.GetOk("default_cooldown"); ok {
autoScalingGroupOpts.DefaultCooldown = aws.Integer(v.(int)) autoScalingGroupOpts.DefaultCooldown = aws.Integer(v.(int))
} }
@ -186,15 +193,16 @@ func resourceAwsAutoscalingGroupRead(d *schema.ResourceData, meta interface{}) e
} }
d.Set("availability_zones", g.AvailabilityZones) d.Set("availability_zones", g.AvailabilityZones)
d.Set("default_cooldown", *g.DefaultCooldown) d.Set("default_cooldown", g.DefaultCooldown)
d.Set("desired_capacity", *g.DesiredCapacity) d.Set("desired_capacity", g.DesiredCapacity)
d.Set("health_check_grace_period", *g.HealthCheckGracePeriod) d.Set("health_check_grace_period", g.HealthCheckGracePeriod)
d.Set("health_check_type", *g.HealthCheckType) d.Set("health_check_type", g.HealthCheckType)
d.Set("launch_configuration", *g.LaunchConfigurationName) d.Set("launch_configuration", g.LaunchConfigurationName)
d.Set("load_balancers", g.LoadBalancerNames) d.Set("load_balancers", g.LoadBalancerNames)
d.Set("min_size", *g.MinSize) d.Set("min_size", g.MinSize)
d.Set("max_size", *g.MaxSize) d.Set("max_size", g.MaxSize)
d.Set("name", *g.AutoScalingGroupName) d.Set("name", g.AutoScalingGroupName)
d.Set("tag", g.Tags)
d.Set("vpc_zone_identifier", strings.Split(*g.VPCZoneIdentifier, ",")) d.Set("vpc_zone_identifier", strings.Split(*g.VPCZoneIdentifier, ","))
d.Set("termination_policies", g.TerminationPolicies) d.Set("termination_policies", g.TerminationPolicies)
@ -224,6 +232,12 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{})
opts.MaxSize = aws.Integer(d.Get("max_size").(int)) opts.MaxSize = aws.Integer(d.Get("max_size").(int))
} }
if err := setAutoscalingTags(autoscalingconn, d); err != nil {
return err
} else {
d.SetPartial("tag")
}
log.Printf("[DEBUG] AutoScaling Group update configuration: %#v", opts) log.Printf("[DEBUG] AutoScaling Group update configuration: %#v", opts)
err := autoscalingconn.UpdateAutoScalingGroup(&opts) err := autoscalingconn.UpdateAutoScalingGroup(&opts)
if err != nil { if err != nil {

View File

@ -2,6 +2,7 @@ package aws
import ( import (
"fmt" "fmt"
"reflect"
"testing" "testing"
"github.com/hashicorp/aws-sdk-go/aws" "github.com/hashicorp/aws-sdk-go/aws"
@ -25,7 +26,7 @@ func TestAccAWSAutoScalingGroup_basic(t *testing.T) {
testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
testAccCheckAWSAutoScalingGroupAttributes(&group), testAccCheckAWSAutoScalingGroupAttributes(&group),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_autoscaling_group.bar", "availability_zones.2487133097", "us-west-2a"), "aws_autoscaling_group.bar", "availability_zones.1807834199", "us-west-2a"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_autoscaling_group.bar", "name", "foobar3-terraform-test"), "aws_autoscaling_group.bar", "name", "foobar3-terraform-test"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
@ -53,6 +54,44 @@ func TestAccAWSAutoScalingGroup_basic(t *testing.T) {
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_autoscaling_group.bar", "desired_capacity", "5"), "aws_autoscaling_group.bar", "desired_capacity", "5"),
testLaunchConfigurationName("aws_autoscaling_group.bar", &lc), testLaunchConfigurationName("aws_autoscaling_group.bar", &lc),
testAccCheckAutoscalingTags(&group.Tags, "Bar", map[string]interface{}{
"value": "bar-foo",
"propagate_at_launch": true,
}),
),
},
},
})
}
func TestAccAWSAutoScalingGroup_tags(t *testing.T) {
var group autoscaling.AutoScalingGroup
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSAutoScalingGroupConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
testAccCheckAutoscalingTags(&group.Tags, "Foo", map[string]interface{}{
"value": "foo-bar",
"propagate_at_launch": true,
}),
),
},
resource.TestStep{
Config: testAccAWSAutoScalingGroupConfigUpdate,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
testAccCheckAutoscalingTagNotExists(&group.Tags, "Foo"),
testAccCheckAutoscalingTags(&group.Tags, "Bar", map[string]interface{}{
"value": "bar-foo",
"propagate_at_launch": true,
}),
), ),
}, },
}, },
@ -145,6 +184,21 @@ func testAccCheckAWSAutoScalingGroupAttributes(group *autoscaling.AutoScalingGro
return fmt.Errorf("Bad launch configuration name: %s", *group.LaunchConfigurationName) return fmt.Errorf("Bad launch configuration name: %s", *group.LaunchConfigurationName)
} }
t := autoscaling.TagDescription{
Key: aws.String("Foo"),
Value: aws.String("foo-bar"),
PropagateAtLaunch: aws.Boolean(true),
ResourceType: aws.String("auto-scaling-group"),
ResourceID: group.AutoScalingGroupName,
}
if !reflect.DeepEqual(group.Tags[0], t) {
return fmt.Errorf(
"Got:\n\n%#v\n\nExpected:\n\n%#v\n",
group.Tags[0],
t)
}
return nil return nil
} }
} }
@ -226,6 +280,12 @@ resource "aws_autoscaling_group" "bar" {
termination_policies = ["OldestInstance"] termination_policies = ["OldestInstance"]
launch_configuration = "${aws_launch_configuration.foobar.name}" launch_configuration = "${aws_launch_configuration.foobar.name}"
tag {
key = "Foo"
value = "foo-bar"
propagate_at_launch = true
}
} }
` `
@ -253,6 +313,12 @@ resource "aws_autoscaling_group" "bar" {
force_delete = true force_delete = true
launch_configuration = "${aws_launch_configuration.new.name}" launch_configuration = "${aws_launch_configuration.new.name}"
tag {
key = "Bar"
value = "bar-foo"
propagate_at_launch = true
}
} }
` `

View File

@ -154,6 +154,8 @@ func resourceAwsElb() *schema.Resource {
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
}, },
"tags": tagsSchema(),
}, },
} }
} }
@ -167,11 +169,12 @@ func resourceAwsElbCreate(d *schema.ResourceData, meta interface{}) error {
return err return err
} }
tags := tagsFromMapELB(d.Get("tags").(map[string]interface{}))
// Provision the elb // Provision the elb
elbOpts := &elb.CreateAccessPointInput{ elbOpts := &elb.CreateAccessPointInput{
LoadBalancerName: aws.String(d.Get("name").(string)), LoadBalancerName: aws.String(d.Get("name").(string)),
Listeners: listeners, Listeners: listeners,
Tags: tags,
} }
if scheme, ok := d.GetOk("internal"); ok && scheme.(bool) { if scheme, ok := d.GetOk("internal"); ok && scheme.(bool) {
@ -208,6 +211,8 @@ func resourceAwsElbCreate(d *schema.ResourceData, meta interface{}) error {
d.SetPartial("security_groups") d.SetPartial("security_groups")
d.SetPartial("subnets") d.SetPartial("subnets")
d.Set("tags", tagsToMapELB(tags))
if d.HasChange("health_check") { if d.HasChange("health_check") {
vs := d.Get("health_check").(*schema.Set).List() vs := d.Get("health_check").(*schema.Set).List()
if len(vs) > 0 { if len(vs) > 0 {
@ -267,6 +272,15 @@ func resourceAwsElbRead(d *schema.ResourceData, meta interface{}) error {
d.Set("security_groups", lb.SecurityGroups) d.Set("security_groups", lb.SecurityGroups)
d.Set("subnets", lb.Subnets) d.Set("subnets", lb.Subnets)
resp, err := elbconn.DescribeTags(&elb.DescribeTagsInput{
LoadBalancerNames: []string{*lb.LoadBalancerName},
})
var et []elb.Tag
if len(resp.TagDescriptions) > 0 {
et = resp.TagDescriptions[0].Tags
}
d.Set("tags", tagsToMapELB(et))
// There's only one health check, so save that to state as we // There's only one health check, so save that to state as we
// currently can // currently can
if *lb.HealthCheck.Target != "" { if *lb.HealthCheck.Target != "" {
@ -357,6 +371,11 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error {
} }
} }
if err := setTagsELB(elbconn, d); err != nil {
return err
} else {
d.SetPartial("tags")
}
d.Partial(false) d.Partial(false)
return resourceAwsElbRead(d, meta) return resourceAwsElbRead(d, meta)

View File

@ -53,6 +53,61 @@ func TestAccAWSELB_basic(t *testing.T) {
}) })
} }
// TestAccAWSELB_tags is an acceptance test covering ELB tag support: the
// first step creates an ELB with tag bar=baz; the second step applies an
// updated config and expects the replacement tags foo=bar and new=type.
// NOTE(review): this test talks to real AWS (gated by testAccPreCheck).
func TestAccAWSELB_tags(t *testing.T) {
var conf elb.LoadBalancerDescription
var td elb.TagDescription
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSELBDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSELBConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSELBExists("aws_elb.bar", &conf),
testAccCheckAWSELBAttributes(&conf),
resource.TestCheckResourceAttr(
"aws_elb.bar", "name", "foobar-terraform-test"),
// testAccLoadTags fetches live tags into td for the checks below.
testAccLoadTags(&conf, &td),
testAccCheckELBTags(&td.Tags, "bar", "baz"),
),
},
resource.TestStep{
Config: testAccAWSELBConfig_TagUpdate,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSELBExists("aws_elb.bar", &conf),
testAccCheckAWSELBAttributes(&conf),
resource.TestCheckResourceAttr(
"aws_elb.bar", "name", "foobar-terraform-test"),
testAccLoadTags(&conf, &td),
testAccCheckELBTags(&td.Tags, "foo", "bar"),
testAccCheckELBTags(&td.Tags, "new", "type"),
),
},
},
})
}
// testAccLoadTags fetches the tag descriptions for the given load balancer
// from the ELB API and, when present, stores the first description in td so
// later check functions can inspect it.
func testAccLoadTags(conf *elb.LoadBalancerDescription, td *elb.TagDescription) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		conn := testAccProvider.Meta().(*AWSClient).elbconn

		req := &elb.DescribeTagsInput{
			LoadBalancerNames: []string{*conf.LoadBalancerName},
		}
		describe, err := conn.DescribeTags(req)
		if err != nil {
			return err
		}

		if len(describe.TagDescriptions) > 0 {
			*td = describe.TagDescriptions[0]
		}
		return nil
	}
}
func TestAccAWSELB_InstanceAttaching(t *testing.T) { func TestAccAWSELB_InstanceAttaching(t *testing.T) {
var conf elb.LoadBalancerDescription var conf elb.LoadBalancerDescription
@ -288,6 +343,31 @@ resource "aws_elb" "bar" {
lb_protocol = "http" lb_protocol = "http"
} }
tags {
bar = "baz"
}
cross_zone_load_balancing = true
}
`
const testAccAWSELBConfig_TagUpdate = `
resource "aws_elb" "bar" {
name = "foobar-terraform-test"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
listener {
instance_port = 8000
instance_protocol = "http"
lb_port = 80
lb_protocol = "http"
}
tags {
foo = "bar"
new = "type"
}
cross_zone_load_balancing = true cross_zone_load_balancing = true
} }
` `

View File

@ -265,13 +265,6 @@ func resourceAwsInstance() *schema.Resource {
ForceNew: true, ForceNew: true,
}, },
"device_name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Default: "/dev/sda1",
},
"iops": &schema.Schema{ "iops": &schema.Schema{
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
@ -298,7 +291,6 @@ func resourceAwsInstance() *schema.Resource {
var buf bytes.Buffer var buf bytes.Buffer
m := v.(map[string]interface{}) m := v.(map[string]interface{})
buf.WriteString(fmt.Sprintf("%t-", m["delete_on_termination"].(bool))) buf.WriteString(fmt.Sprintf("%t-", m["delete_on_termination"].(bool)))
buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string)))
// See the NOTE in "ebs_block_device" for why we skip iops here. // See the NOTE in "ebs_block_device" for why we skip iops here.
// buf.WriteString(fmt.Sprintf("%d-", m["iops"].(int))) // buf.WriteString(fmt.Sprintf("%d-", m["iops"].(int)))
buf.WriteString(fmt.Sprintf("%d-", m["volume_size"].(int))) buf.WriteString(fmt.Sprintf("%d-", m["volume_size"].(int)))
@ -478,10 +470,14 @@ func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error {
ebs.IOPS = aws.Integer(v) ebs.IOPS = aws.Integer(v)
} }
if dn, err := fetchRootDeviceName(d.Get("ami").(string), ec2conn); err == nil {
blockDevices = append(blockDevices, ec2.BlockDeviceMapping{ blockDevices = append(blockDevices, ec2.BlockDeviceMapping{
DeviceName: aws.String(bd["device_name"].(string)), DeviceName: dn,
EBS: ebs, EBS: ebs,
}) })
} else {
return err
}
} }
} }
@ -778,9 +774,6 @@ func readBlockDevicesFromInstance(instance *ec2.Instance, ec2conn *ec2.EC2) (map
if instanceBd.EBS != nil && instanceBd.EBS.DeleteOnTermination != nil { if instanceBd.EBS != nil && instanceBd.EBS.DeleteOnTermination != nil {
bd["delete_on_termination"] = *instanceBd.EBS.DeleteOnTermination bd["delete_on_termination"] = *instanceBd.EBS.DeleteOnTermination
} }
if instanceBd.DeviceName != nil {
bd["device_name"] = *instanceBd.DeviceName
}
if vol.Size != nil { if vol.Size != nil {
bd["volume_size"] = *vol.Size bd["volume_size"] = *vol.Size
} }
@ -794,6 +787,9 @@ func readBlockDevicesFromInstance(instance *ec2.Instance, ec2conn *ec2.EC2) (map
if blockDeviceIsRoot(instanceBd, instance) { if blockDeviceIsRoot(instanceBd, instance) {
blockDevices["root"] = bd blockDevices["root"] = bd
} else { } else {
if instanceBd.DeviceName != nil {
bd["device_name"] = *instanceBd.DeviceName
}
if vol.Encrypted != nil { if vol.Encrypted != nil {
bd["encrypted"] = *vol.Encrypted bd["encrypted"] = *vol.Encrypted
} }
@ -813,3 +809,21 @@ func blockDeviceIsRoot(bd ec2.InstanceBlockDeviceMapping, instance *ec2.Instance
instance.RootDeviceName != nil && instance.RootDeviceName != nil &&
*bd.DeviceName == *instance.RootDeviceName) *bd.DeviceName == *instance.RootDeviceName)
} }
// fetchRootDeviceName looks up the root block device name reported by the
// given AMI. It returns an error for a blank AMI ID, a failed DescribeImages
// call, or when the lookup does not return exactly one image.
func fetchRootDeviceName(ami string, conn *ec2.EC2) (aws.StringValue, error) {
	if ami == "" {
		return nil, fmt.Errorf("Cannot fetch root device name for blank AMI ID.")
	}

	log.Printf("[DEBUG] Describing AMI %q to get root block device name", ami)
	req := &ec2.DescribeImagesRequest{ImageIDs: []string{ami}}

	// Early returns keep the happy path left-aligned instead of nesting the
	// success case inside `if err == nil { ... } else { ... }` blocks.
	res, err := conn.DescribeImages(req)
	if err != nil {
		return nil, err
	}
	if len(res.Images) != 1 {
		return nil, fmt.Errorf("Expected 1 AMI for ID: %s, got: %#v", ami, res.Images)
	}
	return res.Images[0].RootDeviceName, nil
}

View File

@ -140,11 +140,9 @@ func TestAccAWSInstance_blockDevices(t *testing.T) {
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_instance.foo", "root_block_device.#", "1"), "aws_instance.foo", "root_block_device.#", "1"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_instance.foo", "root_block_device.1246122048.device_name", "/dev/sda1"), "aws_instance.foo", "root_block_device.1023169747.volume_size", "11"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_instance.foo", "root_block_device.1246122048.volume_size", "11"), "aws_instance.foo", "root_block_device.1023169747.volume_type", "gp2"),
resource.TestCheckResourceAttr(
"aws_instance.foo", "root_block_device.1246122048.volume_type", "gp2"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_instance.foo", "ebs_block_device.#", "2"), "aws_instance.foo", "ebs_block_device.#", "2"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
@ -467,7 +465,6 @@ resource "aws_instance" "foo" {
instance_type = "m1.small" instance_type = "m1.small"
root_block_device { root_block_device {
device_name = "/dev/sda1"
volume_type = "gp2" volume_type = "gp2"
volume_size = 11 volume_size = 11
} }

View File

@ -199,39 +199,14 @@ func resourceAwsInternetGatewayDetach(d *schema.ResourceData, meta interface{})
d.Id(), d.Id(),
vpcID.(string)) vpcID.(string))
wait := true
err := ec2conn.DetachInternetGateway(&ec2.DetachInternetGatewayRequest{
InternetGatewayID: aws.String(d.Id()),
VPCID: aws.String(vpcID.(string)),
})
if err != nil {
ec2err, ok := err.(aws.APIError)
if ok {
if ec2err.Code == "InvalidInternetGatewayID.NotFound" {
err = nil
wait = false
} else if ec2err.Code == "Gateway.NotAttached" {
err = nil
wait = false
}
}
if err != nil {
return err
}
}
if !wait {
return nil
}
// Wait for it to be fully detached before continuing // Wait for it to be fully detached before continuing
log.Printf("[DEBUG] Waiting for internet gateway (%s) to detach", d.Id()) log.Printf("[DEBUG] Waiting for internet gateway (%s) to detach", d.Id())
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"attached", "detaching", "available"}, Pending: []string{"detaching"},
Target: "detached", Target: "detached",
Refresh: IGAttachStateRefreshFunc(ec2conn, d.Id(), "detached"), Refresh: detachIGStateRefreshFunc(ec2conn, d.Id(), vpcID.(string)),
Timeout: 1 * time.Minute, Timeout: 2 * time.Minute,
Delay: 10 * time.Second,
} }
if _, err := stateConf.WaitForState(); err != nil { if _, err := stateConf.WaitForState(); err != nil {
return fmt.Errorf( return fmt.Errorf(
@ -242,6 +217,32 @@ func resourceAwsInternetGatewayDetach(d *schema.ResourceData, meta interface{})
return nil return nil
} }
// detachIGStateRefreshFunc returns a resource.StateRefreshFunc that retries
// detaching the given internet gateway from the given VPC on every poll,
// mapping AWS error codes to states:
//   - "Gateway.NotAttached" (or a clean response) -> "detached" (done)
//   - "DependencyViolation" -> "detaching" (keep waiting/retrying)
//   - "InvalidInternetGatewayID.NotFound" -> "Not Found" with the error,
//     which aborts the wait.
func detachIGStateRefreshFunc(conn *ec2.EC2, instanceID, vpcID string) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
err := conn.DetachInternetGateway(&ec2.DetachInternetGatewayRequest{
InternetGatewayID: aws.String(instanceID),
VPCID: aws.String(vpcID),
})
if err != nil {
ec2err, ok := err.(aws.APIError)
if ok {
if ec2err.Code == "InvalidInternetGatewayID.NotFound" {
return nil, "Not Found", err
} else if ec2err.Code == "Gateway.NotAttached" {
return "detached", "detached", nil
} else if ec2err.Code == "DependencyViolation" {
return nil, "detaching", nil
}
}
}
// NOTE(review): a non-nil error that is not an aws.APIError (or has an
// unrecognized code) falls through here and is reported as "detached",
// silently swallowing the error — confirm this is intentional.
// DetachInternetGateway only returns an error, so if it's nil, assume we're
// detached
return "detached", "detached", nil
}
}
// IGStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch // IGStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch
// an internet gateway. // an internet gateway.
func IGStateRefreshFunc(ec2conn *ec2.EC2, id string) resource.StateRefreshFunc { func IGStateRefreshFunc(ec2conn *ec2.EC2, id string) resource.StateRefreshFunc {
@ -300,10 +301,6 @@ func IGAttachStateRefreshFunc(ec2conn *ec2.EC2, id string, expected string) reso
ig := &resp.InternetGateways[0] ig := &resp.InternetGateways[0]
if time.Now().Sub(start) > 10*time.Second {
return ig, expected, nil
}
if len(ig.Attachments) == 0 { if len(ig.Attachments) == 0 {
// No attachments, we're detached // No attachments, we're detached
return ig, "detached", nil return ig, "detached", nil

View File

@ -67,17 +67,8 @@ func resourceAwsRoute53RecordCreate(d *schema.ResourceData, meta interface{}) er
return err return err
} }
// Check if the current record name contains the zone suffix.
// If it does not, add the zone name to form a fully qualified name
// and keep AWS happy.
recordName := d.Get("name").(string)
zoneName := strings.Trim(*zoneRecord.HostedZone.Name, ".")
if !strings.HasSuffix(recordName, zoneName) {
d.Set("name", strings.Join([]string{recordName, zoneName}, "."))
}
// Get the record // Get the record
rec, err := resourceAwsRoute53RecordBuildSet(d) rec, err := resourceAwsRoute53RecordBuildSet(d, *zoneRecord.HostedZone.Name)
if err != nil { if err != nil {
return err return err
} }
@ -101,7 +92,7 @@ func resourceAwsRoute53RecordCreate(d *schema.ResourceData, meta interface{}) er
} }
log.Printf("[DEBUG] Creating resource records for zone: %s, name: %s", log.Printf("[DEBUG] Creating resource records for zone: %s, name: %s",
zone, d.Get("name").(string)) zone, *rec.Name)
wait := resource.StateChangeConf{ wait := resource.StateChangeConf{
Pending: []string{"rejected"}, Pending: []string{"rejected"},
@ -111,11 +102,13 @@ func resourceAwsRoute53RecordCreate(d *schema.ResourceData, meta interface{}) er
Refresh: func() (interface{}, string, error) { Refresh: func() (interface{}, string, error) {
resp, err := conn.ChangeResourceRecordSets(req) resp, err := conn.ChangeResourceRecordSets(req)
if err != nil { if err != nil {
if strings.Contains(err.Error(), "PriorRequestNotComplete") { if r53err, ok := err.(aws.APIError); ok {
if r53err.Code == "PriorRequestNotComplete" {
// There is some pending operation, so just retry // There is some pending operation, so just retry
// in a bit. // in a bit.
return nil, "rejected", nil return nil, "rejected", nil
} }
}
return nil, "failure", err return nil, "failure", err
} }
@ -159,9 +152,17 @@ func resourceAwsRoute53RecordRead(d *schema.ResourceData, meta interface{}) erro
conn := meta.(*AWSClient).r53conn conn := meta.(*AWSClient).r53conn
zone := d.Get("zone_id").(string) zone := d.Get("zone_id").(string)
// get expanded name
zoneRecord, err := conn.GetHostedZone(&route53.GetHostedZoneRequest{ID: aws.String(zone)})
if err != nil {
return err
}
en := expandRecordName(d.Get("name").(string), *zoneRecord.HostedZone.Name)
lopts := &route53.ListResourceRecordSetsRequest{ lopts := &route53.ListResourceRecordSetsRequest{
HostedZoneID: aws.String(cleanZoneID(zone)), HostedZoneID: aws.String(cleanZoneID(zone)),
StartRecordName: aws.String(d.Get("name").(string)), StartRecordName: aws.String(en),
StartRecordType: aws.String(d.Get("type").(string)), StartRecordType: aws.String(d.Get("type").(string)),
} }
@ -202,9 +203,12 @@ func resourceAwsRoute53RecordDelete(d *schema.ResourceData, meta interface{}) er
zone := d.Get("zone_id").(string) zone := d.Get("zone_id").(string)
log.Printf("[DEBUG] Deleting resource records for zone: %s, name: %s", log.Printf("[DEBUG] Deleting resource records for zone: %s, name: %s",
zone, d.Get("name").(string)) zone, d.Get("name").(string))
zoneRecord, err := conn.GetHostedZone(&route53.GetHostedZoneRequest{ID: aws.String(zone)})
if err != nil {
return err
}
// Get the records // Get the records
rec, err := resourceAwsRoute53RecordBuildSet(d) rec, err := resourceAwsRoute53RecordBuildSet(d, *zoneRecord.HostedZone.Name)
if err != nil { if err != nil {
return err return err
} }
@ -260,7 +264,7 @@ func resourceAwsRoute53RecordDelete(d *schema.ResourceData, meta interface{}) er
return nil return nil
} }
func resourceAwsRoute53RecordBuildSet(d *schema.ResourceData) (*route53.ResourceRecordSet, error) { func resourceAwsRoute53RecordBuildSet(d *schema.ResourceData, zoneName string) (*route53.ResourceRecordSet, error) {
recs := d.Get("records").(*schema.Set).List() recs := d.Get("records").(*schema.Set).List()
records := make([]route53.ResourceRecord, 0, len(recs)) records := make([]route53.ResourceRecord, 0, len(recs))
@ -275,8 +279,15 @@ func resourceAwsRoute53RecordBuildSet(d *schema.ResourceData) (*route53.Resource
} }
} }
// get expanded name
en := expandRecordName(d.Get("name").(string), zoneName)
// Create the RecordSet request with the fully expanded name, e.g.
// sub.domain.com. Route 53 requires a fully qualified domain name, but does
// not require the trailing ".", which it will itself, so we don't call FQDN
// here.
rec := &route53.ResourceRecordSet{ rec := &route53.ResourceRecordSet{
Name: aws.String(d.Get("name").(string)), Name: aws.String(en),
Type: aws.String(d.Get("type").(string)), Type: aws.String(d.Get("type").(string)),
TTL: aws.Long(int64(d.Get("ttl").(int))), TTL: aws.Long(int64(d.Get("ttl").(int))),
ResourceRecords: records, ResourceRecords: records,
@ -304,3 +315,15 @@ func cleanRecordName(name string) string {
} }
return str return str
} }
// Check if the current record name contains the zone suffix.
// If it does not, add the zone name to form a fully qualified name
// and keep AWS happy.
func expandRecordName(name, zone string) string {
rn := strings.TrimSuffix(name, ".")
zone = strings.TrimSuffix(zone, ".")
if !strings.HasSuffix(rn, zone) {
rn = strings.Join([]string{name, zone}, ".")
}
return rn
}

View File

@ -29,6 +29,27 @@ func TestCleanRecordName(t *testing.T) {
} }
} }
func TestExpandRecordName(t *testing.T) {
cases := []struct {
Input, Output string
}{
{"www", "www.nonexample.com"},
{"dev.www", "dev.www.nonexample.com"},
{"*", "*.nonexample.com"},
{"nonexample.com", "nonexample.com"},
{"test.nonexample.com", "test.nonexample.com"},
{"test.nonexample.com.", "test.nonexample.com"},
}
zone_name := "nonexample.com"
for _, tc := range cases {
actual := expandRecordName(tc.Input, zone_name)
if actual != tc.Output {
t.Fatalf("input: %s\noutput: %s", tc.Input, actual)
}
}
}
func TestAccRoute53Record(t *testing.T) { func TestAccRoute53Record(t *testing.T) {
resource.Test(t, resource.TestCase{ resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) }, PreCheck: func() { testAccPreCheck(t) },
@ -151,9 +172,11 @@ func testAccCheckRoute53RecordExists(n string) resource.TestCheckFunc {
name := parts[1] name := parts[1]
rType := parts[2] rType := parts[2]
en := expandRecordName(name, "notexample.com")
lopts := &route53.ListResourceRecordSetsRequest{ lopts := &route53.ListResourceRecordSetsRequest{
HostedZoneID: aws.String(cleanZoneID(zone)), HostedZoneID: aws.String(cleanZoneID(zone)),
StartRecordName: aws.String(name), StartRecordName: aws.String(en),
StartRecordType: aws.String(rType), StartRecordType: aws.String(rType),
} }
@ -167,7 +190,7 @@ func testAccCheckRoute53RecordExists(n string) resource.TestCheckFunc {
// rec := resp.ResourceRecordSets[0] // rec := resp.ResourceRecordSets[0]
for _, rec := range resp.ResourceRecordSets { for _, rec := range resp.ResourceRecordSets {
recName := cleanRecordName(*rec.Name) recName := cleanRecordName(*rec.Name)
if FQDN(recName) == FQDN(name) && *rec.Type == rType { if FQDN(recName) == FQDN(en) && *rec.Type == rType {
return nil return nil
} }
} }

View File

@ -107,6 +107,7 @@ func resourceAwsRouteTableRead(d *schema.ResourceData, meta interface{}) error {
return err return err
} }
if rtRaw == nil { if rtRaw == nil {
d.SetId("")
return nil return nil
} }

View File

@ -14,6 +14,7 @@ func resourceAwsS3Bucket() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
Create: resourceAwsS3BucketCreate, Create: resourceAwsS3BucketCreate,
Read: resourceAwsS3BucketRead, Read: resourceAwsS3BucketRead,
Update: resourceAwsS3BucketUpdate,
Delete: resourceAwsS3BucketDelete, Delete: resourceAwsS3BucketDelete,
Schema: map[string]*schema.Schema{ Schema: map[string]*schema.Schema{
@ -29,6 +30,8 @@ func resourceAwsS3Bucket() *schema.Resource {
Optional: true, Optional: true,
ForceNew: true, ForceNew: true,
}, },
"tags": tagsSchema(),
}, },
} }
} }
@ -64,7 +67,15 @@ func resourceAwsS3BucketCreate(d *schema.ResourceData, meta interface{}) error {
// Assign the bucket name as the resource ID // Assign the bucket name as the resource ID
d.SetId(bucket) d.SetId(bucket)
return nil return resourceAwsS3BucketUpdate(d, meta)
}
func resourceAwsS3BucketUpdate(d *schema.ResourceData, meta interface{}) error {
s3conn := meta.(*AWSClient).s3conn
if err := setTagsS3(s3conn, d); err != nil {
return err
}
return resourceAwsS3BucketRead(d, meta)
} }
func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error { func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error {
@ -76,6 +87,18 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error {
if err != nil { if err != nil {
return err return err
} }
resp, err := s3conn.GetBucketTagging(&s3.GetBucketTaggingRequest{
Bucket: aws.String(d.Id()),
})
if err != nil {
return err
}
if err := d.Set("tags", tagsToMapS3(resp.TagSet)); err != nil {
return err
}
return nil return nil
} }

View File

@ -185,29 +185,32 @@ func resourceAwsVpcUpdate(d *schema.ResourceData, meta interface{}) error {
// Turn on partial mode // Turn on partial mode
d.Partial(true) d.Partial(true)
vpcid := d.Id() vpcid := d.Id()
modifyOpts := &ec2.ModifyVPCAttributeRequest{
VPCID: &vpcid,
}
if d.HasChange("enable_dns_hostnames") { if d.HasChange("enable_dns_hostnames") {
val := d.Get("enable_dns_hostnames").(bool) val := d.Get("enable_dns_hostnames").(bool)
modifyOpts.EnableDNSHostnames = &ec2.AttributeBooleanValue{ modifyOpts := &ec2.ModifyVPCAttributeRequest{
VPCID: &vpcid,
EnableDNSHostnames: &ec2.AttributeBooleanValue{
Value: &val, Value: &val,
},
} }
log.Printf( log.Printf(
"[INFO] Modifying enable_dns_hostnames vpc attribute for %s: %#v", "[INFO] Modifying enable_dns_support vpc attribute for %s: %#v",
d.Id(), modifyOpts) d.Id(), modifyOpts)
if err := ec2conn.ModifyVPCAttribute(modifyOpts); err != nil { if err := ec2conn.ModifyVPCAttribute(modifyOpts); err != nil {
return err return err
} }
d.SetPartial("enable_dns_hostnames") d.SetPartial("enable_dns_support")
} }
if d.HasChange("enable_dns_support") { if d.HasChange("enable_dns_support") {
val := d.Get("enable_dns_hostnames").(bool) val := d.Get("enable_dns_support").(bool)
modifyOpts.EnableDNSSupport = &ec2.AttributeBooleanValue{ modifyOpts := &ec2.ModifyVPCAttributeRequest{
VPCID: &vpcid,
EnableDNSSupport: &ec2.AttributeBooleanValue{
Value: &val, Value: &val,
},
} }
log.Printf( log.Printf(
@ -238,7 +241,7 @@ func resourceAwsVpcDelete(d *schema.ResourceData, meta interface{}) error {
} }
log.Printf("[INFO] Deleting VPC: %s", d.Id()) log.Printf("[INFO] Deleting VPC: %s", d.Id())
if err := ec2conn.DeleteVPC(DeleteVpcOpts); err != nil { if err := ec2conn.DeleteVPC(DeleteVpcOpts); err != nil {
ec2err, ok := err.(*aws.APIError) ec2err, ok := err.(aws.APIError)
if ok && ec2err.Code == "InvalidVpcID.NotFound" { if ok && ec2err.Code == "InvalidVpcID.NotFound" {
return nil return nil
} }
@ -258,7 +261,7 @@ func VPCStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc {
} }
resp, err := conn.DescribeVPCs(DescribeVpcOpts) resp, err := conn.DescribeVPCs(DescribeVpcOpts)
if err != nil { if err != nil {
if ec2err, ok := err.(*aws.APIError); ok && ec2err.Code == "InvalidVpcID.NotFound" { if ec2err, ok := err.(aws.APIError); ok && ec2err.Code == "InvalidVpcID.NotFound" {
resp = nil resp = nil
} else { } else {
log.Printf("Error on VPCStateRefresh: %s", err) log.Printf("Error on VPCStateRefresh: %s", err)

View File

@ -2,11 +2,12 @@ package aws
import ( import (
"fmt" "fmt"
"testing"
"github.com/hashicorp/aws-sdk-go/aws" "github.com/hashicorp/aws-sdk-go/aws"
"github.com/hashicorp/aws-sdk-go/gen/ec2" "github.com/hashicorp/aws-sdk-go/gen/ec2"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
"testing"
) )
func TestAccVpc_basic(t *testing.T) { func TestAccVpc_basic(t *testing.T) {
@ -132,7 +133,7 @@ func testAccCheckVpcDestroy(s *terraform.State) error {
} }
// Verify the error is what we want // Verify the error is what we want
ec2err, ok := err.(*aws.APIError) ec2err, ok := err.(aws.APIError)
if !ok { if !ok {
return err return err
} }
@ -184,6 +185,26 @@ func testAccCheckVpcExists(n string, vpc *ec2.VPC) resource.TestCheckFunc {
} }
} }
// https://github.com/hashicorp/terraform/issues/1301
func TestAccVpc_bothDnsOptionsSet(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckVpcDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccVpcConfig_BothDnsOptions,
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(
"aws_vpc.bar", "enable_dns_hostnames", "true"),
resource.TestCheckResourceAttr(
"aws_vpc.bar", "enable_dns_support", "true"),
),
},
},
})
}
const testAccVpcConfig = ` const testAccVpcConfig = `
resource "aws_vpc" "foo" { resource "aws_vpc" "foo" {
cidr_block = "10.1.0.0/16" cidr_block = "10.1.0.0/16"
@ -223,3 +244,12 @@ resource "aws_vpc" "bar" {
cidr_block = "10.2.0.0/16" cidr_block = "10.2.0.0/16"
} }
` `
const testAccVpcConfig_BothDnsOptions = `
resource "aws_vpc" "bar" {
cidr_block = "10.2.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
}
`

View File

@ -0,0 +1,112 @@
package aws
import (
"crypto/md5"
"encoding/base64"
"encoding/xml"
"log"
"github.com/hashicorp/aws-sdk-go/aws"
"github.com/hashicorp/aws-sdk-go/gen/s3"
"github.com/hashicorp/terraform/helper/schema"
)
// setTags is a helper to set the tags for a resource. It expects the
// tags field to be named "tags"
func setTagsS3(conn *s3.S3, d *schema.ResourceData) error {
if d.HasChange("tags") {
oraw, nraw := d.GetChange("tags")
o := oraw.(map[string]interface{})
n := nraw.(map[string]interface{})
create, remove := diffTagsS3(tagsFromMapS3(o), tagsFromMapS3(n))
// Set tags
if len(remove) > 0 {
log.Printf("[DEBUG] Removing tags: %#v", remove)
err := conn.DeleteBucketTagging(&s3.DeleteBucketTaggingRequest{
Bucket: aws.String(d.Get("bucket").(string)),
})
if err != nil {
return err
}
}
if len(create) > 0 {
log.Printf("[DEBUG] Creating tags: %#v", create)
tagging := s3.Tagging{
TagSet: create,
XMLName: xml.Name{
Space: "http://s3.amazonaws.com/doc/2006-03-01/",
Local: "Tagging",
},
}
// AWS S3 API requires us to send a base64 encoded md5 hash of the
// content, which we need to build ourselves since aws-sdk-go does not.
b, err := xml.Marshal(tagging)
if err != nil {
return err
}
h := md5.New()
h.Write(b)
base := base64.StdEncoding.EncodeToString(h.Sum(nil))
req := &s3.PutBucketTaggingRequest{
Bucket: aws.String(d.Get("bucket").(string)),
ContentMD5: aws.String(base),
Tagging: &tagging,
}
err = conn.PutBucketTagging(req)
if err != nil {
return err
}
}
}
return nil
}
// diffTags takes our tags locally and the ones remotely and returns
// the set of tags that must be created, and the set of tags that must
// be destroyed.
func diffTagsS3(oldTags, newTags []s3.Tag) ([]s3.Tag, []s3.Tag) {
// First, we're creating everything we have
create := make(map[string]interface{})
for _, t := range newTags {
create[*t.Key] = *t.Value
}
// Build the list of what to remove
var remove []s3.Tag
for _, t := range oldTags {
old, ok := create[*t.Key]
if !ok || old != *t.Value {
// Delete it!
remove = append(remove, t)
}
}
return tagsFromMapS3(create), remove
}
// tagsFromMap returns the tags for the given map of data.
func tagsFromMapS3(m map[string]interface{}) []s3.Tag {
result := make([]s3.Tag, 0, len(m))
for k, v := range m {
result = append(result, s3.Tag{
Key: aws.String(k),
Value: aws.String(v.(string)),
})
}
return result
}
// tagsToMap turns the list of tags into a map.
func tagsToMapS3(ts []s3.Tag) map[string]string {
result := make(map[string]string)
for _, t := range ts {
result[*t.Key] = *t.Value
}
return result
}

View File

@ -0,0 +1,85 @@
package aws
import (
"fmt"
"reflect"
"testing"
"github.com/hashicorp/aws-sdk-go/gen/s3"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestDiffTagsS3(t *testing.T) {
cases := []struct {
Old, New map[string]interface{}
Create, Remove map[string]string
}{
// Basic add/remove
{
Old: map[string]interface{}{
"foo": "bar",
},
New: map[string]interface{}{
"bar": "baz",
},
Create: map[string]string{
"bar": "baz",
},
Remove: map[string]string{
"foo": "bar",
},
},
// Modify
{
Old: map[string]interface{}{
"foo": "bar",
},
New: map[string]interface{}{
"foo": "baz",
},
Create: map[string]string{
"foo": "baz",
},
Remove: map[string]string{
"foo": "bar",
},
},
}
for i, tc := range cases {
c, r := diffTagsS3(tagsFromMapS3(tc.Old), tagsFromMapS3(tc.New))
cm := tagsToMapS3(c)
rm := tagsToMapS3(r)
if !reflect.DeepEqual(cm, tc.Create) {
t.Fatalf("%d: bad create: %#v", i, cm)
}
if !reflect.DeepEqual(rm, tc.Remove) {
t.Fatalf("%d: bad remove: %#v", i, rm)
}
}
}
// testAccCheckTags can be used to check the tags on a resource.
func testAccCheckTagsS3(
ts *[]s3.Tag, key string, value string) resource.TestCheckFunc {
return func(s *terraform.State) error {
m := tagsToMapS3(*ts)
v, ok := m[key]
if value != "" && !ok {
return fmt.Errorf("Missing tag: %s", key)
} else if value == "" && ok {
return fmt.Errorf("Extra tag: %s", key)
}
if value == "" {
return nil
}
if v != value {
return fmt.Errorf("%s: bad value: %s", key, v)
}
return nil
}
}

View File

@ -0,0 +1,94 @@
package aws
import (
"log"
"github.com/hashicorp/aws-sdk-go/aws"
"github.com/hashicorp/aws-sdk-go/gen/elb"
"github.com/hashicorp/terraform/helper/schema"
)
// setTags is a helper to set the tags for a resource. It expects the
// tags field to be named "tags"
func setTagsELB(conn *elb.ELB, d *schema.ResourceData) error {
if d.HasChange("tags") {
oraw, nraw := d.GetChange("tags")
o := oraw.(map[string]interface{})
n := nraw.(map[string]interface{})
create, remove := diffTagsELB(tagsFromMapELB(o), tagsFromMapELB(n))
// Set tags
if len(remove) > 0 {
log.Printf("[DEBUG] Removing tags: %#v", remove)
k := make([]elb.TagKeyOnly, 0, len(remove))
for _, t := range remove {
k = append(k, elb.TagKeyOnly{Key: t.Key})
}
_, err := conn.RemoveTags(&elb.RemoveTagsInput{
LoadBalancerNames: []string{d.Get("name").(string)},
Tags: k,
})
if err != nil {
return err
}
}
if len(create) > 0 {
log.Printf("[DEBUG] Creating tags: %#v", create)
_, err := conn.AddTags(&elb.AddTagsInput{
LoadBalancerNames: []string{d.Get("name").(string)},
Tags: create,
})
if err != nil {
return err
}
}
}
return nil
}
// diffTags takes our tags locally and the ones remotely and returns
// the set of tags that must be created, and the set of tags that must
// be destroyed.
func diffTagsELB(oldTags, newTags []elb.Tag) ([]elb.Tag, []elb.Tag) {
// First, we're creating everything we have
create := make(map[string]interface{})
for _, t := range newTags {
create[*t.Key] = *t.Value
}
// Build the list of what to remove
var remove []elb.Tag
for _, t := range oldTags {
old, ok := create[*t.Key]
if !ok || old != *t.Value {
// Delete it!
remove = append(remove, t)
}
}
return tagsFromMapELB(create), remove
}
// tagsFromMap returns the tags for the given map of data.
func tagsFromMapELB(m map[string]interface{}) []elb.Tag {
result := make([]elb.Tag, 0, len(m))
for k, v := range m {
result = append(result, elb.Tag{
Key: aws.String(k),
Value: aws.String(v.(string)),
})
}
return result
}
// tagsToMap turns the list of tags into a map.
func tagsToMapELB(ts []elb.Tag) map[string]string {
result := make(map[string]string)
for _, t := range ts {
result[*t.Key] = *t.Value
}
return result
}

View File

@ -0,0 +1,85 @@
package aws
import (
"fmt"
"reflect"
"testing"
"github.com/hashicorp/aws-sdk-go/gen/elb"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestDiffELBTags(t *testing.T) {
cases := []struct {
Old, New map[string]interface{}
Create, Remove map[string]string
}{
// Basic add/remove
{
Old: map[string]interface{}{
"foo": "bar",
},
New: map[string]interface{}{
"bar": "baz",
},
Create: map[string]string{
"bar": "baz",
},
Remove: map[string]string{
"foo": "bar",
},
},
// Modify
{
Old: map[string]interface{}{
"foo": "bar",
},
New: map[string]interface{}{
"foo": "baz",
},
Create: map[string]string{
"foo": "baz",
},
Remove: map[string]string{
"foo": "bar",
},
},
}
for i, tc := range cases {
c, r := diffTagsELB(tagsFromMapELB(tc.Old), tagsFromMapELB(tc.New))
cm := tagsToMapELB(c)
rm := tagsToMapELB(r)
if !reflect.DeepEqual(cm, tc.Create) {
t.Fatalf("%d: bad create: %#v", i, cm)
}
if !reflect.DeepEqual(rm, tc.Remove) {
t.Fatalf("%d: bad remove: %#v", i, rm)
}
}
}
// testAccCheckTags can be used to check the tags on a resource.
func testAccCheckELBTags(
ts *[]elb.Tag, key string, value string) resource.TestCheckFunc {
return func(s *terraform.State) error {
m := tagsToMapELB(*ts)
v, ok := m[key]
if value != "" && !ok {
return fmt.Errorf("Missing tag: %s", key)
} else if value == "" && ok {
return fmt.Errorf("Extra tag: %s", key)
}
if value == "" {
return nil
}
if v != value {
return fmt.Errorf("%s: bad value: %s", key, v)
}
return nil
}
}

View File

@ -84,7 +84,7 @@ func resourceCloudStackDiskCreate(d *schema.ResourceData, meta interface{}) erro
if d.Get("size").(int) != 0 { if d.Get("size").(int) != 0 {
// Set the volume size // Set the volume size
p.SetSize(d.Get("size").(int)) p.SetSize(int64(d.Get("size").(int)))
} }
// Retrieve the zone UUID // Retrieve the zone UUID
@ -141,7 +141,7 @@ func resourceCloudStackDiskRead(d *schema.ResourceData, meta interface{}) error
d.Set("name", v.Name) d.Set("name", v.Name)
d.Set("attach", v.Attached != "") // If attached this will contain a timestamp when attached d.Set("attach", v.Attached != "") // If attached this will contain a timestamp when attached
d.Set("disk_offering", v.Diskofferingname) d.Set("disk_offering", v.Diskofferingname)
d.Set("size", v.Size/(1024*1024*1024)) // Needed to get GB's again d.Set("size", int(v.Size/(1024*1024*1024))) // Needed to get GB's again
d.Set("zone", v.Zonename) d.Set("zone", v.Zonename)
if v.Attached != "" { if v.Attached != "" {
@ -196,7 +196,7 @@ func resourceCloudStackDiskUpdate(d *schema.ResourceData, meta interface{}) erro
if d.Get("size").(int) != 0 { if d.Get("size").(int) != 0 {
// Set the size // Set the size
p.SetSize(d.Get("size").(int)) p.SetSize(int64(d.Get("size").(int)))
} }
// Set the shrink bit // Set the shrink bit
@ -367,7 +367,7 @@ func isAttached(cs *cloudstack.CloudStackClient, id string) (bool, error) {
return v.Attached != "", nil return v.Attached != "", nil
} }
func retrieveDeviceID(device string) int { func retrieveDeviceID(device string) int64 {
switch device { switch device {
case "/dev/xvdb", "D:": case "/dev/xvdb", "D:":
return 1 return 1
@ -402,7 +402,7 @@ func retrieveDeviceID(device string) int {
} }
} }
func retrieveDeviceName(device int, os string) string { func retrieveDeviceName(device int64, os string) string {
switch device { switch device {
case 1: case 1:
if os == "Windows" { if os == "Windows" {

View File

@ -87,11 +87,11 @@ func resourceCloudStackVPNCustomerGatewayCreate(d *schema.ResourceData, meta int
} }
if esplifetime, ok := d.GetOk("esp_lifetime"); ok { if esplifetime, ok := d.GetOk("esp_lifetime"); ok {
p.SetEsplifetime(esplifetime.(int)) p.SetEsplifetime(int64(esplifetime.(int)))
} }
if ikelifetime, ok := d.GetOk("ike_lifetime"); ok { if ikelifetime, ok := d.GetOk("ike_lifetime"); ok {
p.SetIkelifetime(ikelifetime.(int)) p.SetIkelifetime(int64(ikelifetime.(int)))
} }
// Create the new VPN Customer Gateway // Create the new VPN Customer Gateway
@ -128,8 +128,8 @@ func resourceCloudStackVPNCustomerGatewayRead(d *schema.ResourceData, meta inter
d.Set("ike_policy", v.Ikepolicy) d.Set("ike_policy", v.Ikepolicy)
d.Set("ipsec_psk", v.Ipsecpsk) d.Set("ipsec_psk", v.Ipsecpsk)
d.Set("dpd", v.Dpd) d.Set("dpd", v.Dpd)
d.Set("esp_lifetime", v.Esplifetime) d.Set("esp_lifetime", int(v.Esplifetime))
d.Set("ike_lifetime", v.Ikelifetime) d.Set("ike_lifetime", int(v.Ikelifetime))
return nil return nil
} }
@ -154,11 +154,11 @@ func resourceCloudStackVPNCustomerGatewayUpdate(d *schema.ResourceData, meta int
} }
if esplifetime, ok := d.GetOk("esp_lifetime"); ok { if esplifetime, ok := d.GetOk("esp_lifetime"); ok {
p.SetEsplifetime(esplifetime.(int)) p.SetEsplifetime(int64(esplifetime.(int)))
} }
if ikelifetime, ok := d.GetOk("ike_lifetime"); ok { if ikelifetime, ok := d.GetOk("ike_lifetime"); ok {
p.SetIkelifetime(ikelifetime.(int)) p.SetIkelifetime(int64(ikelifetime.(int)))
} }
// Update the VPN Customer Gateway // Update the VPN Customer Gateway

View File

@ -91,8 +91,9 @@ func resourceDigitalOceanRecordCreate(d *schema.ResourceData, meta interface{})
func resourceDigitalOceanRecordRead(d *schema.ResourceData, meta interface{}) error { func resourceDigitalOceanRecordRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*digitalocean.Client) client := meta.(*digitalocean.Client)
domain := d.Get("domain").(string)
rec, err := client.RetrieveRecord(d.Get("domain").(string), d.Id()) rec, err := client.RetrieveRecord(domain, d.Id())
if err != nil { if err != nil {
// If the record is somehow already destroyed, mark as // If the record is somehow already destroyed, mark as
// succesfully gone // succesfully gone
@ -104,6 +105,18 @@ func resourceDigitalOceanRecordRead(d *schema.ResourceData, meta interface{}) er
return err return err
} }
// Update response data for records with domain value
if t := rec.Type; t == "CNAME" || t == "MX" || t == "NS" || t == "SRV" {
// Append dot to response if resource value is absolute
if value := d.Get("value").(string); strings.HasSuffix(value, ".") {
rec.Data += "."
// If resource value ends with current domain, make response data absolute
if strings.HasSuffix(value, domain+".") {
rec.Data += domain + "."
}
}
}
d.Set("name", rec.Name) d.Set("name", rec.Name)
d.Set("type", rec.Type) d.Set("type", rec.Type)
d.Set("value", rec.Data) d.Set("value", rec.Data)

View File

@ -76,6 +76,87 @@ func TestAccDigitalOceanRecord_Updated(t *testing.T) {
}) })
} }
func TestAccDigitalOceanRecord_HostnameValue(t *testing.T) {
var record digitalocean.Record
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDigitalOceanRecordDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccCheckDigitalOceanRecordConfig_cname,
Check: resource.ComposeTestCheckFunc(
testAccCheckDigitalOceanRecordExists("digitalocean_record.foobar", &record),
testAccCheckDigitalOceanRecordAttributesHostname("a", &record),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "name", "terraform"),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "domain", "foobar-test-terraform.com"),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "value", "a.foobar-test-terraform.com."),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "type", "CNAME"),
),
},
},
})
}
func TestAccDigitalOceanRecord_RelativeHostnameValue(t *testing.T) {
var record digitalocean.Record
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDigitalOceanRecordDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccCheckDigitalOceanRecordConfig_relative_cname,
Check: resource.ComposeTestCheckFunc(
testAccCheckDigitalOceanRecordExists("digitalocean_record.foobar", &record),
testAccCheckDigitalOceanRecordAttributesHostname("a.b", &record),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "name", "terraform"),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "domain", "foobar-test-terraform.com"),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "value", "a.b"),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "type", "CNAME"),
),
},
},
})
}
func TestAccDigitalOceanRecord_ExternalHostnameValue(t *testing.T) {
var record digitalocean.Record
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDigitalOceanRecordDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccCheckDigitalOceanRecordConfig_external_cname,
Check: resource.ComposeTestCheckFunc(
testAccCheckDigitalOceanRecordExists("digitalocean_record.foobar", &record),
testAccCheckDigitalOceanRecordAttributesHostname("a.foobar-test-terraform.net", &record),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "name", "terraform"),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "domain", "foobar-test-terraform.com"),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "value", "a.foobar-test-terraform.net."),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "type", "CNAME"),
),
},
},
})
}
func testAccCheckDigitalOceanRecordDestroy(s *terraform.State) error { func testAccCheckDigitalOceanRecordDestroy(s *terraform.State) error {
client := testAccProvider.Meta().(*digitalocean.Client) client := testAccProvider.Meta().(*digitalocean.Client)
@ -146,6 +227,17 @@ func testAccCheckDigitalOceanRecordExists(n string, record *digitalocean.Record)
} }
} }
func testAccCheckDigitalOceanRecordAttributesHostname(data string, record *digitalocean.Record) resource.TestCheckFunc {
return func(s *terraform.State) error {
if record.Data != data {
return fmt.Errorf("Bad value: expected %s, got %s", data, record.Data)
}
return nil
}
}
const testAccCheckDigitalOceanRecordConfig_basic = ` const testAccCheckDigitalOceanRecordConfig_basic = `
resource "digitalocean_domain" "foobar" { resource "digitalocean_domain" "foobar" {
name = "foobar-test-terraform.com" name = "foobar-test-terraform.com"
@ -173,3 +265,45 @@ resource "digitalocean_record" "foobar" {
value = "192.168.0.11" value = "192.168.0.11"
type = "A" type = "A"
}` }`
const testAccCheckDigitalOceanRecordConfig_cname = `
resource "digitalocean_domain" "foobar" {
name = "foobar-test-terraform.com"
ip_address = "192.168.0.10"
}
resource "digitalocean_record" "foobar" {
domain = "${digitalocean_domain.foobar.name}"
name = "terraform"
value = "a.foobar-test-terraform.com."
type = "CNAME"
}`
const testAccCheckDigitalOceanRecordConfig_relative_cname = `
resource "digitalocean_domain" "foobar" {
name = "foobar-test-terraform.com"
ip_address = "192.168.0.10"
}
resource "digitalocean_record" "foobar" {
domain = "${digitalocean_domain.foobar.name}"
name = "terraform"
value = "a.b"
type = "CNAME"
}`
const testAccCheckDigitalOceanRecordConfig_external_cname = `
resource "digitalocean_domain" "foobar" {
name = "foobar-test-terraform.com"
ip_address = "192.168.0.10"
}
resource "digitalocean_record" "foobar" {
domain = "${digitalocean_domain.foobar.name}"
name = "terraform"
value = "a.foobar-test-terraform.net."
type = "CNAME"
}`

View File

@ -7,11 +7,10 @@ import (
"net/http" "net/http"
"os" "os"
"code.google.com/p/google-api-go-client/compute/v1"
"golang.org/x/oauth2" "golang.org/x/oauth2"
"golang.org/x/oauth2/google" "golang.org/x/oauth2/google"
"golang.org/x/oauth2/jwt" "golang.org/x/oauth2/jwt"
"google.golang.org/api/compute/v1"
) )
// Config is the configuration structure used to instantiate the Google // Config is the configuration structure used to instantiate the Google

View File

@ -1,7 +1,7 @@
package google package google
import ( import (
"code.google.com/p/google-api-go-client/compute/v1" "google.golang.org/api/compute/v1"
) )
// readDiskType finds the disk type with the given name. // readDiskType finds the disk type with the given name.

View File

@ -4,7 +4,8 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"code.google.com/p/google-api-go-client/compute/v1" "google.golang.org/api/compute/v1"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
) )

View File

@ -5,9 +5,9 @@ import (
"log" "log"
"time" "time"
"code.google.com/p/google-api-go-client/compute/v1"
"code.google.com/p/google-api-go-client/googleapi"
"github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
) )
func resourceComputeAddress() *schema.Resource { func resourceComputeAddress() *schema.Resource {

View File

@ -4,9 +4,9 @@ import (
"fmt" "fmt"
"testing" "testing"
"code.google.com/p/google-api-go-client/compute/v1"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
"google.golang.org/api/compute/v1"
) )
func TestAccComputeAddress_basic(t *testing.T) { func TestAccComputeAddress_basic(t *testing.T) {

View File

@ -5,9 +5,9 @@ import (
"log" "log"
"time" "time"
"code.google.com/p/google-api-go-client/compute/v1"
"code.google.com/p/google-api-go-client/googleapi"
"github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
) )
func resourceComputeDisk() *schema.Resource { func resourceComputeDisk() *schema.Resource {

View File

@ -4,9 +4,9 @@ import (
"fmt" "fmt"
"testing" "testing"
"code.google.com/p/google-api-go-client/compute/v1"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
"google.golang.org/api/compute/v1"
) )
func TestAccComputeDisk_basic(t *testing.T) { func TestAccComputeDisk_basic(t *testing.T) {

View File

@ -6,10 +6,10 @@ import (
"sort" "sort"
"time" "time"
"code.google.com/p/google-api-go-client/compute/v1"
"code.google.com/p/google-api-go-client/googleapi"
"github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
) )
func resourceComputeFirewall() *schema.Resource { func resourceComputeFirewall() *schema.Resource {

View File

@ -4,9 +4,9 @@ import (
"fmt" "fmt"
"testing" "testing"
"code.google.com/p/google-api-go-client/compute/v1"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
"google.golang.org/api/compute/v1"
) )
func TestAccComputeFirewall_basic(t *testing.T) { func TestAccComputeFirewall_basic(t *testing.T) {

View File

@ -5,9 +5,9 @@ import (
"log" "log"
"time" "time"
"code.google.com/p/google-api-go-client/compute/v1"
"code.google.com/p/google-api-go-client/googleapi"
"github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
) )
func resourceComputeForwardingRule() *schema.Resource { func resourceComputeForwardingRule() *schema.Resource {

View File

@ -5,9 +5,9 @@ import (
"log" "log"
"time" "time"
"code.google.com/p/google-api-go-client/compute/v1"
"code.google.com/p/google-api-go-client/googleapi"
"github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
) )
func resourceComputeHttpHealthCheck() *schema.Resource { func resourceComputeHttpHealthCheck() *schema.Resource {

View File

@ -5,10 +5,10 @@ import (
"log" "log"
"time" "time"
"code.google.com/p/google-api-go-client/compute/v1"
"code.google.com/p/google-api-go-client/googleapi"
"github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
) )
func resourceComputeInstance() *schema.Resource { func resourceComputeInstance() *schema.Resource {
@ -72,6 +72,13 @@ func resourceComputeInstance() *schema.Resource {
"auto_delete": &schema.Schema{ "auto_delete": &schema.Schema{
Type: schema.TypeBool, Type: schema.TypeBool,
Optional: true, Optional: true,
Default: true,
ForceNew: true,
},
"size": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
ForceNew: true, ForceNew: true,
}, },
}, },
@ -283,11 +290,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
disk.Type = "PERSISTENT" disk.Type = "PERSISTENT"
disk.Mode = "READ_WRITE" disk.Mode = "READ_WRITE"
disk.Boot = i == 0 disk.Boot = i == 0
disk.AutoDelete = true disk.AutoDelete = d.Get(prefix + ".auto_delete").(bool)
if v, ok := d.GetOk(prefix + ".auto_delete"); ok {
disk.AutoDelete = v.(bool)
}
// Load up the disk for this disk if specified // Load up the disk for this disk if specified
if v, ok := d.GetOk(prefix + ".disk"); ok { if v, ok := d.GetOk(prefix + ".disk"); ok {
@ -331,6 +334,11 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
disk.InitializeParams.DiskType = diskType.SelfLink disk.InitializeParams.DiskType = diskType.SelfLink
} }
if v, ok := d.GetOk(prefix + ".size"); ok {
diskSizeGb := v.(int)
disk.InitializeParams.DiskSizeGb = int64(diskSizeGb)
}
disks = append(disks, &disk) disks = append(disks, &disk)
} }
@ -564,6 +572,7 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error
networkInterfaces = append(networkInterfaces, map[string]interface{}{ networkInterfaces = append(networkInterfaces, map[string]interface{}{
"name": iface.Name, "name": iface.Name,
"address": iface.NetworkIP, "address": iface.NetworkIP,
"network": iface.Network,
"access_config": accessConfigs, "access_config": accessConfigs,
}) })
} }

View File

@ -4,10 +4,10 @@ import (
"fmt" "fmt"
"time" "time"
"code.google.com/p/google-api-go-client/compute/v1"
"code.google.com/p/google-api-go-client/googleapi"
"github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
) )
func resourceComputeInstanceTemplate() *schema.Resource { func resourceComputeInstanceTemplate() *schema.Resource {
@ -58,6 +58,7 @@ func resourceComputeInstanceTemplate() *schema.Resource {
"auto_delete": &schema.Schema{ "auto_delete": &schema.Schema{
Type: schema.TypeBool, Type: schema.TypeBool,
Optional: true, Optional: true,
Default: true,
ForceNew: true, ForceNew: true,
}, },
@ -235,11 +236,7 @@ func buildDisks(d *schema.ResourceData, meta interface{}) []*compute.AttachedDis
disk.Mode = "READ_WRITE" disk.Mode = "READ_WRITE"
disk.Interface = "SCSI" disk.Interface = "SCSI"
disk.Boot = i == 0 disk.Boot = i == 0
disk.AutoDelete = true disk.AutoDelete = d.Get(prefix + ".auto_delete").(bool)
if v, ok := d.GetOk(prefix + ".auto_delete"); ok {
disk.AutoDelete = v.(bool)
}
if v, ok := d.GetOk(prefix + ".boot"); ok { if v, ok := d.GetOk(prefix + ".boot"); ok {
disk.Boot = v.(bool) disk.Boot = v.(bool)

View File

@ -4,9 +4,9 @@ import (
"fmt" "fmt"
"testing" "testing"
"code.google.com/p/google-api-go-client/compute/v1"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
"google.golang.org/api/compute/v1"
) )
func TestAccComputeInstanceTemplate_basic(t *testing.T) { func TestAccComputeInstanceTemplate_basic(t *testing.T) {
@ -65,7 +65,7 @@ func TestAccComputeInstanceTemplate_disks(t *testing.T) {
testAccCheckComputeInstanceTemplateExists( testAccCheckComputeInstanceTemplateExists(
"google_compute_instance_template.foobar", &instanceTemplate), "google_compute_instance_template.foobar", &instanceTemplate),
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "debian-7-wheezy-v20140814", true, true), testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "debian-7-wheezy-v20140814", true, true),
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "foo_existing_disk", false, false), testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "terraform-test-foobar", false, false),
), ),
}, },
}, },
@ -252,6 +252,14 @@ resource "google_compute_instance_template" "foobar" {
}` }`
const testAccComputeInstanceTemplate_disks = ` const testAccComputeInstanceTemplate_disks = `
resource "google_compute_disk" "foobar" {
name = "terraform-test-foobar"
image = "debian-7-wheezy-v20140814"
size = 10
type = "pd-ssd"
zone = "us-central1-a"
}
resource "google_compute_instance_template" "foobar" { resource "google_compute_instance_template" "foobar" {
name = "terraform-test" name = "terraform-test"
machine_type = "n1-standard-1" machine_type = "n1-standard-1"
@ -263,7 +271,7 @@ resource "google_compute_instance_template" "foobar" {
} }
disk { disk {
source = "foo_existing_disk" source = "terraform-test-foobar"
auto_delete = false auto_delete = false
boot = false boot = false
} }

View File

@ -5,9 +5,9 @@ import (
"strings" "strings"
"testing" "testing"
"code.google.com/p/google-api-go-client/compute/v1"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
"google.golang.org/api/compute/v1"
) )
func TestAccComputeInstance_basic_deprecated_network(t *testing.T) { func TestAccComputeInstance_basic_deprecated_network(t *testing.T) {

View File

@ -5,9 +5,9 @@ import (
"log" "log"
"time" "time"
"code.google.com/p/google-api-go-client/compute/v1"
"code.google.com/p/google-api-go-client/googleapi"
"github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
) )
func resourceComputeNetwork() *schema.Resource { func resourceComputeNetwork() *schema.Resource {

View File

@ -4,9 +4,9 @@ import (
"fmt" "fmt"
"testing" "testing"
"code.google.com/p/google-api-go-client/compute/v1"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
"google.golang.org/api/compute/v1"
) )
func TestAccComputeNetwork_basic(t *testing.T) { func TestAccComputeNetwork_basic(t *testing.T) {

View File

@ -5,10 +5,10 @@ import (
"log" "log"
"time" "time"
"code.google.com/p/google-api-go-client/compute/v1"
"code.google.com/p/google-api-go-client/googleapi"
"github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
) )
func resourceComputeRoute() *schema.Resource { func resourceComputeRoute() *schema.Resource {

View File

@ -4,9 +4,9 @@ import (
"fmt" "fmt"
"testing" "testing"
"code.google.com/p/google-api-go-client/compute/v1"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
"google.golang.org/api/compute/v1"
) )
func TestAccComputeRoute_basic(t *testing.T) { func TestAccComputeRoute_basic(t *testing.T) {

View File

@ -6,9 +6,9 @@ import (
"strings" "strings"
"time" "time"
"code.google.com/p/google-api-go-client/compute/v1"
"code.google.com/p/google-api-go-client/googleapi"
"github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
) )
func resourceComputeTargetPool() *schema.Resource { func resourceComputeTargetPool() *schema.Resource {

View File

@ -148,6 +148,27 @@ func testStateFileDefault(t *testing.T, s *terraform.State) string {
return DefaultStateFilename return DefaultStateFilename
} }
// testStateFileRemote writes the state out to the remote statefile
// in the cwd. Use `testCwd` to change into a temp cwd.
func testStateFileRemote(t *testing.T, s *terraform.State) string {
path := filepath.Join(DefaultDataDir, DefaultStateFilename)
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
t.Fatalf("err: %s", err)
}
f, err := os.Create(path)
if err != nil {
t.Fatalf("err: %s", err)
}
defer f.Close()
if err := terraform.WriteState(s, f); err != nil {
t.Fatalf("err: %s", err)
}
return path
}
// testStateOutput tests that the state at the given path contains // testStateOutput tests that the state at the given path contains
// the expected state string. // the expected state string.
func testStateOutput(t *testing.T, path string, expected string) { func testStateOutput(t *testing.T, path string, expected string) {

View File

@ -138,11 +138,7 @@ func (m *Meta) Context(copts contextOpts) (*terraform.Context, bool, error) {
return nil, false, fmt.Errorf("Error loading config: %s", err) return nil, false, fmt.Errorf("Error loading config: %s", err)
} }
dataDir := DefaultDataDirectory err = mod.Load(m.moduleStorage(m.DataDir()), copts.GetMode)
if m.dataDir != "" {
dataDir = m.dataDir
}
err = mod.Load(m.moduleStorage(dataDir), copts.GetMode)
if err != nil { if err != nil {
return nil, false, fmt.Errorf("Error downloading modules: %s", err) return nil, false, fmt.Errorf("Error downloading modules: %s", err)
} }
@ -153,6 +149,16 @@ func (m *Meta) Context(copts contextOpts) (*terraform.Context, bool, error) {
return ctx, false, nil return ctx, false, nil
} }
// DataDir returns the directory where local data will be stored.
func (m *Meta) DataDir() string {
dataDir := DefaultDataDirectory
if m.dataDir != "" {
dataDir = m.dataDir
}
return dataDir
}
// InputMode returns the type of input we should ask for in the form of // InputMode returns the type of input we should ask for in the form of
// terraform.InputMode which is passed directly to Context.Input. // terraform.InputMode which is passed directly to Context.Input.
func (m *Meta) InputMode() terraform.InputMode { func (m *Meta) InputMode() terraform.InputMode {
@ -164,6 +170,7 @@ func (m *Meta) InputMode() terraform.InputMode {
mode |= terraform.InputModeProvider mode |= terraform.InputModeProvider
if len(m.variables) == 0 && m.autoKey == "" { if len(m.variables) == 0 && m.autoKey == "" {
mode |= terraform.InputModeVar mode |= terraform.InputModeVar
mode |= terraform.InputModeVarUnset
} }
return mode return mode
@ -205,7 +212,7 @@ func (m *Meta) StateOpts() *StateOpts {
if localPath == "" { if localPath == "" {
localPath = DefaultStateFilename localPath = DefaultStateFilename
} }
remotePath := filepath.Join(DefaultDataDir, DefaultStateFilename) remotePath := filepath.Join(m.DataDir(), DefaultStateFilename)
return &StateOpts{ return &StateOpts{
LocalPath: localPath, LocalPath: localPath,

View File

@ -65,7 +65,7 @@ func TestMetaInputMode(t *testing.T) {
t.Fatalf("err: %s", err) t.Fatalf("err: %s", err)
} }
if m.InputMode() != terraform.InputModeStd { if m.InputMode() != terraform.InputModeStd|terraform.InputModeVarUnset {
t.Fatalf("bad: %#v", m.InputMode()) t.Fatalf("bad: %#v", m.InputMode())
} }
} }

View File

@ -39,7 +39,7 @@ func (c *OutputCommand) Run(args []string) int {
} }
state := stateStore.State() state := stateStore.State()
if len(state.RootModule().Outputs) == 0 { if state.Empty() || len(state.RootModule().Outputs) == 0 {
c.Ui.Error(fmt.Sprintf( c.Ui.Error(fmt.Sprintf(
"The state file has no outputs defined. Define an output\n" + "The state file has no outputs defined. Define an output\n" +
"in your configuration with the `output` directive and re-run\n" + "in your configuration with the `output` directive and re-run\n" +

View File

@ -142,6 +142,27 @@ func TestOutput_noArgs(t *testing.T) {
} }
} }
func TestOutput_noState(t *testing.T) {
originalState := &terraform.State{}
statePath := testStateFile(t, originalState)
ui := new(cli.MockUi)
c := &OutputCommand{
Meta: Meta{
ContextOpts: testCtxConfig(testProvider()),
Ui: ui,
},
}
args := []string{
"-state", statePath,
"foo",
}
if code := c.Run(args); code != 1 {
t.Fatalf("bad: \n%s", ui.ErrorWriter.String())
}
}
func TestOutput_noVars(t *testing.T) { func TestOutput_noVars(t *testing.T) {
originalState := &terraform.State{ originalState := &terraform.State{
Modules: []*terraform.ModuleState{ Modules: []*terraform.ModuleState{

312
command/push.go Normal file
View File

@ -0,0 +1,312 @@
package command
import (
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/hashicorp/atlas-go/archive"
"github.com/hashicorp/atlas-go/v1"
)
type PushCommand struct {
Meta
// client is the client to use for the actual push operations.
// If this isn't set, then the Atlas client is used. This should
// really only be set for testing reasons (and is hence not exported).
client pushClient
}
func (c *PushCommand) Run(args []string) int {
var atlasAddress, atlasToken string
var archiveVCS, moduleUpload bool
var name string
args = c.Meta.process(args, false)
cmdFlags := c.Meta.flagSet("push")
cmdFlags.StringVar(&atlasAddress, "atlas-address", "", "")
cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path")
cmdFlags.StringVar(&atlasToken, "token", "", "")
cmdFlags.BoolVar(&moduleUpload, "upload-modules", true, "")
cmdFlags.StringVar(&name, "name", "", "")
cmdFlags.BoolVar(&archiveVCS, "vcs", true, "")
cmdFlags.Usage = func() { c.Ui.Error(c.Help()) }
if err := cmdFlags.Parse(args); err != nil {
return 1
}
// The pwd is used for the configuration path if one is not given
pwd, err := os.Getwd()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error getting pwd: %s", err))
return 1
}
// Get the path to the configuration depending on the args.
var configPath string
args = cmdFlags.Args()
if len(args) > 1 {
c.Ui.Error("The apply command expects at most one argument.")
cmdFlags.Usage()
return 1
} else if len(args) == 1 {
configPath = args[0]
} else {
configPath = pwd
}
// Verify the state is remote, we can't push without a remote state
s, err := c.State()
if err != nil {
c.Ui.Error(fmt.Sprintf("Failed to read state: %s", err))
return 1
}
if !s.State().IsRemote() {
c.Ui.Error(
"Remote state is not enabled. For Atlas to run Terraform\n" +
"for you, remote state must be used and configured. Remote\n" +
"state via any backend is accepted, not just Atlas. To\n" +
"configure remote state, use the `terraform remote config`\n" +
"command.")
return 1
}
// Build the context based on the arguments given
ctx, planned, err := c.Context(contextOpts{
Path: configPath,
StatePath: c.Meta.statePath,
})
if err != nil {
c.Ui.Error(err.Error())
return 1
}
if planned {
c.Ui.Error(
"A plan file cannot be given as the path to the configuration.\n" +
"A path to a module (directory with configuration) must be given.")
return 1
}
// Get the configuration
config := ctx.Module().Config()
if name == "" {
if config.Atlas == nil || config.Atlas.Name == "" {
c.Ui.Error(
"The name of this Terraform configuration in Atlas must be\n" +
"specified within your configuration or the command-line. To\n" +
"set it on the command-line, use the `-name` parameter.")
return 1
}
name = config.Atlas.Name
}
// Initialize the client if it isn't given.
if c.client == nil {
// Make sure to nil out our client so our token isn't sitting around
defer func() { c.client = nil }()
// Initialize it to the default client, we set custom settings later
client := atlas.DefaultClient()
if atlasAddress != "" {
client, err = atlas.NewClient(atlasAddress)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing Atlas client: %s", err))
return 1
}
}
if atlasToken != "" {
client.Token = atlasToken
}
c.client = &atlasPushClient{Client: client}
}
// Get the variables we might already have
vars, err := c.client.Get(name)
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error looking up previously pushed configuration: %s", err))
return 1
}
for k, v := range vars {
ctx.SetVariable(k, v)
}
// Ask for input
if err := ctx.Input(c.InputMode()); err != nil {
c.Ui.Error(fmt.Sprintf(
"Error while asking for variable input:\n\n%s", err))
return 1
}
// Build the archiving options, which includes everything it can
// by default according to VCS rules but forcing the data directory.
archiveOpts := &archive.ArchiveOpts{
VCS: archiveVCS,
Extra: map[string]string{
DefaultDataDir: c.DataDir(),
},
}
if !moduleUpload {
// If we're not uploading modules, then exclude the modules dir.
archiveOpts.Exclude = append(
archiveOpts.Exclude,
filepath.Join(c.DataDir(), "modules"))
}
archiveR, err := archive.CreateArchive(configPath, archiveOpts)
if err != nil {
c.Ui.Error(fmt.Sprintf(
"An error has occurred while archiving the module for uploading:\n"+
"%s", err))
return 1
}
// Upsert!
opts := &pushUpsertOptions{
Name: name,
Archive: archiveR,
Variables: ctx.Variables(),
}
vsn, err := c.client.Upsert(opts)
if err != nil {
c.Ui.Error(fmt.Sprintf(
"An error occurred while uploading the module:\n\n%s", err))
return 1
}
c.Ui.Output(c.Colorize().Color(fmt.Sprintf(
"[reset][bold][green]Configuration %q uploaded! (v%d)",
name, vsn)))
return 0
}
func (c *PushCommand) Help() string {
helpText := `
Usage: terraform push [options] [DIR]
Upload this Terraform module to an Atlas server for remote
infrastructure management.
Options:
-atlas-address=<url> An alternate address to an Atlas instance. Defaults
to https://atlas.hashicorp.com
-upload-modules=true If true (default), then the modules are locked at
their current checkout and uploaded completely. This
prevents Atlas from running "terraform get".
-name=<name> Name of the configuration in Atlas. This can also
be set in the configuration itself. Format is
typically: "username/name".
-token=<token> Access token to use to upload. If blank or unspecified,
the ATLAS_TOKEN environmental variable will be used.
-vcs=true If true (default), push will upload only files
comitted to your VCS, if detected.
`
return strings.TrimSpace(helpText)
}
func (c *PushCommand) Synopsis() string {
return "Upload this Terraform module to Atlas to run"
}
// pushClient is implementd internally to control where pushes go. This is
// either to Atlas or a mock for testing.
type pushClient interface {
Get(string) (map[string]string, error)
Upsert(*pushUpsertOptions) (int, error)
}
type pushUpsertOptions struct {
Name string
Archive *archive.Archive
Variables map[string]string
}
type atlasPushClient struct {
Client *atlas.Client
}
func (c *atlasPushClient) Get(name string) (map[string]string, error) {
user, name, err := atlas.ParseSlug(name)
if err != nil {
return nil, err
}
version, err := c.Client.TerraformConfigLatest(user, name)
if err != nil {
return nil, err
}
var variables map[string]string
if version != nil {
variables = version.Variables
}
return variables, nil
}
func (c *atlasPushClient) Upsert(opts *pushUpsertOptions) (int, error) {
user, name, err := atlas.ParseSlug(opts.Name)
if err != nil {
return 0, err
}
data := &atlas.TerraformConfigVersion{
Variables: opts.Variables,
}
version, err := c.Client.CreateTerraformConfigVersion(
user, name, data, opts.Archive, opts.Archive.Size)
if err != nil {
return 0, err
}
return version, nil
}
type mockPushClient struct {
File string
GetCalled bool
GetName string
GetResult map[string]string
GetError error
UpsertCalled bool
UpsertOptions *pushUpsertOptions
UpsertVersion int
UpsertError error
}
func (c *mockPushClient) Get(name string) (map[string]string, error) {
c.GetCalled = true
c.GetName = name
return c.GetResult, c.GetError
}
func (c *mockPushClient) Upsert(opts *pushUpsertOptions) (int, error) {
f, err := os.Create(c.File)
if err != nil {
return 0, err
}
defer f.Close()
data := opts.Archive
size := opts.Archive.Size
if _, err := io.CopyN(f, data, size); err != nil {
return 0, err
}
c.UpsertCalled = true
c.UpsertOptions = opts
return c.UpsertVersion, c.UpsertError
}

337
command/push_test.go Normal file
View File

@ -0,0 +1,337 @@
package command
import (
"archive/tar"
"bytes"
"compress/gzip"
"io"
"os"
"reflect"
"sort"
"testing"
"github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/cli"
)
func TestPush_good(t *testing.T) {
tmp, cwd := testCwd(t)
defer testFixCwd(t, tmp, cwd)
// Create remote state file, this should be pulled
conf, srv := testRemoteState(t, testState(), 200)
defer srv.Close()
// Persist local remote state
s := terraform.NewState()
s.Serial = 5
s.Remote = conf
testStateFileRemote(t, s)
// Path where the archive will be "uploaded" to
archivePath := testTempFile(t)
defer os.Remove(archivePath)
client := &mockPushClient{File: archivePath}
ui := new(cli.MockUi)
c := &PushCommand{
Meta: Meta{
ContextOpts: testCtxConfig(testProvider()),
Ui: ui,
},
client: client,
}
args := []string{
testFixturePath("push"),
}
if code := c.Run(args); code != 0 {
t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
}
actual := testArchiveStr(t, archivePath)
expected := []string{
".terraform/",
".terraform/terraform.tfstate",
"main.tf",
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %#v", actual)
}
variables := make(map[string]string)
if !reflect.DeepEqual(client.UpsertOptions.Variables, variables) {
t.Fatalf("bad: %#v", client.UpsertOptions)
}
if client.UpsertOptions.Name != "foo" {
t.Fatalf("bad: %#v", client.UpsertOptions)
}
}
func TestPush_input(t *testing.T) {
tmp, cwd := testCwd(t)
defer testFixCwd(t, tmp, cwd)
// Create remote state file, this should be pulled
conf, srv := testRemoteState(t, testState(), 200)
defer srv.Close()
// Persist local remote state
s := terraform.NewState()
s.Serial = 5
s.Remote = conf
testStateFileRemote(t, s)
// Path where the archive will be "uploaded" to
archivePath := testTempFile(t)
defer os.Remove(archivePath)
client := &mockPushClient{File: archivePath}
ui := new(cli.MockUi)
c := &PushCommand{
Meta: Meta{
ContextOpts: testCtxConfig(testProvider()),
Ui: ui,
},
client: client,
}
// Disable test mode so input would be asked and setup the
// input reader/writers.
test = false
defer func() { test = true }()
defaultInputReader = bytes.NewBufferString("foo\n")
defaultInputWriter = new(bytes.Buffer)
args := []string{
testFixturePath("push-input"),
}
if code := c.Run(args); code != 0 {
t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
}
variables := map[string]string{
"foo": "foo",
}
if !reflect.DeepEqual(client.UpsertOptions.Variables, variables) {
t.Fatalf("bad: %#v", client.UpsertOptions)
}
}
func TestPush_inputPartial(t *testing.T) {
tmp, cwd := testCwd(t)
defer testFixCwd(t, tmp, cwd)
// Create remote state file, this should be pulled
conf, srv := testRemoteState(t, testState(), 200)
defer srv.Close()
// Persist local remote state
s := terraform.NewState()
s.Serial = 5
s.Remote = conf
testStateFileRemote(t, s)
// Path where the archive will be "uploaded" to
archivePath := testTempFile(t)
defer os.Remove(archivePath)
client := &mockPushClient{
File: archivePath,
GetResult: map[string]string{"foo": "bar"},
}
ui := new(cli.MockUi)
c := &PushCommand{
Meta: Meta{
ContextOpts: testCtxConfig(testProvider()),
Ui: ui,
},
client: client,
}
// Disable test mode so input would be asked and setup the
// input reader/writers.
test = false
defer func() { test = true }()
defaultInputReader = bytes.NewBufferString("foo\n")
defaultInputWriter = new(bytes.Buffer)
args := []string{
testFixturePath("push-input-partial"),
}
if code := c.Run(args); code != 0 {
t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
}
variables := map[string]string{
"foo": "bar",
"bar": "foo",
}
if !reflect.DeepEqual(client.UpsertOptions.Variables, variables) {
t.Fatalf("bad: %#v", client.UpsertOptions)
}
}
func TestPush_name(t *testing.T) {
tmp, cwd := testCwd(t)
defer testFixCwd(t, tmp, cwd)
// Create remote state file, this should be pulled
conf, srv := testRemoteState(t, testState(), 200)
defer srv.Close()
// Persist local remote state
s := terraform.NewState()
s.Serial = 5
s.Remote = conf
testStateFileRemote(t, s)
// Path where the archive will be "uploaded" to
archivePath := testTempFile(t)
defer os.Remove(archivePath)
client := &mockPushClient{File: archivePath}
ui := new(cli.MockUi)
c := &PushCommand{
Meta: Meta{
ContextOpts: testCtxConfig(testProvider()),
Ui: ui,
},
client: client,
}
args := []string{
"-name", "bar",
testFixturePath("push"),
}
if code := c.Run(args); code != 0 {
t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
}
if client.UpsertOptions.Name != "bar" {
t.Fatalf("bad: %#v", client.UpsertOptions)
}
}
func TestPush_noState(t *testing.T) {
tmp, cwd := testCwd(t)
defer testFixCwd(t, tmp, cwd)
ui := new(cli.MockUi)
c := &PushCommand{
Meta: Meta{
ContextOpts: testCtxConfig(testProvider()),
Ui: ui,
},
}
args := []string{}
if code := c.Run(args); code != 1 {
t.Fatalf("bad: \n%s", ui.ErrorWriter.String())
}
}
func TestPush_noRemoteState(t *testing.T) {
state := &terraform.State{
Modules: []*terraform.ModuleState{
&terraform.ModuleState{
Path: []string{"root"},
Resources: map[string]*terraform.ResourceState{
"test_instance.foo": &terraform.ResourceState{
Type: "test_instance",
Primary: &terraform.InstanceState{
ID: "bar",
},
},
},
},
},
}
statePath := testStateFile(t, state)
ui := new(cli.MockUi)
c := &PushCommand{
Meta: Meta{
Ui: ui,
},
}
args := []string{
"-state", statePath,
}
if code := c.Run(args); code != 1 {
t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
}
}
func TestPush_plan(t *testing.T) {
tmp, cwd := testCwd(t)
defer testFixCwd(t, tmp, cwd)
// Create remote state file, this should be pulled
conf, srv := testRemoteState(t, testState(), 200)
defer srv.Close()
// Persist local remote state
s := terraform.NewState()
s.Serial = 5
s.Remote = conf
testStateFileRemote(t, s)
// Create a plan
planPath := testPlanFile(t, &terraform.Plan{
Module: testModule(t, "apply"),
})
ui := new(cli.MockUi)
c := &PushCommand{
Meta: Meta{
ContextOpts: testCtxConfig(testProvider()),
Ui: ui,
},
}
args := []string{planPath}
if code := c.Run(args); code != 1 {
t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
}
}
func testArchiveStr(t *testing.T, path string) []string {
f, err := os.Open(path)
if err != nil {
t.Fatalf("err: %s", err)
}
defer f.Close()
// Ungzip
gzipR, err := gzip.NewReader(f)
if err != nil {
t.Fatalf("err: %s", err)
}
// Accumulator
result := make([]string, 0, 10)
// Untar
tarR := tar.NewReader(gzipR)
for {
header, err := tarR.Next()
if err == io.EOF {
break
}
if err != nil {
t.Fatalf("err: %s", err)
}
result = append(result, header.Name)
}
sort.Strings(result)
return result
}

View File

@ -41,14 +41,12 @@ func (c *RemoteConfigCommand) Run(args []string) int {
cmdFlags.Var((*FlagKV)(&config), "backend-config", "config") cmdFlags.Var((*FlagKV)(&config), "backend-config", "config")
cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } cmdFlags.Usage = func() { c.Ui.Error(c.Help()) }
if err := cmdFlags.Parse(args); err != nil { if err := cmdFlags.Parse(args); err != nil {
c.Ui.Error(fmt.Sprintf("\nError parsing CLI flags: %s", err))
return 1 return 1
} }
// Show help if given no inputs // Lowercase the type
if !c.conf.disableRemote && c.remoteConf.Type == "atlas" && len(config) == 0 { c.remoteConf.Type = strings.ToLower(c.remoteConf.Type)
cmdFlags.Usage()
return 1
}
// Set the local state path // Set the local state path
c.statePath = c.conf.statePath c.statePath = c.conf.statePath
@ -88,29 +86,63 @@ func (c *RemoteConfigCommand) Run(args []string) int {
return c.disableRemoteState() return c.disableRemoteState()
} }
// Ensure there is no conflict // Ensure there is no conflict, and then do the correct operation
var result int
haveCache := !remoteState.Empty() haveCache := !remoteState.Empty()
haveLocal := !localState.Empty() haveLocal := !localState.Empty()
switch { switch {
case haveCache && haveLocal: case haveCache && haveLocal:
c.Ui.Error(fmt.Sprintf("Remote state is enabled, but non-managed state file '%s' is also present!", c.Ui.Error(fmt.Sprintf("Remote state is enabled, but non-managed state file '%s' is also present!",
c.conf.statePath)) c.conf.statePath))
return 1 result = 1
case !haveCache && !haveLocal: case !haveCache && !haveLocal:
// If we don't have either state file, initialize a blank state file // If we don't have either state file, initialize a blank state file
return c.initBlankState() result = c.initBlankState()
case haveCache && !haveLocal: case haveCache && !haveLocal:
// Update the remote state target potentially // Update the remote state target potentially
return c.updateRemoteConfig() result = c.updateRemoteConfig()
case !haveCache && haveLocal: case !haveCache && haveLocal:
// Enable remote state management // Enable remote state management
return c.enableRemoteState() result = c.enableRemoteState()
} }
panic("unhandled case") // If there was an error, return right away
if result != 0 {
return result
}
// If we're not pulling, then do nothing
if !c.conf.pullOnDisable {
return result
}
// Otherwise, refresh the state
stateResult, err := c.StateRaw(c.StateOpts())
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error while performing the initial pull. The error message is shown\n"+
"below. Note that remote state was properly configured, so you don't\n"+
"need to reconfigure. You can now use `push` and `pull` directly.\n"+
"\n%s", err))
return 1
}
state := stateResult.State
if err := state.RefreshState(); err != nil {
c.Ui.Error(fmt.Sprintf(
"Error while performing the initial pull. The error message is shown\n"+
"below. Note that remote state was properly configured, so you don't\n"+
"need to reconfigure. You can now use `push` and `pull` directly.\n"+
"\n%s", err))
return 1
}
c.Ui.Output(c.Colorize().Color(fmt.Sprintf(
"[reset][bold][green]Remote state configured and pulled.")))
return 0
} }
// disableRemoteState is used to disable remote state management, // disableRemoteState is used to disable remote state management,
@ -177,7 +209,12 @@ func (c *RemoteConfigCommand) validateRemoteConfig() error {
conf := c.remoteConf conf := c.remoteConf
_, err := remote.NewClient(conf.Type, conf.Config) _, err := remote.NewClient(conf.Type, conf.Config)
if err != nil { if err != nil {
c.Ui.Error(fmt.Sprintf("%s", err)) c.Ui.Error(fmt.Sprintf(
"%s\n\n"+
"If the error message above mentions requiring or modifying configuration\n"+
"options, these are set using the `-backend-config` flag. Example:\n"+
"-backend-config=\"name=foo\" to set the `name` configuration",
err))
} }
return err return err
} }
@ -323,9 +360,10 @@ Options:
-disable Disables remote state management and migrates the state -disable Disables remote state management and migrates the state
to the -state path. to the -state path.
-pull=true Controls if the remote state is pulled before disabling. -pull=true If disabling, this controls if the remote state is
This defaults to true to ensure the latest state is cached pulled before disabling. If enabling, this controls
before disabling. if the remote state is pulled after enabling. This
defaults to true.
-state=path Path to read state. Defaults to "terraform.tfstate" -state=path Path to read state. Defaults to "terraform.tfstate"
unless remote state is enabled. unless remote state is enabled.

View File

@ -245,6 +245,7 @@ func TestRemoteConfig_initBlank(t *testing.T) {
"-backend=http", "-backend=http",
"-backend-config", "address=http://example.com", "-backend-config", "address=http://example.com",
"-backend-config", "access_token=test", "-backend-config", "access_token=test",
"-pull=false",
} }
if code := c.Run(args); code != 0 { if code := c.Run(args); code != 0 {
t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) t.Fatalf("bad: \n%s", ui.ErrorWriter.String())
@ -321,6 +322,7 @@ func TestRemoteConfig_updateRemote(t *testing.T) {
"-backend=http", "-backend=http",
"-backend-config", "address=http://example.com", "-backend-config", "address=http://example.com",
"-backend-config", "access_token=test", "-backend-config", "access_token=test",
"-pull=false",
} }
if code := c.Run(args); code != 0 { if code := c.Run(args); code != 0 {
t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) t.Fatalf("bad: \n%s", ui.ErrorWriter.String())
@ -376,6 +378,7 @@ func TestRemoteConfig_enableRemote(t *testing.T) {
"-backend=http", "-backend=http",
"-backend-config", "address=http://example.com", "-backend-config", "address=http://example.com",
"-backend-config", "access_token=test", "-backend-config", "access_token=test",
"-pull=false",
} }
if code := c.Run(args); code != 0 { if code := c.Run(args); code != 0 {
t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) t.Fatalf("bad: \n%s", ui.ErrorWriter.String())

View File

@ -61,7 +61,8 @@ func (c *RemotePullCommand) Run(args []string) int {
c.Ui.Error(fmt.Sprintf("%s", change)) c.Ui.Error(fmt.Sprintf("%s", change))
return 1 return 1
} else { } else {
c.Ui.Output(fmt.Sprintf("%s", change)) c.Ui.Output(c.Colorize().Color(fmt.Sprintf(
"[reset][bold][green]%s", change)))
} }
return 0 return 0

View File

@ -80,15 +80,6 @@ func testRemoteState(t *testing.T, s *terraform.State, c int) (*terraform.Remote
var b64md5 string var b64md5 string
buf := bytes.NewBuffer(nil) buf := bytes.NewBuffer(nil)
if s != nil {
enc := json.NewEncoder(buf)
if err := enc.Encode(s); err != nil {
t.Fatalf("err: %v", err)
}
md5 := md5.Sum(buf.Bytes())
b64md5 = base64.StdEncoding.EncodeToString(md5[:16])
}
cb := func(resp http.ResponseWriter, req *http.Request) { cb := func(resp http.ResponseWriter, req *http.Request) {
if req.Method == "PUT" { if req.Method == "PUT" {
resp.WriteHeader(c) resp.WriteHeader(c)
@ -98,13 +89,28 @@ func testRemoteState(t *testing.T, s *terraform.State, c int) (*terraform.Remote
resp.WriteHeader(404) resp.WriteHeader(404)
return return
} }
resp.Header().Set("Content-MD5", b64md5) resp.Header().Set("Content-MD5", b64md5)
resp.Write(buf.Bytes()) resp.Write(buf.Bytes())
} }
srv := httptest.NewServer(http.HandlerFunc(cb)) srv := httptest.NewServer(http.HandlerFunc(cb))
remote := &terraform.RemoteState{ remote := &terraform.RemoteState{
Type: "http", Type: "http",
Config: map[string]string{"address": srv.URL}, Config: map[string]string{"address": srv.URL},
} }
if s != nil {
// Set the remote data
s.Remote = remote
enc := json.NewEncoder(buf)
if err := enc.Encode(s); err != nil {
t.Fatalf("err: %v", err)
}
md5 := md5.Sum(buf.Bytes())
b64md5 = base64.StdEncoding.EncodeToString(md5[:16])
}
return remote, srv return remote, srv
} }

View File

@ -68,6 +68,8 @@ func (c *RemotePushCommand) Run(args []string) int {
return 1 return 1
} }
c.Ui.Output(c.Colorize().Color(
"[reset][bold][green]State successfully pushed!"))
return 0 return 0
} }

View File

@ -231,10 +231,20 @@ func remoteState(
"Error reloading remote state: {{err}}", err) "Error reloading remote state: {{err}}", err)
} }
switch cache.RefreshResult() { switch cache.RefreshResult() {
// All the results below can be safely ignored since it means the
// pull was successful in some way. Noop = nothing happened.
// Init = both are empty. UpdateLocal = local state was older and
// updated.
//
// We don't have to do anything, the pull was successful.
case state.CacheRefreshNoop: case state.CacheRefreshNoop:
case state.CacheRefreshInit: case state.CacheRefreshInit:
case state.CacheRefreshLocalNewer:
case state.CacheRefreshUpdateLocal: case state.CacheRefreshUpdateLocal:
// Our local state has a higher serial number than remote, so we
// want to explicitly sync the remote side with our local so that
// the remote gets the latest serial number.
case state.CacheRefreshLocalNewer:
// Write our local state out to the durable storage to start. // Write our local state out to the durable storage to start.
if err := cache.WriteState(local); err != nil { if err := cache.WriteState(local); err != nil {
return nil, errwrap.Wrapf( return nil, errwrap.Wrapf(

View File

@ -0,0 +1,8 @@
variable "foo" {}
variable "bar" {}
resource "test_instance" "foo" {}
atlas {
name = "foo"
}

View File

@ -0,0 +1,7 @@
variable "foo" {}
resource "test_instance" "foo" {}
atlas {
name = "foo"
}

View File

@ -0,0 +1,5 @@
resource "aws_instance" "foo" {}
atlas {
name = "foo"
}

View File

@ -80,6 +80,12 @@ func init() {
}, nil }, nil
}, },
"push": func() (cli.Command, error) {
return &command.PushCommand{
Meta: meta,
}, nil
},
"refresh": func() (cli.Command, error) { "refresh": func() (cli.Command, error) {
return &command.RefreshCommand{ return &command.RefreshCommand{
Meta: meta, Meta: meta,

View File

@ -21,6 +21,7 @@ func Append(c1, c2 *Config) (*Config, error) {
c.unknownKeys = append(c.unknownKeys, k) c.unknownKeys = append(c.unknownKeys, k)
} }
} }
for _, k := range c2.unknownKeys { for _, k := range c2.unknownKeys {
_, present := unknowns[k] _, present := unknowns[k]
if !present { if !present {
@ -29,6 +30,11 @@ func Append(c1, c2 *Config) (*Config, error) {
} }
} }
c.Atlas = c1.Atlas
if c2.Atlas != nil {
c.Atlas = c2.Atlas
}
if len(c1.Modules) > 0 || len(c2.Modules) > 0 { if len(c1.Modules) > 0 || len(c2.Modules) > 0 {
c.Modules = make( c.Modules = make(
[]*Module, 0, len(c1.Modules)+len(c2.Modules)) []*Module, 0, len(c1.Modules)+len(c2.Modules))

View File

@ -12,6 +12,9 @@ func TestAppend(t *testing.T) {
}{ }{
{ {
&Config{ &Config{
Atlas: &AtlasConfig{
Name: "foo",
},
Modules: []*Module{ Modules: []*Module{
&Module{Name: "foo"}, &Module{Name: "foo"},
}, },
@ -32,6 +35,9 @@ func TestAppend(t *testing.T) {
}, },
&Config{ &Config{
Atlas: &AtlasConfig{
Name: "bar",
},
Modules: []*Module{ Modules: []*Module{
&Module{Name: "bar"}, &Module{Name: "bar"},
}, },
@ -52,6 +58,9 @@ func TestAppend(t *testing.T) {
}, },
&Config{ &Config{
Atlas: &AtlasConfig{
Name: "bar",
},
Modules: []*Module{ Modules: []*Module{
&Module{Name: "foo"}, &Module{Name: "foo"},
&Module{Name: "bar"}, &Module{Name: "bar"},

View File

@ -28,6 +28,7 @@ type Config struct {
// any meaningful directory. // any meaningful directory.
Dir string Dir string
Atlas *AtlasConfig
Modules []*Module Modules []*Module
ProviderConfigs []*ProviderConfig ProviderConfigs []*ProviderConfig
Resources []*Resource Resources []*Resource
@ -39,6 +40,13 @@ type Config struct {
unknownKeys []string unknownKeys []string
} }
// AtlasConfig is the configuration for building in HashiCorp's Atlas.
type AtlasConfig struct {
Name string
Include []string
Exclude []string
}
// Module is a module used within a configuration. // Module is a module used within a configuration.
// //
// This does not represent a module itself, this represents a module // This does not represent a module itself, this represents a module

View File

@ -17,6 +17,7 @@ type hclConfigurable struct {
func (t *hclConfigurable) Config() (*Config, error) { func (t *hclConfigurable) Config() (*Config, error) {
validKeys := map[string]struct{}{ validKeys := map[string]struct{}{
"atlas": struct{}{},
"module": struct{}{}, "module": struct{}{},
"output": struct{}{}, "output": struct{}{},
"provider": struct{}{}, "provider": struct{}{},
@ -70,6 +71,15 @@ func (t *hclConfigurable) Config() (*Config, error) {
} }
} }
// Get Atlas configuration
if atlas := t.Object.Get("atlas", false); atlas != nil {
var err error
config.Atlas, err = loadAtlasHcl(atlas)
if err != nil {
return nil, err
}
}
// Build the modules // Build the modules
if modules := t.Object.Get("module", false); modules != nil { if modules := t.Object.Get("module", false); modules != nil {
var err error var err error
@ -187,6 +197,19 @@ func loadFileHcl(root string) (configurable, []string, error) {
return result, nil, nil return result, nil, nil
} }
// Given a handle to a HCL object, this transforms it into the Atlas
// configuration.
func loadAtlasHcl(obj *hclobj.Object) (*AtlasConfig, error) {
var config AtlasConfig
if err := hcl.DecodeObject(&config, obj); err != nil {
return nil, fmt.Errorf(
"Error reading atlas config: %s",
err)
}
return &config, nil
}
// Given a handle to a HCL object, this recurses into the structure // Given a handle to a HCL object, this recurses into the structure
// and pulls out a list of modules. // and pulls out a list of modules.
// //

View File

@ -2,6 +2,7 @@ package config
import ( import (
"path/filepath" "path/filepath"
"reflect"
"strings" "strings"
"testing" "testing"
) )
@ -57,6 +58,11 @@ func TestLoadBasic(t *testing.T) {
t.Fatalf("bad: %#v", c.Dir) t.Fatalf("bad: %#v", c.Dir)
} }
expectedAtlas := &AtlasConfig{Name: "mitchellh/foo"}
if !reflect.DeepEqual(c.Atlas, expectedAtlas) {
t.Fatalf("bad: %#v", c.Atlas)
}
actual := variablesStr(c.Variables) actual := variablesStr(c.Variables)
if actual != strings.TrimSpace(basicVariablesStr) { if actual != strings.TrimSpace(basicVariablesStr) {
t.Fatalf("bad:\n%s", actual) t.Fatalf("bad:\n%s", actual)
@ -132,6 +138,11 @@ func TestLoadBasic_json(t *testing.T) {
t.Fatalf("bad: %#v", c.Dir) t.Fatalf("bad: %#v", c.Dir)
} }
expectedAtlas := &AtlasConfig{Name: "mitchellh/foo"}
if !reflect.DeepEqual(c.Atlas, expectedAtlas) {
t.Fatalf("bad: %#v", c.Atlas)
}
actual := variablesStr(c.Variables) actual := variablesStr(c.Variables)
if actual != strings.TrimSpace(basicVariablesStr) { if actual != strings.TrimSpace(basicVariablesStr) {
t.Fatalf("bad:\n%s", actual) t.Fatalf("bad:\n%s", actual)

View File

@ -25,6 +25,13 @@ func Merge(c1, c2 *Config) (*Config, error) {
} }
} }
// Merge Atlas configuration. This is a dumb one overrides the other
// sort of merge.
c.Atlas = c1.Atlas
if c2.Atlas != nil {
c.Atlas = c2.Atlas
}
// NOTE: Everything below is pretty gross. Due to the lack of generics // NOTE: Everything below is pretty gross. Due to the lack of generics
// in Go, there is some hoop-jumping involved to make this merging a // in Go, there is some hoop-jumping involved to make this merging a
// little more test-friendly and less repetitive. Ironically, making it // little more test-friendly and less repetitive. Ironically, making it

View File

@ -13,6 +13,9 @@ func TestMerge(t *testing.T) {
// Normal good case. // Normal good case.
{ {
&Config{ &Config{
Atlas: &AtlasConfig{
Name: "foo",
},
Modules: []*Module{ Modules: []*Module{
&Module{Name: "foo"}, &Module{Name: "foo"},
}, },
@ -33,6 +36,9 @@ func TestMerge(t *testing.T) {
}, },
&Config{ &Config{
Atlas: &AtlasConfig{
Name: "bar",
},
Modules: []*Module{ Modules: []*Module{
&Module{Name: "bar"}, &Module{Name: "bar"},
}, },
@ -53,6 +59,9 @@ func TestMerge(t *testing.T) {
}, },
&Config{ &Config{
Atlas: &AtlasConfig{
Name: "bar",
},
Modules: []*Module{ Modules: []*Module{
&Module{Name: "foo"}, &Module{Name: "foo"},
&Module{Name: "bar"}, &Module{Name: "bar"},

View File

@ -49,3 +49,7 @@ resource "aws_instance" "db" {
output "web_ip" { output "web_ip" {
value = "${aws_instance.web.private_ip}" value = "${aws_instance.web.private_ip}"
} }
atlas {
name = "mitchellh/foo"
}

View File

@ -63,5 +63,9 @@
"web_ip": { "web_ip": {
"value": "${aws_instance.web.private_ip}" "value": "${aws_instance.web.private_ip}"
} }
},
"atlas": {
"name": "mitchellh/foo"
} }
} }

View File

@ -16,9 +16,12 @@ import (
type InputMode byte type InputMode byte
const ( const (
// InputModeVar asks for variables // InputModeVar asks for all variables
InputModeVar InputMode = 1 << iota InputModeVar InputMode = 1 << iota
// InputModeVarUnset asks for variables which are not set yet
InputModeVarUnset
// InputModeProvider asks for provider variables // InputModeProvider asks for provider variables
InputModeProvider InputModeProvider
@ -154,6 +157,14 @@ func (c *Context) Input(mode InputMode) error {
} }
sort.Strings(names) sort.Strings(names)
for _, n := range names { for _, n := range names {
// If we only care about unset variables, then if the variabel
// is set, continue on.
if mode&InputModeVarUnset != 0 {
if _, ok := c.variables[n]; ok {
continue
}
}
v := m[n] v := m[n]
switch v.Type() { switch v.Type() {
case config.VariableTypeMap: case config.VariableTypeMap:
@ -365,6 +376,23 @@ func (c *Context) Validate() ([]string, []error) {
return walker.ValidationWarnings, rerrs.Errors return walker.ValidationWarnings, rerrs.Errors
} }
// Module returns the module tree associated with this context.
func (c *Context) Module() *module.Tree {
return c.module
}
// Variables will return the mapping of variables that were defined
// for this Context. If Input was called, this mapping may be different
// than what was given.
func (c *Context) Variables() map[string]string {
return c.variables
}
// SetVariable sets a variable after a context has already been built.
func (c *Context) SetVariable(k, v string) {
c.variables[k] = v
}
func (c *Context) acquireRun() chan<- struct{} { func (c *Context) acquireRun() chan<- struct{} {
c.l.Lock() c.l.Lock()
defer c.l.Unlock() defer c.l.Unlock()

View File

@ -2505,6 +2505,9 @@ func TestContext2Input_provider(t *testing.T) {
actual = c.Config["foo"] actual = c.Config["foo"]
return nil return nil
} }
p.ValidateFn = func(c *ResourceConfig) ([]string, []error) {
return nil, c.CheckSet([]string{"foo"})
}
if err := ctx.Input(InputModeStd); err != nil { if err := ctx.Input(InputModeStd); err != nil {
t.Fatalf("err: %s", err) t.Fatalf("err: %s", err)
@ -2758,6 +2761,48 @@ func TestContext2Input_varOnly(t *testing.T) {
} }
} }
func TestContext2Input_varOnlyUnset(t *testing.T) {
input := new(MockUIInput)
m := testModule(t, "input-vars-unset")
p := testProvider("aws")
p.ApplyFn = testApplyFn
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Module: m,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
Variables: map[string]string{
"foo": "foovalue",
},
UIInput: input,
})
input.InputReturnMap = map[string]string{
"var.foo": "nope",
"var.bar": "baz",
}
if err := ctx.Input(InputModeVar | InputModeVarUnset); err != nil {
t.Fatalf("err: %s", err)
}
if _, err := ctx.Plan(nil); err != nil {
t.Fatalf("err: %s", err)
}
state, err := ctx.Apply()
if err != nil {
t.Fatalf("err: %s", err)
}
actualStr := strings.TrimSpace(state.String())
expectedStr := strings.TrimSpace(testTerraformInputVarOnlyUnsetStr)
if actualStr != expectedStr {
t.Fatalf("bad: \n%s", actualStr)
}
}
func TestContext2Apply(t *testing.T) { func TestContext2Apply(t *testing.T) {
m := testModule(t, "apply-good") m := testModule(t, "apply-good")
p := testProvider("aws") p := testProvider("aws")

View File

@ -6,17 +6,18 @@ import (
"github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config"
) )
// EvalConfigProvider is an EvalNode implementation that configures // EvalBuildProviderConfig outputs a *ResourceConfig that is properly
// a provider that is already initialized and retrieved. // merged with parents and inputs on top of what is configured in the file.
type EvalConfigProvider struct { type EvalBuildProviderConfig struct {
Provider string Provider string
Config **ResourceConfig Config **ResourceConfig
Output **ResourceConfig
} }
func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) { func (n *EvalBuildProviderConfig) Eval(ctx EvalContext) (interface{}, error) {
cfg := *n.Config cfg := *n.Config
// If we have a configuration set, then use that // If we have a configuration set, then merge that in
if input := ctx.ProviderInput(n.Provider); input != nil { if input := ctx.ProviderInput(n.Provider); input != nil {
rc, err := config.NewRawConfig(input) rc, err := config.NewRawConfig(input)
if err != nil { if err != nil {
@ -33,7 +34,19 @@ func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) {
cfg = NewResourceConfig(merged) cfg = NewResourceConfig(merged)
} }
return nil, ctx.ConfigureProvider(n.Provider, cfg) *n.Output = cfg
return nil, nil
}
// EvalConfigProvider is an EvalNode implementation that configures
// a provider that is already initialized and retrieved.
type EvalConfigProvider struct {
Provider string
Config **ResourceConfig
}
func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) {
return nil, ctx.ConfigureProvider(n.Provider, *n.Config)
} }
// EvalInitProvider is an EvalNode implementation that initializes a provider // EvalInitProvider is an EvalNode implementation that initializes a provider

View File

@ -5,6 +5,71 @@ import (
"testing" "testing"
) )
func TestEvalBuildProviderConfig_impl(t *testing.T) {
var _ EvalNode = new(EvalBuildProviderConfig)
}
func TestEvalBuildProviderConfig(t *testing.T) {
config := testResourceConfig(t, map[string]interface{}{})
provider := "foo"
n := &EvalBuildProviderConfig{
Provider: provider,
Config: &config,
Output: &config,
}
ctx := &MockEvalContext{
ParentProviderConfigConfig: testResourceConfig(t, map[string]interface{}{
"foo": "bar",
}),
ProviderInputConfig: map[string]interface{}{
"bar": "baz",
},
}
if _, err := n.Eval(ctx); err != nil {
t.Fatalf("err: %s", err)
}
expected := map[string]interface{}{
"foo": "bar",
"bar": "baz",
}
if !reflect.DeepEqual(config.Raw, expected) {
t.Fatalf("bad: %#v", config.Raw)
}
}
func TestEvalBuildProviderConfig_parentPriority(t *testing.T) {
config := testResourceConfig(t, map[string]interface{}{})
provider := "foo"
n := &EvalBuildProviderConfig{
Provider: provider,
Config: &config,
Output: &config,
}
ctx := &MockEvalContext{
ParentProviderConfigConfig: testResourceConfig(t, map[string]interface{}{
"foo": "bar",
}),
ProviderInputConfig: map[string]interface{}{
"foo": "baz",
},
}
if _, err := n.Eval(ctx); err != nil {
t.Fatalf("err: %s", err)
}
expected := map[string]interface{}{
"foo": "bar",
}
if !reflect.DeepEqual(config.Raw, expected) {
t.Fatalf("bad: %#v", config.Raw)
}
}
func TestEvalConfigProvider_impl(t *testing.T) { func TestEvalConfigProvider_impl(t *testing.T) {
var _ EvalNode = new(EvalConfigProvider) var _ EvalNode = new(EvalConfigProvider)
} }

View File

@ -57,7 +57,6 @@ RETURN:
// EvalValidateProvider is an EvalNode implementation that validates // EvalValidateProvider is an EvalNode implementation that validates
// the configuration of a resource. // the configuration of a resource.
type EvalValidateProvider struct { type EvalValidateProvider struct {
ProviderName string
Provider *ResourceProvider Provider *ResourceProvider
Config **ResourceConfig Config **ResourceConfig
} }
@ -66,12 +65,6 @@ func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) {
provider := *n.Provider provider := *n.Provider
config := *n.Config config := *n.Config
// Get the parent configuration if there is one
if parent := ctx.ParentProviderConfig(n.ProviderName); parent != nil {
merged := parent.raw.Merge(config.raw)
config = NewResourceConfig(merged)
}
warns, errs := provider.Validate(config) warns, errs := provider.Validate(config)
if len(warns) == 0 && len(errs) == 0 { if len(warns) == 0 && len(errs) == 0 {
return nil, nil return nil, nil

View File

@ -44,8 +44,12 @@ func ProviderEvalTree(n string, config *config.RawConfig) EvalNode {
Config: config, Config: config,
Output: &resourceConfig, Output: &resourceConfig,
}, },
&EvalBuildProviderConfig{
Provider: n,
Config: &resourceConfig,
Output: &resourceConfig,
},
&EvalValidateProvider{ &EvalValidateProvider{
ProviderName: n,
Provider: &provider, Provider: &provider,
Config: &resourceConfig, Config: &resourceConfig,
}, },

View File

@ -214,7 +214,20 @@ func (s *State) DeepCopy() *State {
// IncrementSerialMaybe increments the serial number of this state // IncrementSerialMaybe increments the serial number of this state
// if it different from the other state. // if it different from the other state.
func (s *State) IncrementSerialMaybe(other *State) { func (s *State) IncrementSerialMaybe(other *State) {
if s == nil {
return
}
if other == nil {
return
}
if s.Serial > other.Serial {
return
}
if !s.Equal(other) { if !s.Equal(other) {
if other.Serial > s.Serial {
s.Serial = other.Serial
}
s.Serial++ s.Serial++
} }
} }
@ -331,6 +344,10 @@ func (r *RemoteState) Equals(other *RemoteState) bool {
return true return true
} }
func (r *RemoteState) GoString() string {
return fmt.Sprintf("*%#v", *r)
}
// ModuleState is used to track all the state relevant to a single // ModuleState is used to track all the state relevant to a single
// module. Previous to Terraform 0.3, all state belonged to the "root" // module. Previous to Terraform 0.3, all state belonged to the "root"
// module. // module.

View File

@ -178,6 +178,50 @@ func TestStateEqual(t *testing.T) {
} }
} }
func TestStateIncrementSerialMaybe(t *testing.T) {
cases := map[string]struct {
S1, S2 *State
Serial int64
}{
"S2 is nil": {
&State{},
nil,
0,
},
"S2 is identical": {
&State{},
&State{},
0,
},
"S2 is different": {
&State{},
&State{
Modules: []*ModuleState{
&ModuleState{Path: rootModulePath},
},
},
1,
},
"S1 serial is higher": {
&State{Serial: 5},
&State{
Serial: 3,
Modules: []*ModuleState{
&ModuleState{Path: rootModulePath},
},
},
5,
},
}
for name, tc := range cases {
tc.S1.IncrementSerialMaybe(tc.S2)
if tc.S1.Serial != tc.Serial {
t.Fatalf("Bad: %s\nGot: %d", name, tc.S1.Serial)
}
}
}
func TestResourceStateEqual(t *testing.T) { func TestResourceStateEqual(t *testing.T) {
cases := []struct { cases := []struct {
Result bool Result bool

View File

@ -150,6 +150,14 @@ aws_instance.foo:
type = aws_instance type = aws_instance
` `
const testTerraformInputVarOnlyUnsetStr = `
aws_instance.foo:
ID = foo
bar = baz
foo = foovalue
type = aws_instance
`
const testTerraformInputVarsStr = ` const testTerraformInputVarsStr = `
aws_instance.bar: aws_instance.bar:
ID = foo ID = foo

View File

@ -0,0 +1,7 @@
variable "foo" {}
variable "bar" {}
resource "aws_instance" "foo" {
foo = "${var.foo}"
bar = "${var.bar}"
}

View File

@ -49,5 +49,6 @@ The command-line flags are all optional. The list of available flags are:
* `-var-file=foo` - Set variables in the Terraform configuration from * `-var-file=foo` - Set variables in the Terraform configuration from
a file. If "terraform.tfvars" is present, it will be automatically a file. If "terraform.tfvars" is present, it will be automatically
loaded if this flag is not specified. loaded first. Any files specified by `-var-file` override any values
in a "terraform.tfvars".

View File

@ -0,0 +1,97 @@
---
layout: "docs"
page_title: "Command: push"
sidebar_current: "docs-commands-push"
description: |-
The `terraform push` command is used to upload the Terraform configuration to HashiCorp's Atlas service for automatically managing your infrastructure in the cloud.
---
# Command: push
The `terraform push` command uploads your Terraform configuration to
be managed by HashiCorp's [Atlas](https://atlas.hashicorp.com).
By uploading your configuration to Atlas, Atlas can automatically run
Terraform for you, will save all state transitions, will save plans,
and will keep a history of all Terraform runs.
This makes it significantly easier to use Terraform as a team: team
members modify the Terraform configurations locally and continue to
use normal version control. When the Terraform configurations are ready
to be run, they are pushed to Atlas, and any member of your team can
run Terraform with the push of a button.
Atlas can also be used to set ACLs on who can run Terraform, and a
future update of Atlas will allow parallel Terraform runs and automatically
perform infrastructure locking so only one run is modifying the same
infrastructure at a time.
## Usage
Usage: `terraform push [options] [path]`
The `path` argument is the same as for the
[apply](/docs/commands/apply.html) command.
The command-line flags are all optional. The list of available flags are:
* `-atlas-address=<url>` - An alternate address to an Atlas instance.
Defaults to `https://atlas.hashicorp.com`.
* `-upload-modules=true` - If true (default), then the
[modules](/docs/modules/index.html)
being used are all locked at their current checkout and uploaded
completely to Atlas. This prevents Atlas from running `terraform get`
for you.
* `-name=<name>` - Name of the infrastructure configuration in Atlas.
The format of this is: "username/name" so that you can upload
configurations not just to your account but to other accounts and
organizations. This setting can also be set in the configuration
in the
[Atlas section](/docs/configuration/atlas.html).
* `-no-color` - Disables output with coloring
* `-token=<token>` - Atlas API token to use to authorize the upload.
If blank or unspecified, the `ATLAS_TOKEN` environmental variable
will be used.
* `-vcs=true` - If true (default), then Terraform will detect if a VCS
is in use, such as Git, and will only upload files that are comitted to
version control. If no version control system is detected, Terraform will
upload all files in `path` (parameter to the command).
## Packaged Files
The files that are uploaded and packaged with a `push` are all the
files in the `path` given as the parameter to the command, recursively.
By default (unless `-vcs=false` is specified), Terraform will automatically
detect when a VCS such as Git is being used, and in that case will only
upload the files that are comitted. Because of this built-in intelligence,
you don't have to worry about excluding folders such as ".git" or ".hg" usually.
If Terraform doesn't detect a VCS, it will upload all files.
The reason Terraform uploads all of these files is because Terraform
cannot know what is and isn't being used for provisioning, so it uploads
all the files to be safe. To exclude certain files, specify the `-exclude`
flag when pushing, or specify the `exclude` parameter in the
[Atlas configuration section](/docs/configuration/atlas.html).
## Remote State Requirement
`terraform push` requires that
[remote state](/docs/commands/remote-config.html)
is enabled. The reasoning for this is simple: `terraform push` sends your
configuration to be managed remotely. For it to keep the state in sync
and for you to be able to easily access that state, remote state must
be enabled instead of juggling local files.
While `terraform push` sends your configuration to be managed by Atlas,
the remote state backend _does not_ have to be Atlas. It can be anything
as long as it is accessible by the public internet, since Atlas will need
to be able to communicate to it.
**Warning:** The credentials for accessing the remote state will be
sent up to Atlas as well. Therefore, we recommend you use access keys
that are restricted if possible.

View File

@ -73,8 +73,9 @@ The command-line flags are all optional. The list of available flags are:
* `-path=path` - Path of the remote state in Consul. Required for the * `-path=path` - Path of the remote state in Consul. Required for the
Consul backend. Consul backend.
* `-pull=true` - Controls if the remote state is pulled before disabling. * `-pull=true` - Controls if the remote state is pulled before disabling
This defaults to true to ensure the latest state is cached before disabling. or after enabling. This defaults to true to ensure the latest state
is available under both conditions.
* `-state=path` - Path to read state. Defaults to "terraform.tfstate" * `-state=path` - Path to read state. Defaults to "terraform.tfstate"
unless remote state is enabled. unless remote state is enabled.

View File

@ -0,0 +1,58 @@
---
layout: "docs"
page_title: "Configuring Atlas"
sidebar_current: "docs-config-atlas"
description: |-
Atlas is the ideal way to use Terraform in a team environment. Atlas will run Terraform for you, safely handle parallelization across different team members, save run history along with plans, and more.
---
# Atlas Configuration
Terraform can be configured to be able to upload to HashiCorp's
[Atlas](https://atlas.hashicorp.com). This configuration doesn't change
the behavior of Terraform itself, it only configures your Terraform
configuration to support being uploaded to Atlas via the
[push command](/docs/commands/push.html).
For more information on the benefits of uploading your Terraform
configuration to Atlas, please see the
[push command documentation](/docs/commands/push.html).
This page assumes you're familiar with the
[configuration syntax](/docs/configuration/syntax.html)
already.
## Example
Atlas configuration looks like the following:
```
atlas {
name = "mitchellh/production-example"
}
```
## Description
The `atlas` block configures the settings when Terraform is
[pushed](/docs/commands/push.html) to Atlas. Only one `atlas` block
is allowed.
Within the block (the `{ }`) is configuration for Atlas uploading.
No keys are required, but the key typically set is `name`.
**No value within the `atlas` block can use interpolations.** Due
to the nature of this configuration, interpolations are not possible.
If you want to parameterize these settings, use the Atlas block to
set defaults, then use the command-line flags of the
[push command](/docs/commands/push.html) to override.
## Syntax
The full syntax is:
```
atlas {
name = VALUE
}
```

View File

@ -23,6 +23,17 @@ resource "aws_autoscaling_group" "bar" {
desired_capacity = 4 desired_capacity = 4
force_delete = true force_delete = true
launch_configuration = "${aws_launch_configuration.foobar.name}" launch_configuration = "${aws_launch_configuration.foobar.name}"
tag {
key = "foo"
value = "bar"
propagate_at_launch = true
}
tag {
key = "lorem"
value = "ipsum"
propagate_at_launch = false
}
} }
``` ```
@ -44,6 +55,14 @@ The following arguments are supported:
group names. group names.
* `vpc_zone_identifier` (Optional) A list of subnet IDs to launch resources in. * `vpc_zone_identifier` (Optional) A list of subnet IDs to launch resources in.
* `termination_policies` (Optional) A list of policies to decide how the instances in the auto scale group should be terminated. * `termination_policies` (Optional) A list of policies to decide how the instances in the auto scale group should be terminated.
* `tag` (Optional) A list of tag blocks. Tags documented below.
Tags support the following:
* `key` - (Required) Key
* `value` - (Required) Value
* `propagate_at_launch` - (Required) Enables propagation of the tag to
Amazon EC2 instances launched via this ASG
## Attributes Reference ## Attributes Reference

View File

@ -66,9 +66,6 @@ to understand the implications of using these attributes.
The `root_block_device` mapping supports the following: The `root_block_device` mapping supports the following:
* `device_name` - The name of the root device on the target instance. Must
match the root device as defined in the AMI. Defaults to `"/dev/sda1"`, which
is the typical root volume for Linux instances.
* `volume_type` - (Optional) The type of volume. Can be `"standard"`, `"gp2"`, * `volume_type` - (Optional) The type of volume. Can be `"standard"`, `"gp2"`,
or `"io1"`. (Default: `"standard"`). or `"io1"`. (Default: `"standard"`).
* `volume_size` - (Optional) The size of the volume in gigabytes. * `volume_size` - (Optional) The size of the volume in gigabytes.

View File

@ -40,7 +40,7 @@ The following keys can be used to configure the provider.
are running terraform from a GCE instance with a properly-configured [Compute are running terraform from a GCE instance with a properly-configured [Compute
Engine Service Account](https://cloud.google.com/compute/docs/authentication). Engine Service Account](https://cloud.google.com/compute/docs/authentication).
* `project` - (Required) The name of the project to apply any resources to. * `project` - (Required) The ID of the project to apply any resources to.
* `region` - (Required) The region to operate under. * `region` - (Required) The region to operate under.

View File

@ -93,6 +93,9 @@ The `disk` block supports:
* `type` - (Optional) The GCE disk type. * `type` - (Optional) The GCE disk type.
* `size` - (Optional) The size of the image in gigabytes. If not specified,
it will inherit the size of its base image.
The `network_interface` block supports: The `network_interface` block supports:
* `network` - (Required) The name of the network to attach this interface to. * `network` - (Required) The name of the network to attach this interface to.

View File

@ -40,11 +40,16 @@ usage: terraform [--version] [--help] <command> [<args>]
Available commands are: Available commands are:
apply Builds or changes infrastructure apply Builds or changes infrastructure
destroy Destroy Terraform-managed infrastructure
get Download and install modules for the configuration
graph Create a visual graph of Terraform resources graph Create a visual graph of Terraform resources
init Initializes Terraform configuration from a module
output Read an output from a state file output Read an output from a state file
plan Generate and show an execution plan plan Generate and show an execution plan
refresh Update local state file against real resources refresh Update local state file against real resources
remote Configure remote state storage
show Inspect Terraform state or plan show Inspect Terraform state or plan
taint Manually mark a resource for recreation
version Prints the Terraform version version Prints the Terraform version
``` ```

View File

@ -45,6 +45,10 @@
<a href="/docs/configuration/modules.html">Modules</a> <a href="/docs/configuration/modules.html">Modules</a>
</li> </li>
<li<%= sidebar_current("docs-config-atlas") %>>
<a href="/docs/configuration/atlas.html">Atlas</a>
</li>
</ul> </ul>
</li> </li>
@ -79,6 +83,10 @@
<a href="/docs/commands/plan.html">plan</a> <a href="/docs/commands/plan.html">plan</a>
</li> </li>
<li<%= sidebar_current("docs-commands-push") %>>
<a href="/docs/commands/push.html">push</a>
</li>
<li<%= sidebar_current("docs-commands-refresh") %>> <li<%= sidebar_current("docs-commands-refresh") %>>
<a href="/docs/commands/refresh.html">refresh</a> <a href="/docs/commands/refresh.html">refresh</a>
</li> </li>