diff --git a/CHANGELOG.md b/CHANGELOG.md index 50b5a41c2..43e3de9ba 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,12 @@ ## 0.3.2 (unreleased) +BUG FIXES: + * core: Fixed issue causing double delete. [GH-555] + * core: Fixed issue with create-before-destroy not being respected in + some circumstances. + * core: Fixed issue with count expansion with non-homogeneous instance + plans. ## 0.3.1 (October 21, 2014) diff --git a/builtin/bins/provider-aws/main.go b/builtin/bins/provider-aws/main.go index 5ff7a4ab2..349ef0412 100644 --- a/builtin/bins/provider-aws/main.go +++ b/builtin/bins/provider-aws/main.go @@ -3,13 +3,10 @@ package main import ( "github.com/hashicorp/terraform/builtin/providers/aws" "github.com/hashicorp/terraform/plugin" - "github.com/hashicorp/terraform/terraform" ) func main() { plugin.Serve(&plugin.ServeOpts{ - ProviderFunc: func() terraform.ResourceProvider { - return new(aws.ResourceProvider) - }, + ProviderFunc: aws.Provider, }) } diff --git a/builtin/bins/provider-cloudflare/main.go b/builtin/bins/provider-cloudflare/main.go index c81c552e7..fdce8e7a8 100644 --- a/builtin/bins/provider-cloudflare/main.go +++ b/builtin/bins/provider-cloudflare/main.go @@ -3,13 +3,10 @@ package main import ( "github.com/hashicorp/terraform/builtin/providers/cloudflare" "github.com/hashicorp/terraform/plugin" - "github.com/hashicorp/terraform/terraform" ) func main() { plugin.Serve(&plugin.ServeOpts{ - ProviderFunc: func() terraform.ResourceProvider { - return new(cloudflare.ResourceProvider) - }, + ProviderFunc: cloudflare.Provider, }) } diff --git a/builtin/bins/provider-digitalocean/main.go b/builtin/bins/provider-digitalocean/main.go index 86d2acf7a..7b43c053a 100644 --- a/builtin/bins/provider-digitalocean/main.go +++ b/builtin/bins/provider-digitalocean/main.go @@ -3,13 +3,10 @@ package main import ( "github.com/hashicorp/terraform/builtin/providers/digitalocean" "github.com/hashicorp/terraform/plugin" - "github.com/hashicorp/terraform/terraform" ) func main() { plugin.Serve(&plugin.ServeOpts{ - ProviderFunc: func() terraform.ResourceProvider { - return new(digitalocean.ResourceProvider) - }, + ProviderFunc: digitalocean.Provider, }) } diff --git a/builtin/bins/provider-dnsimple/main.go b/builtin/bins/provider-dnsimple/main.go index 2c578ace2..96c5046c3 100644 --- a/builtin/bins/provider-dnsimple/main.go +++ b/builtin/bins/provider-dnsimple/main.go @@ -3,13 +3,10 @@ package main import ( "github.com/hashicorp/terraform/builtin/providers/dnsimple" "github.com/hashicorp/terraform/plugin" - "github.com/hashicorp/terraform/terraform" ) func main() { plugin.Serve(&plugin.ServeOpts{ - ProviderFunc: func() terraform.ResourceProvider { - return new(dnsimple.ResourceProvider) - }, + ProviderFunc: dnsimple.Provider, }) } diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index 8479cb9ae..c1be060bb 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -2,17 +2,74 @@ package aws import ( "fmt" - "os" + "log" "strings" "unicode" + "github.com/hashicorp/terraform/helper/multierror" + "github.com/mitchellh/goamz/autoscaling" "github.com/mitchellh/goamz/aws" + "github.com/mitchellh/goamz/ec2" + "github.com/mitchellh/goamz/elb" + "github.com/mitchellh/goamz/rds" + "github.com/mitchellh/goamz/route53" + "github.com/mitchellh/goamz/s3" ) type Config struct { - AccessKey string `mapstructure:"access_key"` - SecretKey string `mapstructure:"secret_key"` - Region string `mapstructure:"region"` + AccessKey string + SecretKey string +
Region string +} + +type AWSClient struct { + ec2conn *ec2.EC2 + elbconn *elb.ELB + autoscalingconn *autoscaling.AutoScaling + s3conn *s3.S3 + rdsconn *rds.Rds + route53 *route53.Route53 +} + +// Client configures and returns a fully initialized AWSClient +func (c *Config) Client() (interface{}, error) { + var client AWSClient + + // Get the auth and region. This can fail if keys/regions were not + // specified and we're attempting to use the environment. + var errs []error + log.Println("[INFO] Building AWS auth structure") + auth, err := c.AWSAuth() + if err != nil { + errs = append(errs, err) + } + + log.Println("[INFO] Building AWS region structure") + region, err := c.AWSRegion() + if err != nil { + errs = append(errs, err) + } + + if len(errs) == 0 { + log.Println("[INFO] Initializing EC2 connection") + client.ec2conn = ec2.New(auth, region) + log.Println("[INFO] Initializing ELB connection") + client.elbconn = elb.New(auth, region) + log.Println("[INFO] Initializing AutoScaling connection") + client.autoscalingconn = autoscaling.New(auth, region) + log.Println("[INFO] Initializing S3 connection") + client.s3conn = s3.New(auth, region) + log.Println("[INFO] Initializing RDS connection") + client.rdsconn = rds.New(auth, region) + log.Println("[INFO] Initializing Route53 connection") + client.route53 = route53.New(auth, region) + } + + if len(errs) > 0 { + return nil, &multierror.Error{Errors: errs} + } + + return &client, nil } // AWSAuth returns a valid aws.Auth object for access to AWS services, or @@ -56,10 +113,6 @@ func (c *Config) AWSRegion() (aws.Region, error) { } } - if v := os.Getenv("AWS_REGION"); v != "" { - return aws.Regions[v], nil - } - md, err := aws.GetMetaData("placement/availability-zone") if err != nil { return aws.Region{}, err diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index 54db9b4d6..d24f91611 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -4,28 +4,16 @@ import ( "os" "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" ) -// Provider returns a schema.Provider for AWS. -// -// NOTE: schema.Provider became available long after the AWS provider -// was started, so resources may not be converted to this new structure -// yet. This is a WIP. To assist with the migration, make sure any resources -// you migrate are acceptance tested, then perform the migration. -func Provider() *schema.Provider { +// Provider returns a terraform.ResourceProvider.
+func Provider() terraform.ResourceProvider { // TODO: Move the validation to this, requires conditional schemas // TODO: Move the configuration to this, requires validation return &schema.Provider{ Schema: map[string]*schema.Schema{ - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: envDefaultFunc("AWS_REGION"), - Description: descriptions["region"], - InputDefault: "us-east-1", - }, - "access_key": &schema.Schema{ Type: schema.TypeString, Required: true, @@ -39,28 +27,38 @@ func Provider() *schema.Provider { DefaultFunc: envDefaultFunc("AWS_SECRET_KEY"), Description: descriptions["secret_key"], }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: envDefaultFunc("AWS_REGION"), + Description: descriptions["region"], + InputDefault: "us-east-1", + }, }, ResourcesMap: map[string]*schema.Resource{ - "aws_autoscaling_group": resourceAwsAutoscalingGroup(), - "aws_eip": resourceAwsEip(), - "aws_elb": resourceAwsElb(), - "aws_instance": resourceAwsInstance(), - "aws_launch_configuration": resourceAwsLaunchConfiguration(), - "aws_security_group": resourceAwsSecurityGroup(), - "aws_db_subnet_group": resourceAwsDbSubnetGroup(), - "aws_vpc": resourceAwsVpc(), + "aws_autoscaling_group": resourceAwsAutoscalingGroup(), + "aws_db_instance": resourceAwsDbInstance(), + "aws_db_parameter_group": resourceAwsDbParameterGroup(), + "aws_db_security_group": resourceAwsDbSecurityGroup(), + "aws_db_subnet_group": resourceAwsDbSubnetGroup(), + "aws_eip": resourceAwsEip(), + "aws_elb": resourceAwsElb(), + "aws_instance": resourceAwsInstance(), + "aws_internet_gateway": resourceAwsInternetGateway(), + "aws_launch_configuration": resourceAwsLaunchConfiguration(), + "aws_route53_record": resourceAwsRoute53Record(), + "aws_route53_zone": resourceAwsRoute53Zone(), + "aws_route_table": resourceAwsRouteTable(), + "aws_route_table_association": resourceAwsRouteTableAssociation(), + "aws_s3_bucket": resourceAwsS3Bucket(), + "aws_security_group": resourceAwsSecurityGroup(), + "aws_subnet": resourceAwsSubnet(), + "aws_vpc": resourceAwsVpc(), }, - } -} -func envDefaultFunc(k string) schema.SchemaDefaultFunc { - return func() (interface{}, error) { - if v := os.Getenv(k); v != "" { - return v, nil - } - - return nil, nil + ConfigureFunc: providerConfigure, } } @@ -78,3 +76,23 @@ func init() { "from the 'Security & Credentials' section of the AWS console.", } } + +func envDefaultFunc(k string) schema.SchemaDefaultFunc { + return func() (interface{}, error) { + if v := os.Getenv(k); v != "" { + return v, nil + } + + return nil, nil + } +} + +func providerConfigure(d *schema.ResourceData) (interface{}, error) { + config := Config{ + AccessKey: d.Get("access_key").(string), + SecretKey: d.Get("secret_key").(string), + Region: d.Get("region").(string), + } + + return config.Client() +} diff --git a/builtin/providers/aws/provider_test.go b/builtin/providers/aws/provider_test.go index 480c88bf8..ea214f7ee 100644 --- a/builtin/providers/aws/provider_test.go +++ b/builtin/providers/aws/provider_test.go @@ -1,11 +1,43 @@ package aws import ( + "log" + "os" "testing" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" ) +var testAccProviders map[string]terraform.ResourceProvider +var testAccProvider *schema.Provider + +func init() { + testAccProvider = Provider().(*schema.Provider) + testAccProviders = map[string]terraform.ResourceProvider{ + "aws": testAccProvider, + } +} + func TestProvider(t *testing.T) { - if err := 
Provider().InternalValidate(); err != nil { + if err := Provider().(*schema.Provider).InternalValidate(); err != nil { t.Fatalf("err: %s", err) } } + +func TestProvider_impl(t *testing.T) { + var _ terraform.ResourceProvider = Provider() +} + +func testAccPreCheck(t *testing.T) { + if v := os.Getenv("AWS_ACCESS_KEY"); v == "" { + t.Fatal("AWS_ACCESS_KEY must be set for acceptance tests") + } + if v := os.Getenv("AWS_SECRET_KEY"); v == "" { + t.Fatal("AWS_SECRET_KEY must be set for acceptance tests") + } + if v := os.Getenv("AWS_REGION"); v == "" { + log.Println("[INFO] Test: Using us-west-2 as test region") + os.Setenv("AWS_REGION", "us-west-2") + } +} diff --git a/builtin/providers/aws/resource_aws_autoscaling_group.go b/builtin/providers/aws/resource_aws_autoscaling_group.go index 54c744989..184ed455c 100644 --- a/builtin/providers/aws/resource_aws_autoscaling_group.go +++ b/builtin/providers/aws/resource_aws_autoscaling_group.go @@ -110,8 +110,7 @@ func resourceAwsAutoscalingGroup() *schema.Resource { } func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - autoscalingconn := p.autoscalingconn + autoscalingconn := meta.(*AWSClient).autoscalingconn var autoScalingGroupOpts autoscaling.CreateAutoScalingGroup autoScalingGroupOpts.Name = d.Get("name").(string) @@ -161,9 +160,32 @@ func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{}) return resourceAwsAutoscalingGroupRead(d, meta) } +func resourceAwsAutoscalingGroupRead(d *schema.ResourceData, meta interface{}) error { + g, err := getAwsAutoscalingGroup(d, meta) + if err != nil { + return err + } + if g == nil { + return nil + } + + d.Set("availability_zones", g.AvailabilityZones) + d.Set("default_cooldown", g.DefaultCooldown) + d.Set("desired_capacity", g.DesiredCapacity) + d.Set("health_check_grace_period", g.HealthCheckGracePeriod) + d.Set("health_check_type", g.HealthCheckType) + d.Set("launch_configuration", g.LaunchConfigurationName) + d.Set("load_balancers", g.LoadBalancerNames) + d.Set("min_size", g.MinSize) + d.Set("max_size", g.MaxSize) + d.Set("name", g.Name) + d.Set("vpc_zone_identifier", g.VPCZoneIdentifier) + + return nil +} + func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - autoscalingconn := p.autoscalingconn + autoscalingconn := meta.(*AWSClient).autoscalingconn opts := autoscaling.UpdateAutoScalingGroup{ Name: d.Id(), @@ -195,8 +217,7 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) } func resourceAwsAutoscalingGroupDelete(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - autoscalingconn := p.autoscalingconn + autoscalingconn := meta.(*AWSClient).autoscalingconn // Read the autoscaling group first. If it doesn't exist, we're done. // We need the group in order to check if there are instances attached. 
@@ -238,35 +259,10 @@ func resourceAwsAutoscalingGroupDelete(d *schema.ResourceData, meta interface{}) return nil } -func resourceAwsAutoscalingGroupRead(d *schema.ResourceData, meta interface{}) error { - g, err := getAwsAutoscalingGroup(d, meta) - if err != nil { - return err - } - if g == nil { - return nil - } - - d.Set("availability_zones", g.AvailabilityZones) - d.Set("default_cooldown", g.DefaultCooldown) - d.Set("desired_capacity", g.DesiredCapacity) - d.Set("health_check_grace_period", g.HealthCheckGracePeriod) - d.Set("health_check_type", g.HealthCheckType) - d.Set("launch_configuration", g.LaunchConfigurationName) - d.Set("load_balancers", g.LoadBalancerNames) - d.Set("min_size", g.MinSize) - d.Set("max_size", g.MaxSize) - d.Set("name", g.Name) - d.Set("vpc_zone_identifier", g.VPCZoneIdentifier) - - return nil -} - func getAwsAutoscalingGroup( d *schema.ResourceData, meta interface{}) (*autoscaling.AutoScalingGroup, error) { - p := meta.(*ResourceProvider) - autoscalingconn := p.autoscalingconn + autoscalingconn := meta.(*AWSClient).autoscalingconn describeOpts := autoscaling.DescribeAutoScalingGroups{ Names: []string{d.Id()}, @@ -298,8 +294,7 @@ func getAwsAutoscalingGroup( } func resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - autoscalingconn := p.autoscalingconn + autoscalingconn := meta.(*AWSClient).autoscalingconn // First, set the capacity to zero so the group will drain log.Printf("[DEBUG] Reducing autoscaling group capacity to zero") diff --git a/builtin/providers/aws/resource_aws_autoscaling_group_test.go b/builtin/providers/aws/resource_aws_autoscaling_group_test.go index 79ce2f6b7..b35de9320 100644 --- a/builtin/providers/aws/resource_aws_autoscaling_group_test.go +++ b/builtin/providers/aws/resource_aws_autoscaling_group_test.go @@ -72,7 +72,7 @@ func TestAccAWSAutoScalingGroupWithLoadBalancer(t *testing.T) { }) } func testAccCheckAWSAutoScalingGroupDestroy(s *terraform.State) error { - conn := testAccProvider.autoscalingconn + conn := testAccProvider.Meta().(*AWSClient).autoscalingconn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_autoscaling_group" { @@ -164,7 +164,7 @@ func testAccCheckAWSAutoScalingGroupExists(n string, group *autoscaling.AutoScal return fmt.Errorf("No AutoScaling Group ID is set") } - conn := testAccProvider.autoscalingconn + conn := testAccProvider.Meta().(*AWSClient).autoscalingconn describeOpts := autoscaling.DescribeAutoScalingGroups{ Names: []string{rs.Primary.ID}, diff --git a/builtin/providers/aws/resource_aws_db_instance.go b/builtin/providers/aws/resource_aws_db_instance.go index 6c769a7a7..c06582aff 100644 --- a/builtin/providers/aws/resource_aws_db_instance.go +++ b/builtin/providers/aws/resource_aws_db_instance.go @@ -3,112 +3,264 @@ package aws import ( "fmt" "log" - "strconv" "strings" "time" - "github.com/hashicorp/terraform/flatmap" - "github.com/hashicorp/terraform/helper/config" - "github.com/hashicorp/terraform/helper/diff" + "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/helper/schema" "github.com/mitchellh/goamz/rds" ) -func resource_aws_db_instance_create( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - conn := p.rdsconn +func resourceAwsDbInstance() *schema.Resource { + return &schema.Resource{ + Create: 
resourceAwsDbInstanceCreate, + Read: resourceAwsDbInstanceRead, + Delete: resourceAwsDbInstanceDelete, - // Merge the diff into the state so that we have all the attributes - // properly. - rs := s.MergeDiff(d) + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, - var err error - var attr string + "username": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, - opts := rds.CreateDBInstance{} + "password": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, - if attr = rs.Attributes["allocated_storage"]; attr != "" { - opts.AllocatedStorage, err = strconv.Atoi(attr) - opts.SetAllocatedStorage = true + "engine": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "engine_version": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "allocated_storage": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "identifier": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "instance_class": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "availability_zone": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "backup_retention_period": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "backup_window": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "iops": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "maintenance_window": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "multi_az": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + + "port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "publicly_accessible": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + + "vpc_security_group_ids": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: func(v interface{}) int { + return hashcode.String(v.(string)) + }, + }, + + "security_group_names": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: func(v interface{}) int { + return hashcode.String(v.(string)) + }, + }, + + "skip_final_snapshot": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + + "final_snapshot_identifier": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "db_subnet_group_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "parameter_group_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "endpoint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "status": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn + opts := rds.CreateDBInstance{ + AllocatedStorage: d.Get("allocated_storage").(int), + SetAllocatedStorage: true, + DBInstanceClass: d.Get("instance_class").(string), + DBInstanceIdentifier: 
d.Get("identifier").(string), + DBName: d.Get("name").(string), + MasterUsername: d.Get("username").(string), + MasterUserPassword: d.Get("password").(string), + Engine: d.Get("engine").(string), + EngineVersion: d.Get("engine_version").(string), } - if attr = rs.Attributes["backup_retention_period"]; attr != "" { - opts.BackupRetentionPeriod, err = strconv.Atoi(attr) + // Special treatment for the password, as we don't want that + // saved into the state file + d.Set("password", "") + + if attr, ok := d.GetOk("backup_retention_period"); ok { + opts.BackupRetentionPeriod = attr.(int) opts.SetBackupRetentionPeriod = true } - if attr = rs.Attributes["iops"]; attr != "" { - opts.Iops, err = strconv.Atoi(attr) + if attr, ok := d.GetOk("iops"); ok { + opts.Iops = attr.(int) opts.SetIops = true } - if attr = rs.Attributes["port"]; attr != "" { - opts.Port, err = strconv.Atoi(attr) + if attr, ok := d.GetOk("port"); ok { + opts.Port = attr.(int) opts.SetPort = true } - if attr = rs.Attributes["availability_zone"]; attr != "" { - opts.AvailabilityZone = attr + if attr, ok := d.GetOk("multi_az"); ok { + opts.MultiAZ = attr.(bool) } - if attr = rs.Attributes["instance_class"]; attr != "" { - opts.DBInstanceClass = attr + if attr, ok := d.GetOk("availability_zone"); ok { + opts.AvailabilityZone = attr.(string) } - if attr = rs.Attributes["maintenance_window"]; attr != "" { - opts.PreferredMaintenanceWindow = attr + if attr, ok := d.GetOk("maintenance_window"); ok { + opts.PreferredMaintenanceWindow = attr.(string) } - if attr = rs.Attributes["backup_window"]; attr != "" { - opts.PreferredBackupWindow = attr + if attr, ok := d.GetOk("backup_window"); ok { + opts.PreferredBackupWindow = attr.(string) } - if attr = rs.Attributes["multi_az"]; attr == "true" { - opts.MultiAZ = true + if attr, ok := d.GetOk("publicly_accessible"); ok { + opts.PubliclyAccessible = attr.(bool) } - if attr = rs.Attributes["publicly_accessible"]; attr == "true" { - opts.PubliclyAccessible = true + if attr, ok := d.GetOk("db_subnet_group_name"); ok { + opts.DBSubnetGroupName = attr.(string) } - if attr = rs.Attributes["db_subnet_group_name"]; attr != "" { - opts.DBSubnetGroupName = attr + if attr, ok := d.GetOk("parameter_group_name"); ok { + opts.DBParameterGroupName = attr.(string) } - if err != nil { - return nil, fmt.Errorf("Error parsing configuration: %s", err) + if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { + var s []string + for _, v := range attr.List() { + s = append(s, v.(string)) + } + opts.VpcSecurityGroupIds = s } - if _, ok := rs.Attributes["vpc_security_group_ids.#"]; ok { - opts.VpcSecurityGroupIds = expandStringList(flatmap.Expand( - rs.Attributes, "vpc_security_group_ids").([]interface{})) + if attr := d.Get("security_group_names").(*schema.Set); attr.Len() > 0 { + var s []string + for _, v := range attr.List() { + s = append(s, v.(string)) + } + opts.DBSecurityGroupNames = s } - if _, ok := rs.Attributes["security_group_names.#"]; ok { - opts.DBSecurityGroupNames = expandStringList(flatmap.Expand( - rs.Attributes, "security_group_names").([]interface{})) - } - - opts.DBInstanceIdentifier = rs.Attributes["identifier"] - opts.DBName = rs.Attributes["name"] - opts.MasterUsername = rs.Attributes["username"] - opts.MasterUserPassword = rs.Attributes["password"] - opts.EngineVersion = rs.Attributes["engine_version"] - opts.Engine = rs.Attributes["engine"] - log.Printf("[DEBUG] DB Instance create configuration: %#v", opts) - _, err = conn.CreateDBInstance(&opts) + _, err := 
conn.CreateDBInstance(&opts) if err != nil { - return nil, fmt.Errorf("Error creating DB Instance: %s", err) + return fmt.Errorf("Error creating DB Instance: %s", err) } - rs.ID = rs.Attributes["identifier"] + d.SetId(d.Get("identifier").(string)) - log.Printf("[INFO] DB Instance ID: %s", rs.ID) + log.Printf("[INFO] DB Instance ID: %s", d.Id()) log.Println( "[INFO] Waiting for DB Instance to be available") @@ -116,8 +268,8 @@ func resource_aws_db_instance_create( stateConf := &resource.StateChangeConf{ Pending: []string{"creating", "backing-up", "modifying"}, Target: "available", - Refresh: DBInstanceStateRefreshFunc(rs.ID, conn), - Timeout: 10 * time.Minute, + Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta), + Timeout: 20 * time.Minute, MinTimeout: 10 * time.Second, Delay: 30 * time.Second, // Wait 30 secs before starting } @@ -125,38 +277,77 @@ func resource_aws_db_instance_create( // Wait, catching any errors _, err = stateConf.WaitForState() if err != nil { - return rs, err + return err } - v, err := resource_aws_db_instance_retrieve(rs.ID, conn) + return resourceAwsDbInstanceRead(d, meta) +} + +func resourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error { + v, err := resourceAwsBbInstanceRetrieve(d, meta) + if err != nil { - return rs, err + return err + } + if v == nil { + d.SetId("") + return nil } - return resource_aws_db_instance_update_state(rs, v) + d.Set("name", v.DBName) + d.Set("username", v.MasterUsername) + d.Set("engine", v.Engine) + d.Set("engine_version", v.EngineVersion) + d.Set("allocated_storage", v.AllocatedStorage) + d.Set("instance_class", v.DBInstanceClass) + d.Set("availability_zone", v.AvailabilityZone) + d.Set("backup_retention_period", v.BackupRetentionPeriod) + d.Set("backup_window", v.PreferredBackupWindow) + d.Set("maintenance_window", v.PreferredMaintenanceWindow) + d.Set("multi_az", v.MultiAZ) + d.Set("port", v.Port) + d.Set("db_subnet_group_name", v.DBSubnetGroup.Name) + d.Set("parameter_group_name", v.DBParameterGroupName) + d.Set("address", v.Address) + d.Set("endpoint", fmt.Sprintf("%s:%d", v.Address, v.Port)) + d.Set("status", v.DBInstanceStatus) + + // Create an empty schema.Set to hold all vpc security group ids + ids := &schema.Set{ + F: func(v interface{}) int { + return hashcode.String(v.(string)) + }, + } + for _, v := range v.VpcSecurityGroupIds { + ids.Add(v) + } + d.Set("vpc_security_group_ids", ids) + + // Create an empty schema.Set to hold all security group names + sgn := &schema.Set{ + F: func(v interface{}) int { + return hashcode.String(v.(string)) + }, + } + for _, v := range v.DBSecurityGroupNames { + sgn.Add(v) + } + d.Set("security_group_names", sgn) + + return nil } -func resource_aws_db_instance_update( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - panic("Cannot update DB") -} +func resourceAwsDbInstanceDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn -func resource_aws_db_instance_destroy( - s *terraform.InstanceState, - meta interface{}) error { - p := meta.(*ResourceProvider) - conn := p.rdsconn + log.Printf("[DEBUG] DB Instance destroy: %v", d.Id()) - log.Printf("[DEBUG] DB Instance destroy: %v", s.ID) + opts := rds.DeleteDBInstance{DBInstanceIdentifier: d.Id()} - opts := rds.DeleteDBInstance{DBInstanceIdentifier: s.ID} - - if s.Attributes["skip_final_snapshot"] == "true" { + if d.Get("skip_final_snapshot").(bool) { opts.SkipFinalSnapshot = true } else { - opts.FinalDBSnapshotIdentifier 
= s.Attributes["final_snapshot_identifier"] + opts.FinalDBSnapshotIdentifier = d.Get("final_snapshot_identifier").(string) } log.Printf("[DEBUG] DB Instance destroy configuration: %v", opts) @@ -170,8 +361,8 @@ func resource_aws_db_instance_destroy( Pending: []string{"creating", "backing-up", "modifying", "deleting", "available"}, Target: "", - Refresh: DBInstanceStateRefreshFunc(s.ID, conn), - Timeout: 10 * time.Minute, + Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta), + Timeout: 20 * time.Minute, MinTimeout: 10 * time.Second, Delay: 30 * time.Second, // Wait 30 secs before starting } @@ -182,114 +373,12 @@ func resource_aws_db_instance_destroy( return nil } -func resource_aws_db_instance_refresh( - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - conn := p.rdsconn +func resourceAwsBbInstanceRetrieve( + d *schema.ResourceData, meta interface{}) (*rds.DBInstance, error) { + conn := meta.(*AWSClient).rdsconn - v, err := resource_aws_db_instance_retrieve(s.ID, conn) - - if err != nil { - return s, err - } - if v == nil { - s.ID = "" - return s, nil - } - - return resource_aws_db_instance_update_state(s, v) -} - -func resource_aws_db_instance_diff( - s *terraform.InstanceState, - c *terraform.ResourceConfig, - meta interface{}) (*terraform.InstanceDiff, error) { - - b := &diff.ResourceBuilder{ - Attrs: map[string]diff.AttrType{ - "allocated_storage": diff.AttrTypeCreate, - "availability_zone": diff.AttrTypeCreate, - "backup_retention_period": diff.AttrTypeCreate, - "backup_window": diff.AttrTypeCreate, - "engine": diff.AttrTypeCreate, - "engine_version": diff.AttrTypeCreate, - "identifier": diff.AttrTypeCreate, - "instance_class": diff.AttrTypeCreate, - "iops": diff.AttrTypeCreate, - "maintenance_window": diff.AttrTypeCreate, - "multi_az": diff.AttrTypeCreate, - "name": diff.AttrTypeCreate, - "password": diff.AttrTypeCreate, - "port": diff.AttrTypeCreate, - "publicly_accessible": diff.AttrTypeCreate, - "username": diff.AttrTypeCreate, - "vpc_security_group_ids": diff.AttrTypeCreate, - "security_group_names": diff.AttrTypeCreate, - "db_subnet_group_name": diff.AttrTypeCreate, - "skip_final_snapshot": diff.AttrTypeUpdate, - "final_snapshot_identifier": diff.AttrTypeUpdate, - }, - - ComputedAttrs: []string{ - "address", - "availability_zone", - "backup_retention_period", - "backup_window", - "engine_version", - "maintenance_window", - "endpoint", - "status", - "multi_az", - "port", - "address", - "password", - }, - } - - return b.Diff(s, c) -} - -func resource_aws_db_instance_update_state( - s *terraform.InstanceState, - v *rds.DBInstance) (*terraform.InstanceState, error) { - - s.Attributes["address"] = v.Address - s.Attributes["allocated_storage"] = strconv.Itoa(v.AllocatedStorage) - s.Attributes["availability_zone"] = v.AvailabilityZone - s.Attributes["backup_retention_period"] = strconv.Itoa(v.BackupRetentionPeriod) - s.Attributes["backup_window"] = v.PreferredBackupWindow - s.Attributes["endpoint"] = fmt.Sprintf("%s:%s", s.Attributes["address"], strconv.Itoa(v.Port)) - s.Attributes["engine"] = v.Engine - s.Attributes["engine_version"] = v.EngineVersion - s.Attributes["instance_class"] = v.DBInstanceClass - s.Attributes["maintenance_window"] = v.PreferredMaintenanceWindow - s.Attributes["multi_az"] = strconv.FormatBool(v.MultiAZ) - s.Attributes["name"] = v.DBName - s.Attributes["port"] = strconv.Itoa(v.Port) - s.Attributes["status"] = v.DBInstanceStatus - s.Attributes["username"] = v.MasterUsername - 
s.Attributes["db_subnet_group_name"] = v.DBSubnetGroup.Name - - // Flatten our group values - toFlatten := make(map[string]interface{}) - - if len(v.DBSecurityGroupNames) > 0 && v.DBSecurityGroupNames[0] != "" { - toFlatten["security_group_names"] = v.DBSecurityGroupNames - } - if len(v.VpcSecurityGroupIds) > 0 && v.VpcSecurityGroupIds[0] != "" { - toFlatten["vpc_security_group_ids"] = v.VpcSecurityGroupIds - } - for k, v := range flatmap.Flatten(toFlatten) { - s.Attributes[k] = v - } - - return s, nil -} - -func resource_aws_db_instance_retrieve(id string, conn *rds.Rds) (*rds.DBInstance, error) { opts := rds.DescribeDBInstances{ - DBInstanceIdentifier: id, + DBInstanceIdentifier: d.Id(), } log.Printf("[DEBUG] DB Instance describe configuration: %#v", opts) @@ -304,7 +393,7 @@ func resource_aws_db_instance_retrieve(id string, conn *rds.Rds) (*rds.DBInstanc } if len(resp.DBInstances) != 1 || - resp.DBInstances[0].DBInstanceIdentifier != id { + resp.DBInstances[0].DBInstanceIdentifier != d.Id() { if err != nil { return nil, nil } @@ -315,38 +404,10 @@ func resource_aws_db_instance_retrieve(id string, conn *rds.Rds) (*rds.DBInstanc return &v, nil } -func resource_aws_db_instance_validation() *config.Validator { - return &config.Validator{ - Required: []string{ - "allocated_storage", - "engine", - "engine_version", - "identifier", - "instance_class", - "name", - "password", - "username", - }, - Optional: []string{ - "availability_zone", - "backup_retention_period", - "backup_window", - "iops", - "maintenance_window", - "multi_az", - "port", - "publicly_accessible", - "vpc_security_group_ids.*", - "skip_final_snapshot", - "security_group_names.*", - "db_subnet_group_name", - }, - } -} - -func DBInstanceStateRefreshFunc(id string, conn *rds.Rds) resource.StateRefreshFunc { +func resourceAwsDbInstanceStateRefreshFunc( + d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { return func() (interface{}, string, error) { - v, err := resource_aws_db_instance_retrieve(id, conn) + v, err := resourceAwsBbInstanceRetrieve(d, meta) if err != nil { log.Printf("Error on retrieving DB Instance when waiting: %s", err) diff --git a/builtin/providers/aws/resource_aws_db_instance_test.go b/builtin/providers/aws/resource_aws_db_instance_test.go index 05073fe93..8f87e450f 100644 --- a/builtin/providers/aws/resource_aws_db_instance_test.go +++ b/builtin/providers/aws/resource_aws_db_instance_test.go @@ -43,6 +43,8 @@ func TestAccAWSDBInstance(t *testing.T) { "aws_db_instance.bar", "skip_final_snapshot", "true"), resource.TestCheckResourceAttr( "aws_db_instance.bar", "security_group_names.0", "secfoobarbaz-test-terraform"), + resource.TestCheckResourceAttr( + "aws_db_instance.bar", "parameter_group_name", "default.mysql5.6"), ), }, }, @@ -50,7 +52,7 @@ func TestAccAWSDBInstance(t *testing.T) { } func testAccCheckAWSDBInstanceDestroy(s *terraform.State) error { - conn := testAccProvider.rdsconn + conn := testAccProvider.Meta().(*AWSClient).rdsconn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_db_instance" { @@ -113,7 +115,7 @@ func testAccCheckAWSDBInstanceExists(n string, v *rds.DBInstance) resource.TestC return fmt.Errorf("No DB Instance ID is set") } - conn := testAccProvider.rdsconn + conn := testAccProvider.Meta().(*AWSClient).rdsconn opts := rds.DescribeDBInstances{ DBInstanceIdentifier: rs.Primary.ID, @@ -160,5 +162,6 @@ resource "aws_db_instance" "bar" { skip_final_snapshot = true security_group_names = ["${aws_db_security_group.bar.name}"] + parameter_group_name = 
"default.mysql5.6" } ` diff --git a/builtin/providers/aws/resource_aws_db_parameter_group.go b/builtin/providers/aws/resource_aws_db_parameter_group.go new file mode 100644 index 000000000..7253e7793 --- /dev/null +++ b/builtin/providers/aws/resource_aws_db_parameter_group.go @@ -0,0 +1,211 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/mitchellh/goamz/rds" +) + +func resourceAwsDbParameterGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsDbParameterGroupCreate, + Read: resourceAwsDbParameterGroupRead, + Update: resourceAwsDbParameterGroupUpdate, + Delete: resourceAwsDbParameterGroupDelete, + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "family": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "parameter": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: false, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + Set: resourceAwsDbParameterHash, + }, + }, + } +} + +func resourceAwsDbParameterGroupCreate(d *schema.ResourceData, meta interface{}) error { + rdsconn := meta.(*AWSClient).rdsconn + + createOpts := rds.CreateDBParameterGroup{ + DBParameterGroupName: d.Get("name").(string), + DBParameterGroupFamily: d.Get("family").(string), + Description: d.Get("description").(string), + } + + log.Printf("[DEBUG] Create DB Parameter Group: %#v", createOpts) + _, err := rdsconn.CreateDBParameterGroup(&createOpts) + if err != nil { + return fmt.Errorf("Error creating DB Parameter Group: %s", err) + } + + d.Partial(true) + d.SetPartial("name") + d.SetPartial("family") + d.SetPartial("description") + d.Partial(false) + + d.SetId(createOpts.DBParameterGroupName) + log.Printf("[INFO] DB Parameter Group ID: %s", d.Id()) + + return resourceAwsDbParameterGroupUpdate(d, meta) +} + +func resourceAwsDbParameterGroupRead(d *schema.ResourceData, meta interface{}) error { + rdsconn := meta.(*AWSClient).rdsconn + + describeOpts := rds.DescribeDBParameterGroups{ + DBParameterGroupName: d.Id(), + } + + describeResp, err := rdsconn.DescribeDBParameterGroups(&describeOpts) + if err != nil { + return err + } + + if len(describeResp.DBParameterGroups) != 1 || + describeResp.DBParameterGroups[0].DBParameterGroupName != d.Id() { + return fmt.Errorf("Unable to find Parameter Group: %#v", describeResp.DBParameterGroups) + } + + d.Set("name", describeResp.DBParameterGroups[0].DBParameterGroupName) + d.Set("family", describeResp.DBParameterGroups[0].DBParameterGroupFamily) + d.Set("description", describeResp.DBParameterGroups[0].Description) + + // Only include user customized parameters as there's hundreds of system/default ones + describeParametersOpts := rds.DescribeDBParameters{ + DBParameterGroupName: d.Id(), + Source: "user", + } + + describeParametersResp, err := rdsconn.DescribeDBParameters(&describeParametersOpts) + if err != nil { + return err + } + + d.Set("parameter", flattenParameters(describeParametersResp.Parameters)) + + return nil +} + +func resourceAwsDbParameterGroupUpdate(d 
*schema.ResourceData, meta interface{}) error { + rdsconn := meta.(*AWSClient).rdsconn + + d.Partial(true) + + if d.HasChange("parameter") { + o, n := d.GetChange("parameter") + if o == nil { + o = new(schema.Set) + } + if n == nil { + n = new(schema.Set) + } + + os := o.(*schema.Set) + ns := n.(*schema.Set) + + // Expand the "parameter" set to goamz compat []rds.Parameter + parameters, err := expandParameters(ns.Difference(os).List()) + if err != nil { + return err + } + + if len(parameters) > 0 { + modifyOpts := rds.ModifyDBParameterGroup{ + DBParameterGroupName: d.Get("name").(string), + Parameters: parameters, + } + + log.Printf("[DEBUG] Modify DB Parameter Group: %#v", modifyOpts) + _, err = rdsconn.ModifyDBParameterGroup(&modifyOpts) + if err != nil { + return fmt.Errorf("Error modifying DB Parameter Group: %s", err) + } + } + d.SetPartial("parameter") + } + + d.Partial(false) + + return resourceAwsDbParameterGroupRead(d, meta) +} + +func resourceAwsDbParameterGroupDelete(d *schema.ResourceData, meta interface{}) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: "destroyed", + Refresh: resourceAwsDbParameterGroupDeleteRefreshFunc(d, meta), + Timeout: 3 * time.Minute, + MinTimeout: 1 * time.Second, + } + _, err := stateConf.WaitForState() + return err +} + +func resourceAwsDbParameterGroupDeleteRefreshFunc( + d *schema.ResourceData, + meta interface{}) resource.StateRefreshFunc { + rdsconn := meta.(*AWSClient).rdsconn + + return func() (interface{}, string, error) { + + deleteOpts := rds.DeleteDBParameterGroup{ + DBParameterGroupName: d.Id(), + } + + if _, err := rdsconn.DeleteDBParameterGroup(&deleteOpts); err != nil { + rdserr, ok := err.(*rds.Error) + if !ok { + return d, "error", err + } + + if rdserr.Code != "DBParameterGroupNotFoundFault" { + return d, "error", err + } + } + + return d, "destroyed", nil + } +} + +func resourceAwsDbParameterHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["value"].(string))) + + return hashcode.String(buf.String()) +} diff --git a/builtin/providers/aws/resource_aws_db_parameter_group_test.go b/builtin/providers/aws/resource_aws_db_parameter_group_test.go new file mode 100644 index 000000000..bf730e156 --- /dev/null +++ b/builtin/providers/aws/resource_aws_db_parameter_group_test.go @@ -0,0 +1,248 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/goamz/rds" +) + +func TestAccAWSDBParameterGroup(t *testing.T) { + var v rds.DBParameterGroup + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSDBParameterGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSDBParameterGroupConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSDBParameterGroupExists("aws_db_parameter_group.bar", &v), + testAccCheckAWSDBParameterGroupAttributes(&v), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "name", "parameter-group-test-terraform"), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "family", "mysql5.6"), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "description", "Test parameter group for terraform"), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "parameter.0.name", 
"character_set_results"), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "parameter.0.value", "utf8"), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "parameter.1.name", "character_set_server"), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "parameter.1.value", "utf8"), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "parameter.2.name", "character_set_client"), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "parameter.2.value", "utf8"), + ), + }, + resource.TestStep{ + Config: testAccAWSDBParameterGroupAddParametersConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSDBParameterGroupExists("aws_db_parameter_group.bar", &v), + testAccCheckAWSDBParameterGroupAttributes(&v), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "name", "parameter-group-test-terraform"), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "family", "mysql5.6"), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "description", "Test parameter group for terraform"), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "parameter.0.name", "collation_connection"), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "parameter.0.value", "utf8_unicode_ci"), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "parameter.1.name", "character_set_results"), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "parameter.1.value", "utf8"), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "parameter.2.name", "character_set_server"), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "parameter.2.value", "utf8"), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "parameter.3.name", "collation_server"), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "parameter.3.value", "utf8_unicode_ci"), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "parameter.4.name", "character_set_client"), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "parameter.4.value", "utf8"), + ), + }, + }, + }) +} + +func TestAccAWSDBParameterGroupOnly(t *testing.T) { + var v rds.DBParameterGroup + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSDBParameterGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSDBParameterGroupOnlyConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSDBParameterGroupExists("aws_db_parameter_group.bar", &v), + testAccCheckAWSDBParameterGroupAttributes(&v), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "name", "parameter-group-test-terraform"), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "family", "mysql5.6"), + resource.TestCheckResourceAttr( + "aws_db_parameter_group.bar", "description", "Test parameter group for terraform"), + ), + }, + }, + }) +} + +func testAccCheckAWSDBParameterGroupDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).rdsconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_db_parameter_group" { + continue + } + + // Try to find the Group + resp, err := conn.DescribeDBParameterGroups( + &rds.DescribeDBParameterGroups{ + DBParameterGroupName: rs.Primary.ID, + }) + + if err == nil { + if len(resp.DBParameterGroups) != 0 && + resp.DBParameterGroups[0].DBParameterGroupName == 
rs.Primary.ID { + return fmt.Errorf("DB Parameter Group still exists") + } + } + + // Verify the error + newerr, ok := err.(*rds.Error) + if !ok { + return err + } + if newerr.Code != "InvalidDBParameterGroup.NotFound" { + return err + } + } + + return nil +} + +func testAccCheckAWSDBParameterGroupAttributes(v *rds.DBParameterGroup) resource.TestCheckFunc { + return func(s *terraform.State) error { + + if v.DBParameterGroupName != "parameter-group-test-terraform" { + return fmt.Errorf("bad name: %#v", v.DBParameterGroupName) + } + + if v.DBParameterGroupFamily != "mysql5.6" { + return fmt.Errorf("bad family: %#v", v.DBParameterGroupFamily) + } + + if v.Description != "Test parameter group for terraform" { + return fmt.Errorf("bad description: %#v", v.Description) + } + + return nil + } +} + +func testAccCheckAWSDBParameterGroupExists(n string, v *rds.DBParameterGroup) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No DB Parameter Group ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).rdsconn + + opts := rds.DescribeDBParameterGroups{ + DBParameterGroupName: rs.Primary.ID, + } + + resp, err := conn.DescribeDBParameterGroups(&opts) + + if err != nil { + return err + } + + if len(resp.DBParameterGroups) != 1 || + resp.DBParameterGroups[0].DBParameterGroupName != rs.Primary.ID { + return fmt.Errorf("DB Parameter Group not found") + } + + *v = resp.DBParameterGroups[0] + + return nil + } +} + +const testAccAWSDBParameterGroupConfig = ` +resource "aws_db_parameter_group" "bar" { + name = "parameter-group-test-terraform" + family = "mysql5.6" + description = "Test parameter group for terraform" + parameter { + name = "character_set_server" + value = "utf8" + } + parameter { + name = "character_set_client" + value = "utf8" + } + parameter{ + name = "character_set_results" + value = "utf8" + } +} +` + +const testAccAWSDBParameterGroupAddParametersConfig = ` +resource "aws_db_parameter_group" "bar" { + name = "parameter-group-test-terraform" + family = "mysql5.6" + description = "Test parameter group for terraform" + parameter { + name = "character_set_server" + value = "utf8" + } + parameter { + name = "character_set_client" + value = "utf8" + } + parameter{ + name = "character_set_results" + value = "utf8" + } + parameter { + name = "collation_server" + value = "utf8_unicode_ci" + } + parameter { + name = "collation_connection" + value = "utf8_unicode_ci" + } +} +` + +const testAccAWSDBParameterGroupOnlyConfig = ` +resource "aws_db_parameter_group" "bar" { + name = "parameter-group-test-terraform" + family = "mysql5.6" + description = "Test parameter group for terraform" +} +` diff --git a/builtin/providers/aws/resource_aws_db_security_group.go b/builtin/providers/aws/resource_aws_db_security_group.go index a4cb1c308..a5fa41f75 100644 --- a/builtin/providers/aws/resource_aws_db_security_group.go +++ b/builtin/providers/aws/resource_aws_db_security_group.go @@ -1,68 +1,110 @@ package aws import ( + "bytes" "fmt" "log" "time" - "github.com/hashicorp/terraform/flatmap" - "github.com/hashicorp/terraform/helper/config" - "github.com/hashicorp/terraform/helper/diff" + "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/multierror" "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/helper/schema" 
"github.com/mitchellh/goamz/rds" ) -func resource_aws_db_security_group_create( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - conn := p.rdsconn +func resourceAwsDbSecurityGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsDbSecurityGroupCreate, + Read: resourceAwsDbSecurityGroupRead, + Delete: resourceAwsDbSecurityGroupDelete, - // Merge the diff into the state so that we have all the attributes - // properly. - rs := s.MergeDiff(d) + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "ingress": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cidr": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "security_group_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "security_group_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "security_group_owner_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + }, + Set: resourceAwsDbSecurityGroupIngressHash, + }, + }, + } +} + +func resourceAwsDbSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn var err error var errs []error opts := rds.CreateDBSecurityGroup{ - DBSecurityGroupName: rs.Attributes["name"], - DBSecurityGroupDescription: rs.Attributes["description"], + DBSecurityGroupName: d.Get("name").(string), + DBSecurityGroupDescription: d.Get("description").(string), } log.Printf("[DEBUG] DB Security Group create configuration: %#v", opts) _, err = conn.CreateDBSecurityGroup(&opts) if err != nil { - return nil, fmt.Errorf("Error creating DB Security Group: %s", err) + return fmt.Errorf("Error creating DB Security Group: %s", err) } - rs.ID = rs.Attributes["name"] + d.SetId(d.Get("name").(string)) - log.Printf("[INFO] DB Security Group ID: %s", rs.ID) + log.Printf("[INFO] DB Security Group ID: %s", d.Id()) - v, err := resource_aws_db_security_group_retrieve(rs.ID, conn) + sg, err := resourceAwsDbSecurityGroupRetrieve(d, meta) if err != nil { - return rs, err + return err } - if _, ok := rs.Attributes["ingress.#"]; ok { - ingresses := flatmap.Expand( - rs.Attributes, "ingress").([]interface{}) + ingresses := d.Get("ingress").(*schema.Set) + for _, ing := range ingresses.List() { + err = resourceAwsDbSecurityGroupAuthorizeRule(ing, sg.Name, conn) - for _, ing := range ingresses { - err = authorize_ingress_rule(ing, v.Name, conn) - - if err != nil { - errs = append(errs, err) - } + if err != nil { + errs = append(errs, err) } + } - if len(errs) > 0 { - return rs, &multierror.Error{Errors: errs} - } + if len(errs) > 0 { + return &multierror.Error{Errors: errs} } log.Println( @@ -71,35 +113,58 @@ func resource_aws_db_security_group_create( stateConf := &resource.StateChangeConf{ Pending: []string{"authorizing"}, Target: "authorized", - Refresh: DBSecurityGroupStateRefreshFunc(rs.ID, conn), + Refresh: resourceAwsDbSecurityGroupStateRefreshFunc(d, meta), Timeout: 10 * time.Minute, } // Wait, catching any errors _, err = stateConf.WaitForState() if err != nil { - return rs, err + return err } - return resource_aws_db_security_group_update_state(rs, v) + 
return resourceAwsDbSecurityGroupRead(d, meta) } -func resource_aws_db_security_group_update( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - panic("Cannot update DB security group") +func resourceAwsDbSecurityGroupRead(d *schema.ResourceData, meta interface{}) error { + sg, err := resourceAwsDbSecurityGroupRetrieve(d, meta) + if err != nil { + return err + } + + d.Set("name", sg.Name) + d.Set("description", sg.Description) + + // Create an empty schema.Set to hold all ingress rules + rules := &schema.Set{ + F: resourceAwsDbSecurityGroupIngressHash, + } + + for _, v := range sg.CidrIps { + rule := map[string]interface{}{"cidr": v} + rules.Add(rule) + } + + for i, _ := range sg.EC2SecurityGroupOwnerIds { + rule := map[string]interface{}{ + "security_group_name": sg.EC2SecurityGroupNames[i], + "security_group_id": sg.EC2SecurityGroupIds[i], + "security_group_owner_id": sg.EC2SecurityGroupOwnerIds[i], + } + rules.Add(rule) + } + + d.Set("ingress", rules) + + return nil } -func resource_aws_db_security_group_destroy( - s *terraform.InstanceState, - meta interface{}) error { - p := meta.(*ResourceProvider) - conn := p.rdsconn +func resourceAwsDbSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).rdsconn - log.Printf("[DEBUG] DB Security Group destroy: %v", s.ID) + log.Printf("[DEBUG] DB Security Group destroy: %v", d.Id()) - opts := rds.DeleteDBSecurityGroup{DBSecurityGroupName: s.ID} + opts := rds.DeleteDBSecurityGroup{DBSecurityGroupName: d.Id()} log.Printf("[DEBUG] DB Security Group destroy configuration: %v", opts) _, err := conn.DeleteDBSecurityGroup(&opts) @@ -115,70 +180,11 @@ func resource_aws_db_security_group_destroy( return nil } -func resource_aws_db_security_group_refresh( - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - conn := p.rdsconn +func resourceAwsDbSecurityGroupRetrieve(d *schema.ResourceData, meta interface{}) (*rds.DBSecurityGroup, error) { + conn := meta.(*AWSClient).rdsconn - v, err := resource_aws_db_security_group_retrieve(s.ID, conn) - - if err != nil { - return s, err - } - - return resource_aws_db_security_group_update_state(s, v) -} - -func resource_aws_db_security_group_diff( - s *terraform.InstanceState, - c *terraform.ResourceConfig, - meta interface{}) (*terraform.InstanceDiff, error) { - - b := &diff.ResourceBuilder{ - Attrs: map[string]diff.AttrType{ - "name": diff.AttrTypeCreate, - "description": diff.AttrTypeCreate, - "ingress": diff.AttrTypeCreate, - }, - - ComputedAttrs: []string{ - "ingress_cidr", - "ingress_security_groups", - }, - } - - return b.Diff(s, c) -} - -func resource_aws_db_security_group_update_state( - s *terraform.InstanceState, - v *rds.DBSecurityGroup) (*terraform.InstanceState, error) { - - s.Attributes["name"] = v.Name - s.Attributes["description"] = v.Description - - // Flatten our group values - toFlatten := make(map[string]interface{}) - - if len(v.EC2SecurityGroupOwnerIds) > 0 && v.EC2SecurityGroupOwnerIds[0] != "" { - toFlatten["ingress_security_groups"] = v.EC2SecurityGroupOwnerIds - } - - if len(v.CidrIps) > 0 && v.CidrIps[0] != "" { - toFlatten["ingress_cidr"] = v.CidrIps - } - - for k, v := range flatmap.Flatten(toFlatten) { - s.Attributes[k] = v - } - - return s, nil -} - -func resource_aws_db_security_group_retrieve(id string, conn *rds.Rds) (*rds.DBSecurityGroup, error) { opts := rds.DescribeDBSecurityGroups{ - DBSecurityGroupName: id, + 
DBSecurityGroupName: d.Id(), } log.Printf("[DEBUG] DB Security Group describe configuration: %#v", opts) @@ -190,7 +196,7 @@ func resource_aws_db_security_group_retrieve(id string, conn *rds.Rds) (*rds.DBS } if len(resp.DBSecurityGroups) != 1 || - resp.DBSecurityGroups[0].Name != id { + resp.DBSecurityGroups[0].Name != d.Id() { if err != nil { return nil, fmt.Errorf("Unable to find DB Security Group: %#v", resp.DBSecurityGroups) } @@ -202,27 +208,27 @@ func resource_aws_db_security_group_retrieve(id string, conn *rds.Rds) (*rds.DBS } // Authorizes the ingress rule on the db security group -func authorize_ingress_rule(ingress interface{}, dbSecurityGroupName string, conn *rds.Rds) error { +func resourceAwsDbSecurityGroupAuthorizeRule(ingress interface{}, dbSecurityGroupName string, conn *rds.Rds) error { ing := ingress.(map[string]interface{}) opts := rds.AuthorizeDBSecurityGroupIngress{ DBSecurityGroupName: dbSecurityGroupName, } - if attr, ok := ing["cidr"].(string); ok && attr != "" { - opts.Cidr = attr + if attr, ok := ing["cidr"]; ok && attr != "" { + opts.Cidr = attr.(string) } - if attr, ok := ing["security_group_name"].(string); ok && attr != "" { - opts.EC2SecurityGroupName = attr + if attr, ok := ing["security_group_name"]; ok && attr != "" { + opts.EC2SecurityGroupName = attr.(string) } - if attr, ok := ing["security_group_id"].(string); ok && attr != "" { - opts.EC2SecurityGroupId = attr + if attr, ok := ing["security_group_id"]; ok && attr != "" { + opts.EC2SecurityGroupId = attr.(string) } - if attr, ok := ing["security_group_owner_id"].(string); ok && attr != "" { - opts.EC2SecurityGroupOwnerId = attr + if attr, ok := ing["security_group_owner_id"]; ok && attr != "" { + opts.EC2SecurityGroupOwnerId = attr.(string) } log.Printf("[DEBUG] Authorize ingress rule configuration: %#v", opts) @@ -236,25 +242,33 @@ func authorize_ingress_rule(ingress interface{}, dbSecurityGroupName string, con return nil } -func resource_aws_db_security_group_validation() *config.Validator { - return &config.Validator{ - Required: []string{ - "name", - "description", - }, - Optional: []string{ - "ingress.*", - "ingress.*.cidr", - "ingress.*.security_group_name", - "ingress.*.security_group_id", - "ingress.*.security_group_owner_id", - }, +func resourceAwsDbSecurityGroupIngressHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + + if v, ok := m["cidr"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) } + + if v, ok := m["security_group_name"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + if v, ok := m["security_group_id"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + if v, ok := m["security_group_owner_id"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + return hashcode.String(buf.String()) } -func DBSecurityGroupStateRefreshFunc(id string, conn *rds.Rds) resource.StateRefreshFunc { +func resourceAwsDbSecurityGroupStateRefreshFunc( + d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { return func() (interface{}, string, error) { - v, err := resource_aws_db_security_group_retrieve(id, conn) + v, err := resourceAwsDbSecurityGroupRetrieve(d, meta) if err != nil { log.Printf("Error on retrieving DB Security Group when waiting: %s", err) diff --git a/builtin/providers/aws/resource_aws_db_security_group_test.go b/builtin/providers/aws/resource_aws_db_security_group_test.go index f81ac2537..d99bc2f93 100644 --- a/builtin/providers/aws/resource_aws_db_security_group_test.go +++ 
b/builtin/providers/aws/resource_aws_db_security_group_test.go @@ -37,7 +37,7 @@ func TestAccAWSDBSecurityGroup(t *testing.T) { } func testAccCheckAWSDBSecurityGroupDestroy(s *terraform.State) error { - conn := testAccProvider.rdsconn + conn := testAccProvider.Meta().(*AWSClient).rdsconn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_db_security_group" { @@ -107,7 +107,7 @@ func testAccCheckAWSDBSecurityGroupExists(n string, v *rds.DBSecurityGroup) reso return fmt.Errorf("No DB Security Group ID is set") } - conn := testAccProvider.rdsconn + conn := testAccProvider.Meta().(*AWSClient).rdsconn opts := rds.DescribeDBSecurityGroups{ DBSecurityGroupName: rs.Primary.ID, diff --git a/builtin/providers/aws/resource_aws_db_subnet_group.go b/builtin/providers/aws/resource_aws_db_subnet_group.go index b67f5eddc..b9aa1de17 100644 --- a/builtin/providers/aws/resource_aws_db_subnet_group.go +++ b/builtin/providers/aws/resource_aws_db_subnet_group.go @@ -15,7 +15,6 @@ func resourceAwsDbSubnetGroup() *schema.Resource { return &schema.Resource{ Create: resourceAwsDbSubnetGroupCreate, Read: resourceAwsDbSubnetGroupRead, - Update: nil, Delete: resourceAwsDbSubnetGroupDelete, Schema: map[string]*schema.Schema{ @@ -45,8 +44,7 @@ func resourceAwsDbSubnetGroup() *schema.Resource { } func resourceAwsDbSubnetGroupCreate(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - rdsconn := p.rdsconn + rdsconn := meta.(*AWSClient).rdsconn subnetIdsSet := d.Get("subnet_ids").(*schema.Set) subnetIds := make([]string, subnetIdsSet.Len()) @@ -71,21 +69,8 @@ func resourceAwsDbSubnetGroupCreate(d *schema.ResourceData, meta interface{}) er return resourceAwsDbSubnetGroupRead(d, meta) } -func resourceAwsDbSubnetGroupDelete(d *schema.ResourceData, meta interface{}) error { - stateConf := &resource.StateChangeConf{ - Pending: []string{"pending"}, - Target: "destroyed", - Refresh: resourceDbSubnetGroupDeleteRefreshFunc(d, meta), - Timeout: 3 * time.Minute, - MinTimeout: 1 * time.Second, - } - _, err := stateConf.WaitForState() - return err -} - func resourceAwsDbSubnetGroupRead(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - rdsconn := p.rdsconn + rdsconn := meta.(*AWSClient).rdsconn describeOpts := rds.DescribeDBSubnetGroups{ DBSubnetGroupName: d.Id(), @@ -107,11 +92,22 @@ func resourceAwsDbSubnetGroupRead(d *schema.ResourceData, meta interface{}) erro return nil } -func resourceDbSubnetGroupDeleteRefreshFunc( +func resourceAwsDbSubnetGroupDelete(d *schema.ResourceData, meta interface{}) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: "destroyed", + Refresh: resourceAwsDbSubnetGroupDeleteRefreshFunc(d, meta), + Timeout: 3 * time.Minute, + MinTimeout: 1 * time.Second, + } + _, err := stateConf.WaitForState() + return err +} + +func resourceAwsDbSubnetGroupDeleteRefreshFunc( d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { - p := meta.(*ResourceProvider) - rdsconn := p.rdsconn + rdsconn := meta.(*AWSClient).rdsconn return func() (interface{}, string, error) { diff --git a/builtin/providers/aws/resource_aws_db_subnet_group_test.go b/builtin/providers/aws/resource_aws_db_subnet_group_test.go index 3f7d8a632..ac9382c23 100644 --- a/builtin/providers/aws/resource_aws_db_subnet_group_test.go +++ b/builtin/providers/aws/resource_aws_db_subnet_group_test.go @@ -9,7 +9,7 @@ import ( "github.com/mitchellh/goamz/rds" ) -func TestAccAWSDbSubnetGroup(t *testing.T) { +func TestAccAWSDBSubnetGroup(t 
*testing.T) { var v rds.DBSubnetGroup testCheck := func(*terraform.State) error { @@ -19,12 +19,12 @@ func TestAccAWSDbSubnetGroup(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testAccCheckDbSubnetGroupDestroy, + CheckDestroy: testAccCheckDBSubnetGroupDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccDbSubnetGroupConfig, + Config: testAccDBSubnetGroupConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckDbSubnetGroupExists( + testAccCheckDBSubnetGroupExists( "aws_db_subnet_group.foo", &v), testCheck, ), @@ -33,8 +33,8 @@ func TestAccAWSDbSubnetGroup(t *testing.T) { }) } -func testAccCheckDbSubnetGroupDestroy(s *terraform.State) error { - conn := testAccProvider.rdsconn +func testAccCheckDBSubnetGroupDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).rdsconn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_db_subnet_group" { @@ -64,7 +64,7 @@ func testAccCheckDbSubnetGroupDestroy(s *terraform.State) error { return nil } -func testAccCheckDbSubnetGroupExists(n string, v *rds.DBSubnetGroup) resource.TestCheckFunc { +func testAccCheckDBSubnetGroupExists(n string, v *rds.DBSubnetGroup) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -75,7 +75,7 @@ func testAccCheckDbSubnetGroupExists(n string, v *rds.DBSubnetGroup) resource.Te return fmt.Errorf("No ID is set") } - conn := testAccProvider.rdsconn + conn := testAccProvider.Meta().(*AWSClient).rdsconn resp, err := conn.DescribeDBSubnetGroups(&rds.DescribeDBSubnetGroups{rs.Primary.ID}) if err != nil { return err @@ -90,18 +90,20 @@ func testAccCheckDbSubnetGroupExists(n string, v *rds.DBSubnetGroup) resource.Te } } -const testAccDbSubnetGroupConfig = ` +const testAccDBSubnetGroupConfig = ` resource "aws_vpc" "foo" { cidr_block = "10.1.0.0/16" } resource "aws_subnet" "foo" { cidr_block = "10.1.1.0/24" + availability_zone = "us-west-2a" vpc_id = "${aws_vpc.foo.id}" } resource "aws_subnet" "bar" { cidr_block = "10.1.2.0/24" + availability_zone = "us-west-2b" vpc_id = "${aws_vpc.foo.id}" } diff --git a/builtin/providers/aws/resource_aws_eip.go b/builtin/providers/aws/resource_aws_eip.go index 5a70ebacb..8f86941e0 100644 --- a/builtin/providers/aws/resource_aws_eip.go +++ b/builtin/providers/aws/resource_aws_eip.go @@ -8,7 +8,6 @@ import ( "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" - //"github.com/hashicorp/terraform/terraform" "github.com/mitchellh/goamz/ec2" ) @@ -60,8 +59,7 @@ func resourceAwsEip() *schema.Resource { } func resourceAwsEipCreate(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn + ec2conn := meta.(*AWSClient).ec2conn // By default, we're not in a VPC domainOpt := "" @@ -97,9 +95,55 @@ func resourceAwsEipCreate(d *schema.ResourceData, meta interface{}) error { return resourceAwsEipUpdate(d, meta) } +func resourceAwsEipRead(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn + + domain := resourceAwsEipDomain(d) + id := d.Id() + + assocIds := []string{} + publicIps := []string{} + if domain == "vpc" { + assocIds = []string{id} + } else { + publicIps = []string{id} + } + + log.Printf( + "[DEBUG] EIP describe configuration: %#v, %#v (domain: %s)", + assocIds, publicIps, domain) + + describeAddresses, err := ec2conn.Addresses(publicIps, assocIds, nil) + if err != nil 
{ + if ec2err, ok := err.(*ec2.Error); ok && ec2err.Code == "InvalidAllocationID.NotFound" { + d.SetId("") + return nil + } + + return fmt.Errorf("Error retrieving EIP: %s", err) + } + + // Verify AWS returned our EIP + if len(describeAddresses.Addresses) != 1 || + describeAddresses.Addresses[0].AllocationId != id || + describeAddresses.Addresses[0].PublicIp != id { + if err != nil { + return fmt.Errorf("Unable to find EIP: %#v", describeAddresses.Addresses) + } + } + + address := describeAddresses.Addresses[0] + + d.Set("association_id", address.AssociationId) + d.Set("instance", address.InstanceId) + d.Set("public_ip", address.PublicIp) + d.Set("private_ip", address.PrivateIpAddress) + + return nil +} + func resourceAwsEipUpdate(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn + ec2conn := meta.(*AWSClient).ec2conn domain := resourceAwsEipDomain(d) @@ -132,8 +176,7 @@ func resourceAwsEipUpdate(d *schema.ResourceData, meta interface{}) error { } func resourceAwsEipDelete(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn + ec2conn := meta.(*AWSClient).ec2conn if err := resourceAwsEipRead(d, meta); err != nil { return err @@ -183,54 +226,6 @@ func resourceAwsEipDelete(d *schema.ResourceData, meta interface{}) error { }) } -func resourceAwsEipRead(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn - - domain := resourceAwsEipDomain(d) - id := d.Id() - - assocIds := []string{} - publicIps := []string{} - if domain == "vpc" { - assocIds = []string{id} - } else { - publicIps = []string{id} - } - - log.Printf( - "[DEBUG] EIP describe configuration: %#v, %#v (domain: %s)", - assocIds, publicIps, domain) - - describeAddresses, err := ec2conn.Addresses(publicIps, assocIds, nil) - if err != nil { - if ec2err, ok := err.(*ec2.Error); ok && ec2err.Code == "InvalidAllocationID.NotFound" { - d.SetId("") - return nil - } - - return fmt.Errorf("Error retrieving EIP: %s", err) - } - - // Verify AWS returned our EIP - if len(describeAddresses.Addresses) != 1 || - describeAddresses.Addresses[0].AllocationId != id || - describeAddresses.Addresses[0].PublicIp != id { - if err != nil { - return fmt.Errorf("Unable to find EIP: %#v", describeAddresses.Addresses) - } - } - - address := describeAddresses.Addresses[0] - - d.Set("association_id", address.AssociationId) - d.Set("instance", address.InstanceId) - d.Set("public_ip", address.PublicIp) - d.Set("private_ip", address.PrivateIpAddress) - - return nil -} - func resourceAwsEipDomain(d *schema.ResourceData) string { if v, ok := d.GetOk("domain"); ok { return v.(string) diff --git a/builtin/providers/aws/resource_aws_eip_test.go b/builtin/providers/aws/resource_aws_eip_test.go index b0272bd52..0a72af4f4 100644 --- a/builtin/providers/aws/resource_aws_eip_test.go +++ b/builtin/providers/aws/resource_aws_eip_test.go @@ -57,7 +57,7 @@ func TestAccAWSEIP_instance(t *testing.T) { } func testAccCheckAWSEIPDestroy(s *terraform.State) error { - conn := testAccProvider.ec2conn + conn := testAccProvider.Meta().(*AWSClient).ec2conn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_eip" { @@ -112,7 +112,7 @@ func testAccCheckAWSEIPExists(n string, res *ec2.Address) resource.TestCheckFunc return fmt.Errorf("No EIP ID is set") } - conn := testAccProvider.ec2conn + conn := testAccProvider.Meta().(*AWSClient).ec2conn if strings.Contains(rs.Primary.ID, "eipalloc") { describe, err := 
conn.Addresses([]string{}, []string{rs.Primary.ID}, nil) diff --git a/builtin/providers/aws/resource_aws_elb.go b/builtin/providers/aws/resource_aws_elb.go index 353a675d6..c08b3d6da 100644 --- a/builtin/providers/aws/resource_aws_elb.go +++ b/builtin/providers/aws/resource_aws_elb.go @@ -150,36 +150,8 @@ func resourceAwsElb() *schema.Resource { } } -func resourceAwsElbHealthCheckHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%d-", m["healthy_threshold"].(int))) - buf.WriteString(fmt.Sprintf("%d-", m["unhealthy_threshold"].(int))) - buf.WriteString(fmt.Sprintf("%s-", m["target"].(string))) - buf.WriteString(fmt.Sprintf("%d-", m["interval"].(int))) - buf.WriteString(fmt.Sprintf("%d-", m["timeout"].(int))) - - return hashcode.String(buf.String()) -} - -func resourceAwsElbListenerHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%d-", m["instance_port"].(int))) - buf.WriteString(fmt.Sprintf("%s-", m["instance_protocol"].(string))) - buf.WriteString(fmt.Sprintf("%d-", m["lb_port"].(int))) - buf.WriteString(fmt.Sprintf("%s-", m["lb_protocol"].(string))) - - if v, ok := m["ssl_certificate_id"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - - return hashcode.String(buf.String()) -} - func resourceAwsElbCreate(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - elbconn := p.elbconn + elbconn := meta.(*AWSClient).elbconn // Expand the "listener" set to goamz compat []elb.Listener listeners, err := expandListeners(d.Get("listener").(*schema.Set).List()) @@ -250,9 +222,49 @@ func resourceAwsElbCreate(d *schema.ResourceData, meta interface{}) error { return resourceAwsElbUpdate(d, meta) } +func resourceAwsElbRead(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + // Retrieve the ELB properties for updating the state + describeElbOpts := &elb.DescribeLoadBalancer{ + Names: []string{d.Id()}, + } + + describeResp, err := elbconn.DescribeLoadBalancers(describeElbOpts) + if err != nil { + if ec2err, ok := err.(*elb.Error); ok && ec2err.Code == "LoadBalancerNotFound" { + // The ELB is gone now, so just remove it from the state + d.SetId("") + return nil + } + + return fmt.Errorf("Error retrieving ELB: %s", err) + } + if len(describeResp.LoadBalancers) != 1 { + return fmt.Errorf("Unable to find ELB: %#v", describeResp.LoadBalancers) + } + + lb := describeResp.LoadBalancers[0] + + d.Set("name", lb.LoadBalancerName) + d.Set("dns_name", lb.DNSName) + d.Set("internal", lb.Scheme == "internal") + d.Set("instances", flattenInstances(lb.Instances)) + d.Set("listener", flattenListeners(lb.Listeners)) + d.Set("security_groups", lb.SecurityGroups) + d.Set("subnets", lb.Subnets) + + // There's only one health check, so save that to state as we + // currently can + if lb.HealthCheck.Target != "" { + d.Set("health_check", flattenHealthCheck(lb.HealthCheck)) + } + + return nil +} + func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - elbconn := p.elbconn + elbconn := meta.(*AWSClient).elbconn d.Partial(true) @@ -297,8 +309,7 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error { } func resourceAwsElbDelete(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - elbconn := p.elbconn + elbconn := meta.(*AWSClient).elbconn log.Printf("[INFO] Deleting ELB: %s", d.Id()) @@ -313,44 +324,29 @@ func 
resourceAwsElbDelete(d *schema.ResourceData, meta interface{}) error { return nil } -func resourceAwsElbRead(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - elbconn := p.elbconn +func resourceAwsElbHealthCheckHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%d-", m["healthy_threshold"].(int))) + buf.WriteString(fmt.Sprintf("%d-", m["unhealthy_threshold"].(int))) + buf.WriteString(fmt.Sprintf("%s-", m["target"].(string))) + buf.WriteString(fmt.Sprintf("%d-", m["interval"].(int))) + buf.WriteString(fmt.Sprintf("%d-", m["timeout"].(int))) - // Retrieve the ELB properties for updating the state - describeElbOpts := &elb.DescribeLoadBalancer{ - Names: []string{d.Id()}, - } - - describeResp, err := elbconn.DescribeLoadBalancers(describeElbOpts) - if err != nil { - if ec2err, ok := err.(*elb.Error); ok && ec2err.Code == "LoadBalancerNotFound" { - // The ELB is gone now, so just remove it from the state - d.SetId("") - return nil - } - - return fmt.Errorf("Error retrieving ELB: %s", err) - } - if len(describeResp.LoadBalancers) != 1 { - return fmt.Errorf("Unable to find ELB: %#v", describeResp.LoadBalancers) - } - - lb := describeResp.LoadBalancers[0] - - d.Set("name", lb.LoadBalancerName) - d.Set("dns_name", lb.DNSName) - d.Set("internal", lb.Scheme == "internal") - d.Set("instances", flattenInstances(lb.Instances)) - d.Set("listener", flattenListeners(lb.Listeners)) - d.Set("security_groups", lb.SecurityGroups) - d.Set("subnets", lb.Subnets) - - // There's only one health check, so save that to state as we - // currently can - if lb.HealthCheck.Target != "" { - d.Set("health_check", flattenHealthCheck(lb.HealthCheck)) - } - - return nil + return hashcode.String(buf.String()) +} + +func resourceAwsElbListenerHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%d-", m["instance_port"].(int))) + buf.WriteString(fmt.Sprintf("%s-", m["instance_protocol"].(string))) + buf.WriteString(fmt.Sprintf("%d-", m["lb_port"].(int))) + buf.WriteString(fmt.Sprintf("%s-", m["lb_protocol"].(string))) + + if v, ok := m["ssl_certificate_id"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + return hashcode.String(buf.String()) } diff --git a/builtin/providers/aws/resource_aws_elb_test.go b/builtin/providers/aws/resource_aws_elb_test.go index 73a1503dd..05078b673 100644 --- a/builtin/providers/aws/resource_aws_elb_test.go +++ b/builtin/providers/aws/resource_aws_elb_test.go @@ -114,7 +114,7 @@ func TestAccAWSELB_HealthCheck(t *testing.T) { }) } func testAccCheckAWSELBDestroy(s *terraform.State) error { - conn := testAccProvider.elbconn + conn := testAccProvider.Meta().(*AWSClient).elbconn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_elb" { @@ -222,7 +222,7 @@ func testAccCheckAWSELBExists(n string, res *elb.LoadBalancer) resource.TestChec return fmt.Errorf("No ELB ID is set") } - conn := testAccProvider.elbconn + conn := testAccProvider.Meta().(*AWSClient).elbconn describe, err := conn.DescribeLoadBalancers(&elb.DescribeLoadBalancer{ Names: []string{rs.Primary.ID}, diff --git a/builtin/providers/aws/resource_aws_instance.go b/builtin/providers/aws/resource_aws_instance.go index cc7957769..ab7e02e35 100644 --- a/builtin/providers/aws/resource_aws_instance.go +++ b/builtin/providers/aws/resource_aws_instance.go @@ -1,6 +1,7 @@ package aws import ( + "bytes" "crypto/sha1" "encoding/hex" "fmt" @@ -131,13 +132,58 @@ func 
resourceAwsInstance() *schema.Resource { ForceNew: true, }, "tags": tagsSchema(), + + "block_device": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "device_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "snapshot_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "volume_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "volume_size": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "delete_on_termination": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + + "encrypted": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + }, + }, + Set: resourceAwsInstanceBlockDevicesHash, + }, }, } } func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn + ec2conn := meta.(*AWSClient).ec2conn // Figure out user data userData := "" @@ -180,6 +226,22 @@ func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error { } } + if v := d.Get("block_device"); v != nil { + vs := v.(*schema.Set).List() + if len(vs) > 0 { + runOpts.BlockDevices = make([]ec2.BlockDeviceMapping, len(vs)) + for i, v := range vs { + bd := v.(map[string]interface{}) + runOpts.BlockDevices[i].DeviceName = bd["device_name"].(string) + runOpts.BlockDevices[i].SnapshotId = bd["snapshot_id"].(string) + runOpts.BlockDevices[i].VolumeType = bd["volume_type"].(string) + runOpts.BlockDevices[i].VolumeSize = int64(bd["volume_size"].(int)) + runOpts.BlockDevices[i].DeleteOnTermination = bd["delete_on_termination"].(bool) + runOpts.BlockDevices[i].Encrypted = bd["encrypted"].(bool) + } + } + } + // Create the instance log.Printf("[DEBUG] Run configuration: %#v", runOpts) runResp, err := ec2conn.RunInstances(runOpts) @@ -232,74 +294,8 @@ func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error { return resourceAwsInstanceUpdate(d, meta) } -func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn - - modify := false - opts := new(ec2.ModifyInstance) - - if v, ok := d.GetOk("source_dest_check"); ok { - opts.SourceDestCheck = v.(bool) - opts.SetSourceDestCheck = true - modify = true - } - - if modify { - log.Printf("[INFO] Modifing instance %s: %#v", d.Id(), opts) - if _, err := ec2conn.ModifyInstance(d.Id(), opts); err != nil { - return err - } - - // TODO(mitchellh): wait for the attributes we modified to - // persist the change... 
-	}
-
-	if err := setTags(ec2conn, d); err != nil {
-		return err
-	} else {
-		d.SetPartial("tags")
-	}
-
-	return nil
-}
-
-func resourceAwsInstanceDelete(d *schema.ResourceData, meta interface{}) error {
-	p := meta.(*ResourceProvider)
-	ec2conn := p.ec2conn
-
-	log.Printf("[INFO] Terminating instance: %s", d.Id())
-	if _, err := ec2conn.TerminateInstances([]string{d.Id()}); err != nil {
-		return fmt.Errorf("Error terminating instance: %s", err)
-	}
-
-	log.Printf(
-		"[DEBUG] Waiting for instance (%s) to become terminated",
-		d.Id())
-
-	stateConf := &resource.StateChangeConf{
-		Pending:    []string{"pending", "running", "shutting-down", "stopped", "stopping"},
-		Target:     "terminated",
-		Refresh:    InstanceStateRefreshFunc(ec2conn, d.Id()),
-		Timeout:    10 * time.Minute,
-		Delay:      10 * time.Second,
-		MinTimeout: 3 * time.Second,
-	}
-
-	_, err := stateConf.WaitForState()
-	if err != nil {
-		return fmt.Errorf(
-			"Error waiting for instance (%s) to terminate: %s",
-			d.Id(), err)
-	}
-
-	d.SetId("")
-	return nil
-}
-
 func resourceAwsInstanceRead(d *schema.ResourceData, meta interface{}) error {
-	p := meta.(*ResourceProvider)
-	ec2conn := p.ec2conn
+	ec2conn := meta.(*AWSClient).ec2conn
 
 	resp, err := ec2conn.Instances([]string{d.Id()}, ec2.NewFilter())
 	if err != nil {
@@ -368,6 +364,93 @@ func resourceAwsInstanceRead(d *schema.ResourceData, meta interface{}) error {
 	}
 	d.Set("security_groups", sgs)
 
+	volIDs := make([]string, len(instance.BlockDevices))
+	bdByVolID := make(map[string]ec2.BlockDevice)
+	for i, bd := range instance.BlockDevices {
+		volIDs[i] = bd.VolumeId
+		bdByVolID[bd.VolumeId] = bd
+	}
+
+	volResp, err := ec2conn.Volumes(volIDs, ec2.NewFilter())
+	if err != nil {
+		return err
+	}
+
+	bds := make([]map[string]interface{}, len(instance.BlockDevices))
+	for i, vol := range volResp.Volumes {
+		bds[i] = make(map[string]interface{})
+		bds[i]["device_name"] = bdByVolID[vol.VolumeId].DeviceName
+		bds[i]["snapshot_id"] = vol.SnapshotId
+		bds[i]["volume_type"] = vol.VolumeType
+		bds[i]["volume_size"] = vol.Size
+		bds[i]["delete_on_termination"] = bdByVolID[vol.VolumeId].DeleteOnTermination
+		bds[i]["encrypted"] = vol.Encrypted
+	}
+	d.Set("block_device", bds)
+
+	return nil
+}
+
+func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
+	ec2conn := meta.(*AWSClient).ec2conn
+
+	modify := false
+	opts := new(ec2.ModifyInstance)
+
+	if v, ok := d.GetOk("source_dest_check"); ok {
+		opts.SourceDestCheck = v.(bool)
+		opts.SetSourceDestCheck = true
+		modify = true
+	}
+
+	if modify {
+		log.Printf("[INFO] Modifying instance %s: %#v", d.Id(), opts)
+		if _, err := ec2conn.ModifyInstance(d.Id(), opts); err != nil {
+			return err
+		}
+
+		// TODO(mitchellh): wait for the attributes we modified to
+		// persist the change...
+ } + + if err := setTags(ec2conn, d); err != nil { + return err + } else { + d.SetPartial("tags") + } + + return nil +} + +func resourceAwsInstanceDelete(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn + + log.Printf("[INFO] Terminating instance: %s", d.Id()) + if _, err := ec2conn.TerminateInstances([]string{d.Id()}); err != nil { + return fmt.Errorf("Error terminating instance: %s", err) + } + + log.Printf( + "[DEBUG] Waiting for instance (%s) to become terminated", + d.Id()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending", "running", "shutting-down", "stopped", "stopping"}, + Target: "terminated", + Refresh: InstanceStateRefreshFunc(ec2conn, d.Id()), + Timeout: 10 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err := stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for instance (%s) to terminate: %s", + d.Id(), err) + } + + d.SetId("") return nil } @@ -396,3 +479,15 @@ func InstanceStateRefreshFunc(conn *ec2.EC2, instanceID string) resource.StateRe return i, i.State.Name, nil } } + +func resourceAwsInstanceBlockDevicesHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["snapshot_id"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["volume_type"].(string))) + buf.WriteString(fmt.Sprintf("%d-", m["volume_size"].(int))) + buf.WriteString(fmt.Sprintf("%t-", m["delete_on_termination"].(bool))) + buf.WriteString(fmt.Sprintf("%t-", m["encrypted"].(bool))) + return hashcode.String(buf.String()) +} diff --git a/builtin/providers/aws/resource_aws_instance_test.go b/builtin/providers/aws/resource_aws_instance_test.go index 1c91611dd..a3c46c09a 100644 --- a/builtin/providers/aws/resource_aws_instance_test.go +++ b/builtin/providers/aws/resource_aws_instance_test.go @@ -64,6 +64,44 @@ func TestAccAWSInstance_normal(t *testing.T) { }) } +func TestAccAWSInstance_blockDevicesCheck(t *testing.T) { + var v ec2.Instance + + testCheck := func() resource.TestCheckFunc { + return func(*terraform.State) error { + + // Map out the block devices by name, which should be unique. + blockDevices := make(map[string]ec2.BlockDevice) + for _, blockDevice := range v.BlockDevices { + blockDevices[blockDevice.DeviceName] = blockDevice + } + + // Check if the secondary block device exists. 
+ if _, ok := blockDevices["/dev/sdb"]; !ok { + fmt.Errorf("block device doesn't exist: /dev/sdb") + } + + return nil + } + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccInstanceConfigBlockDevices, + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists( + "aws_instance.foo", &v), + testCheck(), + ), + }, + }, + }) +} + func TestAccAWSInstance_sourceDestCheck(t *testing.T) { var v ec2.Instance @@ -151,7 +189,7 @@ func TestAccInstance_tags(t *testing.T) { } func testAccCheckInstanceDestroy(s *terraform.State) error { - conn := testAccProvider.ec2conn + conn := testAccProvider.Meta().(*AWSClient).ec2conn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_instance" { @@ -193,7 +231,7 @@ func testAccCheckInstanceExists(n string, i *ec2.Instance) resource.TestCheckFun return fmt.Errorf("No ID is set") } - conn := testAccProvider.ec2conn + conn := testAccProvider.Meta().(*AWSClient).ec2conn resp, err := conn.Instances( []string{rs.Primary.ID}, ec2.NewFilter()) if err != nil { @@ -233,6 +271,19 @@ resource "aws_instance" "foo" { } ` +const testAccInstanceConfigBlockDevices = ` +resource "aws_instance" "foo" { + # us-west-2 + ami = "ami-55a7ea65" + instance_type = "m1.small" + block_device { + device_name = "/dev/sdb" + volume_type = "gp2" + volume_size = 10 + } +} +` + const testAccInstanceConfigSourceDest = ` resource "aws_vpc" "foo" { cidr_block = "10.1.0.0/16" @@ -293,6 +344,8 @@ resource "aws_instance" "foo" { const testAccCheckInstanceConfigTags = ` resource "aws_instance" "foo" { + ami = "ami-4fccb37f" + instance_type = "m1.small" tags { foo = "bar" } @@ -301,6 +354,8 @@ resource "aws_instance" "foo" { const testAccCheckInstanceConfigTagsUpdate = ` resource "aws_instance" "foo" { + ami = "ami-4fccb37f" + instance_type = "m1.small" tags { bar = "baz" } diff --git a/builtin/providers/aws/resource_aws_internet_gateway.go b/builtin/providers/aws/resource_aws_internet_gateway.go index fc23ebb06..0dac15781 100644 --- a/builtin/providers/aws/resource_aws_internet_gateway.go +++ b/builtin/providers/aws/resource_aws_internet_gateway.go @@ -5,84 +5,88 @@ import ( "log" "time" - "github.com/hashicorp/terraform/helper/diff" "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/helper/schema" "github.com/mitchellh/goamz/ec2" ) -func resource_aws_internet_gateway_create( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn +func resourceAwsInternetGateway() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsInternetGatewayCreate, + Read: resourceAwsInternetGatewayRead, + Update: resourceAwsInternetGatewayUpdate, + Delete: resourceAwsInternetGatewayDelete, + + Schema: map[string]*schema.Schema{ + "vpc_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsInternetGatewayCreate(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn // Create the gateway log.Printf("[DEBUG] Creating internet gateway") resp, err := ec2conn.CreateInternetGateway(nil) if err != nil { - return nil, fmt.Errorf("Error creating subnet: %s", err) + return fmt.Errorf("Error creating internet gateway: %s", err) } // Get the ID 
and store it ig := &resp.InternetGateway - s.ID = ig.InternetGatewayId - log.Printf("[INFO] InternetGateway ID: %s", s.ID) + d.SetId(ig.InternetGatewayId) + log.Printf("[INFO] InternetGateway ID: %s", d.Id()) - // Update our attributes and return - return resource_aws_internet_gateway_update(s, d, meta) + // Attach the new gateway to the correct vpc + return resourceAwsInternetGatewayAttach(d, meta) } -func resource_aws_internet_gateway_update( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn +func resourceAwsInternetGatewayRead(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn - // Merge the diff so we have the latest attributes - rs := s.MergeDiff(d) + igRaw, _, err := IGStateRefreshFunc(ec2conn, d.Id())() + if err != nil { + return err + } + if igRaw == nil { + // Seems we have lost our internet gateway + d.SetId("") + return nil + } - // A note on the states below: the AWS docs (as of July, 2014) say - // that the states would be: attached, attaching, detached, detaching, - // but when running, I noticed that the state is usually "available" when - // it is attached. + ig := igRaw.(*ec2.InternetGateway) + d.Set("vpc_id", ig.Attachments[0].VpcId) + return nil +} + +func resourceAwsInternetGatewayUpdate(d *schema.ResourceData, meta interface{}) error { // If we're already attached, detach it first - if err := resource_aws_internet_gateway_detach(ec2conn, s); err != nil { - return s, err - } - - // Set the VPC ID to empty since we're detached at this point - delete(rs.Attributes, "vpc_id") - - if attr, ok := d.Attributes["vpc_id"]; ok && attr.New != "" { - err := resource_aws_internet_gateway_attach(ec2conn, s, attr.New) - if err != nil { - return rs, err - } - - rs.Attributes["vpc_id"] = attr.New - } - - return resource_aws_internet_gateway_update_state(rs, nil) -} - -func resource_aws_internet_gateway_destroy( - s *terraform.InstanceState, - meta interface{}) error { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn - - // Detach if it is attached - if err := resource_aws_internet_gateway_detach(ec2conn, s); err != nil { + if err := resourceAwsInternetGatewayDetach(d, meta); err != nil { return err } - log.Printf("[INFO] Deleting Internet Gateway: %s", s.ID) + // Attach the gateway to the new vpc + return resourceAwsInternetGatewayAttach(d, meta) +} + +func resourceAwsInternetGatewayDelete(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn + + // Detach if it is attached + if err := resourceAwsInternetGatewayDetach(d, meta); err != nil { + return err + } + + log.Printf("[INFO] Deleting Internet Gateway: %s", d.Id()) + return resource.Retry(5*time.Minute, func() error { - _, err := ec2conn.DeleteInternetGateway(s.ID) + _, err := ec2conn.DeleteInternetGateway(d.Id()) if err != nil { ec2err, ok := err.(*ec2.Error) if !ok { @@ -103,96 +107,67 @@ func resource_aws_internet_gateway_destroy( }) // Wait for the internet gateway to actually delete - log.Printf("[DEBUG] Waiting for internet gateway (%s) to delete", s.ID) + log.Printf("[DEBUG] Waiting for internet gateway (%s) to delete", d.Id()) stateConf := &resource.StateChangeConf{ Pending: []string{"available"}, Target: "", - Refresh: IGStateRefreshFunc(ec2conn, s.ID), + Refresh: IGStateRefreshFunc(ec2conn, d.Id()), Timeout: 10 * time.Minute, } if _, err := stateConf.WaitForState(); err != nil { return fmt.Errorf( "Error waiting for internet 
gateway (%s) to destroy: %s", - s.ID, err) + d.Id(), err) } return nil } -func resource_aws_internet_gateway_refresh( - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn +func resourceAwsInternetGatewayAttach(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn - igRaw, _, err := IGStateRefreshFunc(ec2conn, s.ID)() - if err != nil { - return s, err - } - if igRaw == nil { - return nil, nil - } - - ig := igRaw.(*ec2.InternetGateway) - return resource_aws_internet_gateway_update_state(s, ig) -} - -func resource_aws_internet_gateway_diff( - s *terraform.InstanceState, - c *terraform.ResourceConfig, - meta interface{}) (*terraform.InstanceDiff, error) { - b := &diff.ResourceBuilder{ - Attrs: map[string]diff.AttrType{ - "vpc_id": diff.AttrTypeUpdate, - }, - } - - return b.Diff(s, c) -} - -func resource_aws_internet_gateway_attach( - ec2conn *ec2.EC2, - s *terraform.InstanceState, - vpcId string) error { log.Printf( "[INFO] Attaching Internet Gateway '%s' to VPC '%s'", - s.ID, - vpcId) - _, err := ec2conn.AttachInternetGateway(s.ID, vpcId) + d.Id(), + d.Get("vpc_id").(string)) + + _, err := ec2conn.AttachInternetGateway(d.Id(), d.Get("vpc_id").(string)) if err != nil { return err } + // A note on the states below: the AWS docs (as of July, 2014) say + // that the states would be: attached, attaching, detached, detaching, + // but when running, I noticed that the state is usually "available" when + // it is attached. + // Wait for it to be fully attached before continuing - log.Printf("[DEBUG] Waiting for internet gateway (%s) to attach", s.ID) + log.Printf("[DEBUG] Waiting for internet gateway (%s) to attach", d.Id()) stateConf := &resource.StateChangeConf{ Pending: []string{"detached", "attaching"}, Target: "available", - Refresh: IGAttachStateRefreshFunc(ec2conn, s.ID, "available"), + Refresh: IGAttachStateRefreshFunc(ec2conn, d.Id(), "available"), Timeout: 1 * time.Minute, } if _, err := stateConf.WaitForState(); err != nil { return fmt.Errorf( "Error waiting for internet gateway (%s) to attach: %s", - s.ID, err) + d.Id(), err) } return nil } -func resource_aws_internet_gateway_detach( - ec2conn *ec2.EC2, - s *terraform.InstanceState) error { - if s.Attributes["vpc_id"] == "" { - return nil - } +func resourceAwsInternetGatewayDetach(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn log.Printf( "[INFO] Detaching Internet Gateway '%s' from VPC '%s'", - s.ID, - s.Attributes["vpc_id"]) + d.Id(), + d.Get("vpc_id").(string)) + wait := true - _, err := ec2conn.DetachInternetGateway(s.ID, s.Attributes["vpc_id"]) + _, err := ec2conn.DetachInternetGateway(d.Id(), d.Get("vpc_id").(string)) if err != nil { ec2err, ok := err.(*ec2.Error) if ok { @@ -210,40 +185,32 @@ func resource_aws_internet_gateway_detach( } } - delete(s.Attributes, "vpc_id") - if !wait { return nil } // Wait for it to be fully detached before continuing - log.Printf("[DEBUG] Waiting for internet gateway (%s) to detach", s.ID) + log.Printf("[DEBUG] Waiting for internet gateway (%s) to detach", d.Id()) stateConf := &resource.StateChangeConf{ Pending: []string{"attached", "detaching", "available"}, Target: "detached", - Refresh: IGAttachStateRefreshFunc(ec2conn, s.ID, "detached"), + Refresh: IGAttachStateRefreshFunc(ec2conn, d.Id(), "detached"), Timeout: 1 * time.Minute, } if _, err := stateConf.WaitForState(); err != nil { return fmt.Errorf( "Error waiting for internet 
gateway (%s) to detach: %s", - s.ID, err) + d.Id(), err) } return nil } -func resource_aws_internet_gateway_update_state( - s *terraform.InstanceState, - ig *ec2.InternetGateway) (*terraform.InstanceState, error) { - return s, nil -} - // IGStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch // an internet gateway. -func IGStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { +func IGStateRefreshFunc(ec2conn *ec2.EC2, id string) resource.StateRefreshFunc { return func() (interface{}, string, error) { - resp, err := conn.DescribeInternetGateways([]string{id}, ec2.NewFilter()) + resp, err := ec2conn.DescribeInternetGateways([]string{id}, ec2.NewFilter()) if err != nil { ec2err, ok := err.(*ec2.Error) if ok && ec2err.Code == "InvalidInternetGatewayID.NotFound" { diff --git a/builtin/providers/aws/resource_aws_internet_gateway_test.go b/builtin/providers/aws/resource_aws_internet_gateway_test.go index a5974cf25..47a57c6a4 100644 --- a/builtin/providers/aws/resource_aws_internet_gateway_test.go +++ b/builtin/providers/aws/resource_aws_internet_gateway_test.go @@ -55,7 +55,7 @@ func TestAccAWSInternetGateway(t *testing.T) { } func testAccCheckInternetGatewayDestroy(s *terraform.State) error { - conn := testAccProvider.ec2conn + conn := testAccProvider.Meta().(*AWSClient).ec2conn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_internet_gateway" { @@ -97,7 +97,7 @@ func testAccCheckInternetGatewayExists(n string, ig *ec2.InternetGateway) resour return fmt.Errorf("No ID is set") } - conn := testAccProvider.ec2conn + conn := testAccProvider.Meta().(*AWSClient).ec2conn resp, err := conn.DescribeInternetGateways( []string{rs.Primary.ID}, ec2.NewFilter()) if err != nil { diff --git a/builtin/providers/aws/resource_aws_launch_configuration.go b/builtin/providers/aws/resource_aws_launch_configuration.go index 0cb80c395..f22147f4b 100644 --- a/builtin/providers/aws/resource_aws_launch_configuration.go +++ b/builtin/providers/aws/resource_aws_launch_configuration.go @@ -75,13 +75,18 @@ func resourceAwsLaunchConfiguration() *schema.Resource { return hashcode.String(v.(string)) }, }, + + "associate_public_ip_address": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, }, } } func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - autoscalingconn := p.autoscalingconn + autoscalingconn := meta.(*AWSClient).autoscalingconn var createLaunchConfigurationOpts autoscaling.CreateLaunchConfiguration createLaunchConfigurationOpts.Name = d.Get("name").(string) @@ -90,6 +95,7 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface createLaunchConfigurationOpts.InstanceType = d.Get("instance_type").(string) createLaunchConfigurationOpts.KeyName = d.Get("key_name").(string) createLaunchConfigurationOpts.UserData = d.Get("user_data").(string) + createLaunchConfigurationOpts.AssociatePublicIpAddress = d.Get("associate_public_ip_address").(bool) if v, ok := d.GetOk("security_groups"); ok { createLaunchConfigurationOpts.SecurityGroups = expandStringList( @@ -112,28 +118,8 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface }) } -func resourceAwsLaunchConfigurationDelete(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - autoscalingconn := p.autoscalingconn - - log.Printf("[DEBUG] Launch Configuration destroy: %v", d.Id()) - _, err := autoscalingconn.DeleteLaunchConfiguration( - 
&autoscaling.DeleteLaunchConfiguration{Name: d.Id()}) - if err != nil { - autoscalingerr, ok := err.(*autoscaling.Error) - if ok && autoscalingerr.Code == "InvalidConfiguration.NotFound" { - return nil - } - - return err - } - - return nil -} - func resourceAwsLaunchConfigurationRead(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - autoscalingconn := p.autoscalingconn + autoscalingconn := meta.(*AWSClient).autoscalingconn describeOpts := autoscaling.DescribeLaunchConfigurations{ Names: []string{d.Id()}, @@ -167,3 +153,21 @@ func resourceAwsLaunchConfigurationRead(d *schema.ResourceData, meta interface{} return nil } + +func resourceAwsLaunchConfigurationDelete(d *schema.ResourceData, meta interface{}) error { + autoscalingconn := meta.(*AWSClient).autoscalingconn + + log.Printf("[DEBUG] Launch Configuration destroy: %v", d.Id()) + _, err := autoscalingconn.DeleteLaunchConfiguration( + &autoscaling.DeleteLaunchConfiguration{Name: d.Id()}) + if err != nil { + autoscalingerr, ok := err.(*autoscaling.Error) + if ok && autoscalingerr.Code == "InvalidConfiguration.NotFound" { + return nil + } + + return err + } + + return nil +} diff --git a/builtin/providers/aws/resource_aws_launch_configuration_test.go b/builtin/providers/aws/resource_aws_launch_configuration_test.go index 820cdf723..64591ea44 100644 --- a/builtin/providers/aws/resource_aws_launch_configuration_test.go +++ b/builtin/providers/aws/resource_aws_launch_configuration_test.go @@ -28,6 +28,8 @@ func TestAccAWSLaunchConfiguration(t *testing.T) { "aws_launch_configuration.bar", "name", "foobar-terraform-test"), resource.TestCheckResourceAttr( "aws_launch_configuration.bar", "instance_type", "t1.micro"), + resource.TestCheckResourceAttr( + "aws_launch_configuration.bar", "associate_public_ip_address", "true"), ), }, }, @@ -35,7 +37,7 @@ func TestAccAWSLaunchConfiguration(t *testing.T) { } func testAccCheckAWSLaunchConfigurationDestroy(s *terraform.State) error { - conn := testAccProvider.autoscalingconn + conn := testAccProvider.Meta().(*AWSClient).autoscalingconn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_launch_configuration" { @@ -96,7 +98,7 @@ func testAccCheckAWSLaunchConfigurationExists(n string, res *autoscaling.LaunchC return fmt.Errorf("No Launch Configuration ID is set") } - conn := testAccProvider.autoscalingconn + conn := testAccProvider.Meta().(*AWSClient).autoscalingconn describeOpts := autoscaling.DescribeLaunchConfigurations{ Names: []string{rs.Primary.ID}, @@ -124,5 +126,6 @@ resource "aws_launch_configuration" "bar" { image_id = "ami-21f78e11" instance_type = "t1.micro" user_data = "foobar-user-data" + associate_public_ip_address = true } ` diff --git a/builtin/providers/aws/resource_aws_route53_record.go b/builtin/providers/aws/resource_aws_route53_record.go index 31ec0d91f..167405e9d 100644 --- a/builtin/providers/aws/resource_aws_route53_record.go +++ b/builtin/providers/aws/resource_aws_route53_record.go @@ -3,45 +3,62 @@ package aws import ( "fmt" "log" - "strconv" "strings" "time" - "github.com/hashicorp/terraform/flatmap" - "github.com/hashicorp/terraform/helper/config" - "github.com/hashicorp/terraform/helper/diff" "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/helper/schema" "github.com/mitchellh/goamz/route53" ) -func resource_aws_r53_record_validation() *config.Validator { - return &config.Validator{ - Required: []string{ - "zone_id", - "name", - "type", - "ttl", - 
"records.*", +func resourceAwsRoute53Record() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsRoute53RecordCreate, + Read: resourceAwsRoute53RecordRead, + Delete: resourceAwsRoute53RecordDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "zone_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "ttl": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "records": &schema.Schema{ + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Required: true, + ForceNew: true, + }, }, } } -func resource_aws_r53_record_create( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - conn := p.route53 - - // Merge the diff into the state so that we have all the attributes - // properly. - rs := s.MergeDiff(d) +func resourceAwsRoute53RecordCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).route53 // Get the record - rec, err := resource_aws_r53_build_record_set(rs) + rec, err := resourceAwsRoute53RecordBuildSet(d) if err != nil { - return rs, err + return err } // Create the new records. We abuse StateChangeConf for this to @@ -56,9 +73,10 @@ func resource_aws_r53_record_create( }, }, } - zone := rs.Attributes["zone_id"] + zone := d.Get("zone_id").(string) log.Printf("[DEBUG] Creating resource records for zone: %s, name: %s", - zone, rs.Attributes["name"]) + zone, d.Get("name").(string)) + wait := resource.StateChangeConf{ Pending: []string{"rejected"}, Target: "accepted", @@ -79,14 +97,15 @@ func resource_aws_r53_record_create( return resp.ChangeInfo, "accepted", nil }, } + respRaw, err := wait.WaitForState() if err != nil { - return rs, err + return err } changeInfo := respRaw.(route53.ChangeInfo) // Generate an ID - rs.ID = fmt.Sprintf("%s_%s_%s", zone, rs.Attributes["name"], rs.Attributes["type"]) + d.SetId(fmt.Sprintf("%s_%s_%s", zone, d.Get("name").(string), d.Get("type").(string))) // Wait until we are done wait = resource.StateChangeConf{ @@ -96,47 +115,63 @@ func resource_aws_r53_record_create( Timeout: 10 * time.Minute, MinTimeout: 5 * time.Second, Refresh: func() (result interface{}, state string, err error) { - return resource_aws_r53_wait(conn, changeInfo.ID) + return resourceAwsRoute53Wait(conn, changeInfo.ID) }, } _, err = wait.WaitForState() if err != nil { - return rs, err + return err } - return rs, nil + + return nil } -func resource_aws_r53_build_record_set(s *terraform.InstanceState) (*route53.ResourceRecordSet, error) { - // Parse the TTL - ttl, err := strconv.ParseInt(s.Attributes["ttl"], 10, 32) +func resourceAwsRoute53RecordRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).route53 + + zone := d.Get("zone_id").(string) + lopts := &route53.ListOpts{ + Name: d.Get("name").(string), + Type: d.Get("type").(string), + } + resp, err := conn.ListResourceRecordSets(zone, lopts) if err != nil { - return nil, err + return err } - // Expand the records - recRaw := flatmap.Expand(s.Attributes, "records") - var records []string - for _, raw := range recRaw.([]interface{}) { - records = append(records, raw.(string)) + // Scan for a matching record + found := false + for _, record := range resp.Records { + if route53.FQDN(record.Name) != 
route53.FQDN(lopts.Name) { + continue + } + if strings.ToUpper(record.Type) != strings.ToUpper(lopts.Type) { + continue + } + + found = true + + for i, rec := range record.Records { + key := fmt.Sprintf("records.%d", i) + d.Set(key, rec) + } + d.Set("ttl", record.TTL) + + break } - rec := &route53.ResourceRecordSet{ - Name: s.Attributes["name"], - Type: s.Attributes["type"], - TTL: int(ttl), - Records: records, + if !found { + d.SetId("") } - return rec, nil + + return nil } -func resource_aws_r53_record_destroy( - s *terraform.InstanceState, - meta interface{}) error { - p := meta.(*ResourceProvider) - conn := p.route53 +func resourceAwsRoute53RecordDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).route53 - // Get the record - rec, err := resource_aws_r53_build_record_set(s) + // Get the records + rec, err := resourceAwsRoute53RecordBuildSet(d) if err != nil { return err } @@ -151,9 +186,10 @@ func resource_aws_r53_record_destroy( }, }, } - zone := s.Attributes["zone_id"] + zone := d.Get("zone_id").(string) log.Printf("[DEBUG] Deleting resource records for zone: %s, name: %s", - zone, s.Attributes["name"]) + zone, d.Get("name").(string)) + wait := resource.StateChangeConf{ Pending: []string{"rejected"}, Target: "accepted", @@ -179,6 +215,7 @@ func resource_aws_r53_record_destroy( return 42, "accepted", nil }, } + if _, err := wait.WaitForState(); err != nil { return err } @@ -186,68 +223,19 @@ func resource_aws_r53_record_destroy( return nil } -func resource_aws_r53_record_refresh( - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - conn := p.route53 - - zone := s.Attributes["zone_id"] - lopts := &route53.ListOpts{ - Name: s.Attributes["name"], - Type: s.Attributes["type"], - } - resp, err := conn.ListResourceRecordSets(zone, lopts) - if err != nil { - return s, err +func resourceAwsRoute53RecordBuildSet(d *schema.ResourceData) (*route53.ResourceRecordSet, error) { + recs := d.Get("records.#").(int) + records := make([]string, 0, recs) + for i := 0; i < recs; i++ { + key := fmt.Sprintf("records.%d", i) + records = append(records, d.Get(key).(string)) } - // Scan for a matching record - found := false - for _, record := range resp.Records { - if route53.FQDN(record.Name) != route53.FQDN(lopts.Name) { - continue - } - if strings.ToUpper(record.Type) != strings.ToUpper(lopts.Type) { - continue - } - - found = true - resource_aws_r53_record_update_state(s, &record) - break + rec := &route53.ResourceRecordSet{ + Name: d.Get("name").(string), + Type: d.Get("type").(string), + TTL: d.Get("ttl").(int), + Records: records, } - if !found { - s.ID = "" - } - return s, nil -} - -func resource_aws_r53_record_update_state( - s *terraform.InstanceState, - rec *route53.ResourceRecordSet) { - - flatRec := flatmap.Flatten(map[string]interface{}{ - "records": rec.Records, - }) - for k, v := range flatRec { - s.Attributes[k] = v - } - - s.Attributes["ttl"] = strconv.FormatInt(int64(rec.TTL), 10) -} - -func resource_aws_r53_record_diff( - s *terraform.InstanceState, - c *terraform.ResourceConfig, - meta interface{}) (*terraform.InstanceDiff, error) { - b := &diff.ResourceBuilder{ - Attrs: map[string]diff.AttrType{ - "zone_id": diff.AttrTypeCreate, - "name": diff.AttrTypeCreate, - "type": diff.AttrTypeCreate, - "ttl": diff.AttrTypeUpdate, - "records": diff.AttrTypeUpdate, - }, - } - return b.Diff(s, c) + return rec, nil } diff --git a/builtin/providers/aws/resource_aws_route53_record_test.go 
b/builtin/providers/aws/resource_aws_route53_record_test.go index c22fc994f..3f72a9297 100644 --- a/builtin/providers/aws/resource_aws_route53_record_test.go +++ b/builtin/providers/aws/resource_aws_route53_record_test.go @@ -27,7 +27,7 @@ func TestAccRoute53Record(t *testing.T) { } func testAccCheckRoute53RecordDestroy(s *terraform.State) error { - conn := testAccProvider.route53 + conn := testAccProvider.Meta().(*AWSClient).route53 for _, rs := range s.RootModule().Resources { if rs.Type != "aws_route53_record" { continue @@ -56,7 +56,7 @@ func testAccCheckRoute53RecordDestroy(s *terraform.State) error { func testAccCheckRoute53RecordExists(n string) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := testAccProvider.route53 + conn := testAccProvider.Meta().(*AWSClient).route53 rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) diff --git a/builtin/providers/aws/resource_aws_route53_zone.go b/builtin/providers/aws/resource_aws_route53_zone.go index 6b68cf3cf..4a5027899 100644 --- a/builtin/providers/aws/resource_aws_route53_zone.go +++ b/builtin/providers/aws/resource_aws_route53_zone.go @@ -5,46 +5,49 @@ import ( "strings" "time" - "github.com/hashicorp/terraform/helper/config" - "github.com/hashicorp/terraform/helper/diff" "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/helper/schema" "github.com/mitchellh/goamz/route53" ) -func resource_aws_r53_zone_validation() *config.Validator { - return &config.Validator{ - Required: []string{ - "name", +func resourceAwsRoute53Zone() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsRoute53ZoneCreate, + Read: resourceAwsRoute53ZoneRead, + Delete: resourceAwsRoute53ZoneDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "zone_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, }, } } -func resource_aws_r53_zone_create( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - r53 := p.route53 - - // Merge the diff into the state so that we have all the attributes - // properly. 
- rs := s.MergeDiff(d) +func resourceAwsRoute53ZoneCreate(d *schema.ResourceData, meta interface{}) error { + r53 := meta.(*AWSClient).route53 req := &route53.CreateHostedZoneRequest{ - Name: rs.Attributes["name"], + Name: d.Get("name").(string), Comment: "Managed by Terraform", } log.Printf("[DEBUG] Creating Route53 hosted zone: %s", req.Name) resp, err := r53.CreateHostedZone(req) if err != nil { - return rs, err + return err } // Store the zone_id zone := route53.CleanZoneID(resp.HostedZone.ID) - rs.ID = zone - rs.Attributes["zone_id"] = zone + d.Set("zone_id", zone) + d.SetId(zone) // Wait until we are done initializing wait := resource.StateChangeConf{ @@ -52,73 +55,52 @@ func resource_aws_r53_zone_create( Pending: []string{"PENDING"}, Target: "INSYNC", Timeout: 10 * time.Minute, - MinTimeout: 5 * time.Second, + MinTimeout: 2 * time.Second, Refresh: func() (result interface{}, state string, err error) { - return resource_aws_r53_wait(r53, resp.ChangeInfo.ID) + return resourceAwsRoute53Wait(r53, resp.ChangeInfo.ID) }, } _, err = wait.WaitForState() - if err != nil { - return rs, err - } - return rs, nil -} - -// resource_aws_r53_wait checks the status of a change -func resource_aws_r53_wait(r53 *route53.Route53, ref string) (result interface{}, state string, err error) { - status, err := r53.GetChange(ref) - if err != nil { - return nil, "UNKNOWN", err - } - return true, status, nil -} - -func resource_aws_r53_zone_destroy( - s *terraform.InstanceState, - meta interface{}) error { - p := meta.(*ResourceProvider) - r53 := p.route53 - - log.Printf("[DEBUG] Deleting Route53 hosted zone: %s (ID: %s)", - s.Attributes["name"], s.Attributes["zone_id"]) - _, err := r53.DeleteHostedZone(s.Attributes["zone_id"]) if err != nil { return err } return nil } -func resource_aws_r53_zone_refresh( - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - r53 := p.route53 +func resourceAwsRoute53ZoneRead(d *schema.ResourceData, meta interface{}) error { + r53 := meta.(*AWSClient).route53 - _, err := r53.GetHostedZone(s.Attributes["zone_id"]) + _, err := r53.GetHostedZone(d.Id()) if err != nil { // Handle a deleted zone if strings.Contains(err.Error(), "404") { - s.ID = "" - return s, nil + d.SetId("") + return nil } - return s, err + return err } - return s, nil + + return nil } -func resource_aws_r53_zone_diff( - s *terraform.InstanceState, - c *terraform.ResourceConfig, - meta interface{}) (*terraform.InstanceDiff, error) { +func resourceAwsRoute53ZoneDelete(d *schema.ResourceData, meta interface{}) error { + r53 := meta.(*AWSClient).route53 - b := &diff.ResourceBuilder{ - Attrs: map[string]diff.AttrType{ - "name": diff.AttrTypeCreate, - }, - - ComputedAttrs: []string{ - "zone_id", - }, + log.Printf("[DEBUG] Deleting Route53 hosted zone: %s (ID: %s)", + d.Get("name").(string), d.Id()) + _, err := r53.DeleteHostedZone(d.Id()) + if err != nil { + return err } - return b.Diff(s, c) + + return nil +} + +// resourceAwsRoute53Wait checks the status of a change +func resourceAwsRoute53Wait(r53 *route53.Route53, ref string) (result interface{}, state string, err error) { + status, err := r53.GetChange(ref) + if err != nil { + return nil, "UNKNOWN", err + } + return true, status, nil } diff --git a/builtin/providers/aws/resource_aws_route53_zone_test.go b/builtin/providers/aws/resource_aws_route53_zone_test.go index 45475a1a3..d55e208e8 100644 --- a/builtin/providers/aws/resource_aws_route53_zone_test.go +++ 
b/builtin/providers/aws/resource_aws_route53_zone_test.go @@ -25,7 +25,7 @@ func TestAccRoute53Zone(t *testing.T) { } func testAccCheckRoute53ZoneDestroy(s *terraform.State) error { - conn := testAccProvider.route53 + conn := testAccProvider.Meta().(*AWSClient).route53 for _, rs := range s.RootModule().Resources { if rs.Type != "aws_route53_zone" { continue @@ -50,7 +50,7 @@ func testAccCheckRoute53ZoneExists(n string) resource.TestCheckFunc { return fmt.Errorf("No hosted zone ID is set") } - conn := testAccProvider.route53 + conn := testAccProvider.Meta().(*AWSClient).route53 _, err := conn.GetHostedZone(rs.Primary.ID) if err != nil { return fmt.Errorf("Hosted zone err: %v", err) diff --git a/builtin/providers/aws/resource_aws_route_table.go b/builtin/providers/aws/resource_aws_route_table.go index b444a083c..841e456dd 100644 --- a/builtin/providers/aws/resource_aws_route_table.go +++ b/builtin/providers/aws/resource_aws_route_table.go @@ -1,146 +1,192 @@ package aws import ( + "bytes" "fmt" "log" - "reflect" "time" - "github.com/hashicorp/terraform/flatmap" - "github.com/hashicorp/terraform/helper/diff" + "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/helper/schema" "github.com/mitchellh/goamz/ec2" ) -func resource_aws_route_table_create( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn +func resourceAwsRouteTable() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsRouteTableCreate, + Read: resourceAwsRouteTableRead, + Update: resourceAwsRouteTableUpdate, + Delete: resourceAwsRouteTableDelete, + + Schema: map[string]*schema.Schema{ + "vpc_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "route": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cidr_block": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "gateway_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "instance_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + Set: resourceAwsRouteTableHash, + }, + }, + } +} + +func resourceAwsRouteTableCreate(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn // Create the routing table createOpts := &ec2.CreateRouteTable{ - VpcId: d.Attributes["vpc_id"].New, + VpcId: d.Get("vpc_id").(string), } log.Printf("[DEBUG] RouteTable create config: %#v", createOpts) + resp, err := ec2conn.CreateRouteTable(createOpts) if err != nil { - return nil, fmt.Errorf("Error creating route table: %s", err) + return fmt.Errorf("Error creating route table: %s", err) } // Get the ID and store it rt := &resp.RouteTable - s.ID = rt.RouteTableId - log.Printf("[INFO] Route Table ID: %s", s.ID) + d.SetId(rt.RouteTableId) + log.Printf("[INFO] Route Table ID: %s", d.Id()) // Wait for the route table to become available log.Printf( "[DEBUG] Waiting for route table (%s) to become available", - s.ID) + d.Id()) stateConf := &resource.StateChangeConf{ Pending: []string{"pending"}, Target: "ready", - Refresh: RouteTableStateRefreshFunc(ec2conn, s.ID), + Refresh: resourceAwsRouteTableStateRefreshFunc(ec2conn, d.Id()), Timeout: 1 * time.Minute, } if _, err := stateConf.WaitForState(); err != nil { - return s, fmt.Errorf( + return 
fmt.Errorf( "Error waiting for route table (%s) to become available: %s", - s.ID, err) + d.Id(), err) } - // Update our routes - return resource_aws_route_table_update(s, d, meta) + return resourceAwsRouteTableUpdate(d, meta) } -func resource_aws_route_table_update( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn +func resourceAwsRouteTableRead(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn - // Our resulting state - rs := s.MergeDiff(d) - - // Get our routes out of the merge - oldroutes := flatmap.Expand(s.Attributes, "route") - routes := flatmap.Expand(s.MergeDiff(d).Attributes, "route") - - // Determine the route operations we need to perform - ops := routeTableOps(oldroutes, routes) - if len(ops) == 0 { - return s, nil + rtRaw, _, err := resourceAwsRouteTableStateRefreshFunc(ec2conn, d.Id())() + if err != nil { + return err + } + if rtRaw == nil { + return nil } - // Go through each operation, performing each one at a time. - // We store the updated state on each operation so that if any - // individual operation fails, we can return a valid partial state. - var err error - resultRoutes := make([]map[string]string, 0, len(ops)) - for _, op := range ops { - switch op.Op { - case routeTableOpCreate: + rt := rtRaw.(*ec2.RouteTable) + d.Set("vpc_id", rt.VpcId) + + // Create an empty schema.Set to hold all routes + route := &schema.Set{F: resourceAwsRouteTableHash} + + // Loop through the routes and add them to the set + for _, r := range rt.Routes { + if r.GatewayId == "local" { + continue + } + + m := make(map[string]interface{}) + m["cidr_block"] = r.DestinationCidrBlock + + if r.GatewayId != "" { + m["gateway_id"] = r.GatewayId + } + + if r.InstanceId != "" { + m["instance_id"] = r.InstanceId + } + + route.Add(m) + } + d.Set("route", route) + + return nil +} + +func resourceAwsRouteTableUpdate(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn + + // Check if the route set as a whole has changed + if d.HasChange("route") { + o, n := d.GetChange("route") + ors := o.(*schema.Set).Difference(n.(*schema.Set)) + nrs := n.(*schema.Set).Difference(o.(*schema.Set)) + + // Now first loop through all the old routes and delete any obsolete ones + for _, route := range ors.List() { + m := route.(map[string]interface{}) + + // Delete the route as it no longer exists in the config + _, err := ec2conn.DeleteRoute( + d.Id(), m["cidr_block"].(string)) + if err != nil { + return err + } + } + + // Make sure we save the state of the currently configured rules + routes := o.(*schema.Set).Intersection(n.(*schema.Set)) + d.Set("route", routes) + + // Then loop through al the newly configured routes and create them + for _, route := range nrs.List() { + m := route.(map[string]interface{}) + opts := ec2.CreateRoute{ - RouteTableId: s.ID, - DestinationCidrBlock: op.Route.DestinationCidrBlock, - GatewayId: op.Route.GatewayId, - InstanceId: op.Route.InstanceId, + RouteTableId: d.Id(), + DestinationCidrBlock: m["cidr_block"].(string), + GatewayId: m["gateway_id"].(string), + InstanceId: m["instance_id"].(string), } - _, err = ec2conn.CreateRoute(&opts) - case routeTableOpReplace: - opts := ec2.ReplaceRoute{ - RouteTableId: s.ID, - DestinationCidrBlock: op.Route.DestinationCidrBlock, - GatewayId: op.Route.GatewayId, - InstanceId: op.Route.InstanceId, + _, err := ec2conn.CreateRoute(&opts) + if err != nil { + return 
err } - _, err = ec2conn.ReplaceRoute(&opts) - case routeTableOpDelete: - _, err = ec2conn.DeleteRoute( - s.ID, op.Route.DestinationCidrBlock) - } - - if err != nil { - // Exit early so we can return what we've done so far - break - } - - // If we didn't delete the route, append it to the list of routes - // we have. - if op.Op != routeTableOpDelete { - resultMap := map[string]string{"cidr_block": op.Route.DestinationCidrBlock} - if op.Route.GatewayId != "" { - resultMap["gateway_id"] = op.Route.GatewayId - } else if op.Route.InstanceId != "" { - resultMap["instance_id"] = op.Route.InstanceId - } - - resultRoutes = append(resultRoutes, resultMap) + routes.Add(route) + d.Set("route", routes) } } - // Update our state with the settings - flatmap.Map(rs.Attributes).Merge(flatmap.Flatten(map[string]interface{}{ - "route": resultRoutes, - })) - - return rs, err + return resourceAwsRouteTableRead(d, meta) } -func resource_aws_route_table_destroy( - s *terraform.InstanceState, - meta interface{}) error { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn +func resourceAwsRouteTableDelete(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn // First request the routing table since we'll have to disassociate // all the subnets first. - rtRaw, _, err := RouteTableStateRefreshFunc(ec2conn, s.ID)() + rtRaw, _, err := resourceAwsRouteTableStateRefreshFunc(ec2conn, d.Id())() if err != nil { return err } @@ -158,8 +204,8 @@ func resource_aws_route_table_destroy( } // Delete the route table - log.Printf("[INFO] Deleting Route Table: %s", s.ID) - if _, err := ec2conn.DeleteRouteTable(s.ID); err != nil { + log.Printf("[INFO] Deleting Route Table: %s", d.Id()) + if _, err := ec2conn.DeleteRouteTable(d.Id()); err != nil { ec2err, ok := err.(*ec2.Error) if ok && ec2err.Code == "InvalidRouteTableID.NotFound" { return nil @@ -171,147 +217,42 @@ func resource_aws_route_table_destroy( // Wait for the route table to really destroy log.Printf( "[DEBUG] Waiting for route table (%s) to become destroyed", - s.ID) + d.Id()) + stateConf := &resource.StateChangeConf{ Pending: []string{"ready"}, Target: "", - Refresh: RouteTableStateRefreshFunc(ec2conn, s.ID), + Refresh: resourceAwsRouteTableStateRefreshFunc(ec2conn, d.Id()), Timeout: 1 * time.Minute, } if _, err := stateConf.WaitForState(); err != nil { return fmt.Errorf( "Error waiting for route table (%s) to become destroyed: %s", - s.ID, err) + d.Id(), err) } return nil } -func resource_aws_route_table_refresh( - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn +func resourceAwsRouteTableHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["cidr_block"].(string))) - rtRaw, _, err := RouteTableStateRefreshFunc(ec2conn, s.ID)() - if err != nil { - return s, err - } - if rtRaw == nil { - return nil, nil + if v, ok := m["gateway_id"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) } - rt := rtRaw.(*ec2.RouteTable) - return resource_aws_route_table_update_state(s, rt) + if v, ok := m["instance_id"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + return hashcode.String(buf.String()) } -func resource_aws_route_table_diff( - s *terraform.InstanceState, - c *terraform.ResourceConfig, - meta interface{}) (*terraform.InstanceDiff, error) { - b := &diff.ResourceBuilder{ - Attrs: map[string]diff.AttrType{ - "vpc_id": diff.AttrTypeCreate, - "route": 
diff.AttrTypeUpdate, - }, - } - - return b.Diff(s, c) -} - -func resource_aws_route_table_update_state( - s *terraform.InstanceState, - rt *ec2.RouteTable) (*terraform.InstanceState, error) { - s.Attributes["vpc_id"] = rt.VpcId - - return s, nil -} - -// routeTableOp represents a minor operation on the routing table. -// This tells us what we should do to the routing table. -type routeTableOp struct { - Op routeTableOpType - Route ec2.Route -} - -// routeTableOpType is the type of operation related to a route that -// can be operated on a routing table. -type routeTableOpType byte - -const ( - routeTableOpCreate routeTableOpType = iota - routeTableOpReplace - routeTableOpDelete -) - -// routeTableOps takes the old and new routes from flatmap.Expand -// and returns a set of operations that must be performed in order -// to get to the desired state. -func routeTableOps(a interface{}, b interface{}) []routeTableOp { - // Build up the actual ec2.Route objects - oldRoutes := make(map[string]ec2.Route) - newRoutes := make(map[string]ec2.Route) - for i, raws := range []interface{}{a, b} { - result := oldRoutes - if i == 1 { - result = newRoutes - } - if raws == nil { - continue - } - - for _, raw := range raws.([]interface{}) { - m := raw.(map[string]interface{}) - r := ec2.Route{ - DestinationCidrBlock: m["cidr_block"].(string), - } - if v, ok := m["gateway_id"]; ok { - r.GatewayId = v.(string) - } - if v, ok := m["instance_id"]; ok { - r.InstanceId = v.(string) - } - - result[r.DestinationCidrBlock] = r - } - } - - // Now, start building up the ops - ops := make([]routeTableOp, 0, len(newRoutes)) - for n, r := range newRoutes { - op := routeTableOpCreate - if oldR, ok := oldRoutes[n]; ok { - if reflect.DeepEqual(r, oldR) { - // No changes! - continue - } - - op = routeTableOpReplace - } - - ops = append(ops, routeTableOp{ - Op: op, - Route: r, - }) - } - - // Determine what routes we need to delete - for _, op := range ops { - delete(oldRoutes, op.Route.DestinationCidrBlock) - } - for _, r := range oldRoutes { - ops = append(ops, routeTableOp{ - Op: routeTableOpDelete, - Route: r, - }) - } - - return ops -} - -// RouteTableStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch +// resourceAwsRouteTableStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch // a RouteTable. 
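The rewritten create and delete paths in this patch all wait on the resource through resource.StateChangeConf driven by a resource.StateRefreshFunc, as the route table refresh function renamed just below does. The following is a minimal, self-contained sketch of that polling contract only; the pendingThenReady helper, its state names, and the placeholder object are invented for illustration, and only the helper/resource package from this tree is assumed.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
)

// pendingThenReady stands in for a real refresh function such as the route
// table one: it reports "pending" twice, then "ready" with the final object.
func pendingThenReady() resource.StateRefreshFunc {
	polls := 0
	return func() (interface{}, string, error) {
		polls++
		if polls < 3 {
			// Return a non-nil placeholder so the waiter keeps polling.
			return struct{}{}, "pending", nil
		}
		return "route-table-object", "ready", nil
	}
}

func main() {
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"pending"},
		Target:     "ready",
		Refresh:    pendingThenReady(),
		Timeout:    1 * time.Minute,
		MinTimeout: 1 * time.Second,
	}

	raw, err := stateConf.WaitForState()
	if err != nil {
		log.Fatalf("Error waiting for state: %s", err)
	}
	fmt.Printf("final object: %v\n", raw)
}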
-func RouteTableStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { +func resourceAwsRouteTableStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { return func() (interface{}, string, error) { resp, err := conn.DescribeRouteTables([]string{id}, ec2.NewFilter()) if err != nil { diff --git a/builtin/providers/aws/resource_aws_route_table_association.go b/builtin/providers/aws/resource_aws_route_table_association.go index c8932f749..846836008 100644 --- a/builtin/providers/aws/resource_aws_route_table_association.go +++ b/builtin/providers/aws/resource_aws_route_table_association.go @@ -4,77 +4,121 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/helper/diff" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/helper/schema" "github.com/mitchellh/goamz/ec2" ) -func resource_aws_route_table_association_create( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn - rs := s.MergeDiff(d) +func resourceAwsRouteTableAssociation() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsRouteTableAssociationCreate, + Read: resourceAwsRouteTableAssociationRead, + Update: resourceAwsRouteTableAssociationUpdate, + Delete: resourceAwsRouteTableAssociationDelete, + + Schema: map[string]*schema.Schema{ + "subnet_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "route_table_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceAwsRouteTableAssociationCreate(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn log.Printf( "[INFO] Creating route table association: %s => %s", - rs.Attributes["subnet_id"], - rs.Attributes["route_table_id"]) + d.Get("subnet_id").(string), + d.Get("route_table_id").(string)) + resp, err := ec2conn.AssociateRouteTable( - rs.Attributes["route_table_id"], - rs.Attributes["subnet_id"]) + d.Get("route_table_id").(string), + d.Get("subnet_id").(string)) + if err != nil { - return nil, err + return err } // Set the ID and return - rs.ID = resp.AssociationId - log.Printf("[INFO] Association ID: %s", rs.ID) + d.SetId(resp.AssociationId) + log.Printf("[INFO] Association ID: %s", d.Id()) - return rs, nil + return nil } -func resource_aws_route_table_association_update( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn +func resourceAwsRouteTableAssociationRead(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn + + // Get the routing table that this association belongs to + rtRaw, _, err := resourceAwsRouteTableStateRefreshFunc( + ec2conn, d.Get("route_table_id").(string))() + if err != nil { + return err + } + if rtRaw == nil { + return nil + } + rt := rtRaw.(*ec2.RouteTable) + + // Inspect that the association exists + found := false + for _, a := range rt.Associations { + if a.AssociationId == d.Id() { + found = true + d.Set("subnet_id", a.SubnetId) + break + } + } + + if !found { + // It seems it doesn't exist anymore, so clear the ID + d.SetId("") + } + + return nil +} + +func resourceAwsRouteTableAssociationUpdate(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn - rs := s.MergeDiff(d) log.Printf( - "[INFO] Replacing route table association: %s => %s", - 
rs.Attributes["subnet_id"], - rs.Attributes["route_table_id"]) + "[INFO] Creating route table association: %s => %s", + d.Get("subnet_id").(string), + d.Get("route_table_id").(string)) + resp, err := ec2conn.ReassociateRouteTable( - rs.ID, - rs.Attributes["route_table_id"]) + d.Id(), + d.Get("route_table_id").(string)) + if err != nil { ec2err, ok := err.(*ec2.Error) if ok && ec2err.Code == "InvalidAssociationID.NotFound" { // Not found, so just create a new one - return resource_aws_route_table_association_create(s, d, meta) + return resourceAwsRouteTableAssociationCreate(d, meta) } - return s, err + return err } // Update the ID - rs.ID = resp.AssociationId - log.Printf("[INFO] Association ID: %s", rs.ID) + d.SetId(resp.AssociationId) + log.Printf("[INFO] Association ID: %s", d.Id()) - return rs, nil + return nil } -func resource_aws_route_table_association_destroy( - s *terraform.InstanceState, - meta interface{}) error { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn +func resourceAwsRouteTableAssociationDelete(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn - log.Printf("[INFO] Deleting route table association: %s", s.ID) - if _, err := ec2conn.DisassociateRouteTable(s.ID); err != nil { + log.Printf("[INFO] Deleting route table association: %s", d.Id()) + if _, err := ec2conn.DisassociateRouteTable(d.Id()); err != nil { ec2err, ok := err.(*ec2.Error) if ok && ec2err.Code == "InvalidAssociationID.NotFound" { return nil @@ -85,50 +129,3 @@ func resource_aws_route_table_association_destroy( return nil } - -func resource_aws_route_table_association_refresh( - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn - - // Get the routing table that this association belongs to - rtRaw, _, err := RouteTableStateRefreshFunc( - ec2conn, s.Attributes["route_table_id"])() - if err != nil { - return s, err - } - if rtRaw == nil { - return nil, nil - } - rt := rtRaw.(*ec2.RouteTable) - - // Inspect that the association exists - found := false - for _, a := range rt.Associations { - if a.AssociationId == s.ID { - found = true - s.Attributes["subnet_id"] = a.SubnetId - break - } - } - if !found { - return nil, nil - } - - return s, nil -} - -func resource_aws_route_table_association_diff( - s *terraform.InstanceState, - c *terraform.ResourceConfig, - meta interface{}) (*terraform.InstanceDiff, error) { - b := &diff.ResourceBuilder{ - Attrs: map[string]diff.AttrType{ - "subnet_id": diff.AttrTypeCreate, - "route_table_id": diff.AttrTypeUpdate, - }, - } - - return b.Diff(s, c) -} diff --git a/builtin/providers/aws/resource_aws_route_table_association_test.go b/builtin/providers/aws/resource_aws_route_table_association_test.go index bc670cdf5..079fb41f8 100644 --- a/builtin/providers/aws/resource_aws_route_table_association_test.go +++ b/builtin/providers/aws/resource_aws_route_table_association_test.go @@ -37,7 +37,7 @@ func TestAccAWSRouteTableAssociation(t *testing.T) { } func testAccCheckRouteTableAssociationDestroy(s *terraform.State) error { - conn := testAccProvider.ec2conn + conn := testAccProvider.Meta().(*AWSClient).ec2conn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_route_table_association" { @@ -81,7 +81,7 @@ func testAccCheckRouteTableAssociationExists(n string, v *ec2.RouteTable) resour return fmt.Errorf("No ID is set") } - conn := testAccProvider.ec2conn + conn := testAccProvider.Meta().(*AWSClient).ec2conn resp, err := 
conn.DescribeRouteTables( []string{rs.Primary.Attributes["route_table_id"]}, ec2.NewFilter()) if err != nil { diff --git a/builtin/providers/aws/resource_aws_route_table_test.go b/builtin/providers/aws/resource_aws_route_table_test.go index 2fbe2433f..45cae1fb8 100644 --- a/builtin/providers/aws/resource_aws_route_table_test.go +++ b/builtin/providers/aws/resource_aws_route_table_test.go @@ -122,7 +122,7 @@ func TestAccAWSRouteTable_instance(t *testing.T) { } func testAccCheckRouteTableDestroy(s *terraform.State) error { - conn := testAccProvider.ec2conn + conn := testAccProvider.Meta().(*AWSClient).ec2conn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_route_table" { @@ -164,7 +164,7 @@ func testAccCheckRouteTableExists(n string, v *ec2.RouteTable) resource.TestChec return fmt.Errorf("No ID is set") } - conn := testAccProvider.ec2conn + conn := testAccProvider.Meta().(*AWSClient).ec2conn resp, err := conn.DescribeRouteTables( []string{rs.Primary.ID}, ec2.NewFilter()) if err != nil { diff --git a/builtin/providers/aws/resource_aws_s3_bucket.go b/builtin/providers/aws/resource_aws_s3_bucket.go index 36a9f4f0f..2e01078f7 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket.go +++ b/builtin/providers/aws/resource_aws_s3_bucket.go @@ -4,90 +4,70 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/helper/config" - "github.com/hashicorp/terraform/helper/diff" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/helper/schema" "github.com/mitchellh/goamz/s3" ) -func resource_aws_s3_bucket_validation() *config.Validator { - return &config.Validator{ - Required: []string{ - "bucket", - }, - Optional: []string{ - "acl", +func resourceAwsS3Bucket() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsS3BucketCreate, + Read: resourceAwsS3BucketRead, + Delete: resourceAwsS3BucketDelete, + + Schema: map[string]*schema.Schema{ + "bucket": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "acl": &schema.Schema{ + Type: schema.TypeString, + Default: "private", + Optional: true, + ForceNew: true, + }, }, } } -func resource_aws_s3_bucket_create( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - s3conn := p.s3conn +func resourceAwsS3BucketCreate(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn - // Merge the diff into the state so that we have all the attributes - // properly. 
- rs := s.MergeDiff(d) - - // Get the bucket and optional acl - bucket := rs.Attributes["bucket"] - acl := "private" - if other, ok := rs.Attributes["acl"]; ok { - acl = other - } + // Get the bucket and acl + bucket := d.Get("bucket").(string) + acl := d.Get("acl").(string) log.Printf("[DEBUG] S3 bucket create: %s, ACL: %s", bucket, acl) s3Bucket := s3conn.Bucket(bucket) err := s3Bucket.PutBucket(s3.ACL(acl)) if err != nil { - return nil, fmt.Errorf("Error creating S3 bucket: %s", err) + return fmt.Errorf("Error creating S3 bucket: %s", err) } // Assign the bucket name as the resource ID - rs.ID = bucket - return rs, nil + d.SetId(bucket) + + return nil } -func resource_aws_s3_bucket_destroy( - s *terraform.InstanceState, - meta interface{}) error { - p := meta.(*ResourceProvider) - s3conn := p.s3conn +func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn - name := s.Attributes["bucket"] - bucket := s3conn.Bucket(name) - - log.Printf("[DEBUG] S3 Delete Bucket: %s", name) - return bucket.DelBucket() -} - -func resource_aws_s3_bucket_refresh( - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - s3conn := p.s3conn - - bucket := s3conn.Bucket(s.Attributes["bucket"]) + bucket := s3conn.Bucket(d.Id()) resp, err := bucket.Head("/") if err != nil { - return s, err + return err } defer resp.Body.Close() - return s, nil + return nil } -func resource_aws_s3_bucket_diff( - s *terraform.InstanceState, - c *terraform.ResourceConfig, - meta interface{}) (*terraform.InstanceDiff, error) { +func resourceAwsS3BucketDelete(d *schema.ResourceData, meta interface{}) error { + s3conn := meta.(*AWSClient).s3conn - b := &diff.ResourceBuilder{ - Attrs: map[string]diff.AttrType{ - "bucket": diff.AttrTypeCreate, - }, - } - return b.Diff(s, c) + log.Printf("[DEBUG] S3 Delete Bucket: %s", d.Id()) + bucket := s3conn.Bucket(d.Id()) + + return bucket.DelBucket() } diff --git a/builtin/providers/aws/resource_aws_s3_bucket_test.go b/builtin/providers/aws/resource_aws_s3_bucket_test.go index 09eed24aa..8571e0f9e 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket_test.go +++ b/builtin/providers/aws/resource_aws_s3_bucket_test.go @@ -2,13 +2,17 @@ package aws import ( "fmt" + "math/rand" "testing" + "time" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) func TestAccAWSS3Bucket(t *testing.T) { + rand.Seed(time.Now().UnixNano()) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -25,7 +29,7 @@ func TestAccAWSS3Bucket(t *testing.T) { } func testAccCheckAWSS3BucketDestroy(s *terraform.State) error { - conn := testAccProvider.s3conn + conn := testAccProvider.Meta().(*AWSClient).s3conn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_s3_bucket" { @@ -53,7 +57,7 @@ func testAccCheckAWSS3BucketExists(n string) resource.TestCheckFunc { return fmt.Errorf("No S3 Bucket ID is set") } - conn := testAccProvider.s3conn + conn := testAccProvider.Meta().(*AWSClient).s3conn bucket := conn.Bucket(rs.Primary.ID) resp, err := bucket.Head("/") if err != nil { @@ -64,9 +68,11 @@ func testAccCheckAWSS3BucketExists(n string) resource.TestCheckFunc { } } -const testAccAWSS3BucketConfig = ` +// This needs a bit of randoness as the name can only be +// used once globally within AWS +var testAccAWSS3BucketConfig = fmt.Sprintf(` resource "aws_s3_bucket" "bar" { - bucket = "tf-test-bucket" + bucket = 
"tf-test-bucket-%d" acl = "public-read" } -` +`, rand.Int()) diff --git a/builtin/providers/aws/resource_aws_security_group.go b/builtin/providers/aws/resource_aws_security_group.go index 4be7ca484..d874bb996 100644 --- a/builtin/providers/aws/resource_aws_security_group.go +++ b/builtin/providers/aws/resource_aws_security_group.go @@ -94,46 +94,8 @@ func resourceAwsSecurityGroup() *schema.Resource { } } -func resourceAwsSecurityGroupIngressHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%d-", m["from_port"].(int))) - buf.WriteString(fmt.Sprintf("%d-", m["to_port"].(int))) - buf.WriteString(fmt.Sprintf("%s-", m["protocol"].(string))) - - // We need to make sure to sort the strings below so that we always - // generate the same hash code no matter what is in the set. - if v, ok := m["cidr_blocks"]; ok { - vs := v.([]interface{}) - s := make([]string, len(vs)) - for i, raw := range vs { - s[i] = raw.(string) - } - sort.Strings(s) - - for _, v := range s { - buf.WriteString(fmt.Sprintf("%s-", v)) - } - } - if v, ok := m["security_groups"]; ok { - vs := v.(*schema.Set).List() - s := make([]string, len(vs)) - for i, raw := range vs { - s[i] = raw.(string) - } - sort.Strings(s) - - for _, v := range s { - buf.WriteString(fmt.Sprintf("%s-", v)) - } - } - - return hashcode.String(buf.String()) -} - func resourceAwsSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn + ec2conn := meta.(*AWSClient).ec2conn securityGroupOpts := ec2.SecurityGroup{ Name: d.Get("name").(string), @@ -177,103 +139,8 @@ func resourceAwsSecurityGroupCreate(d *schema.ResourceData, meta interface{}) er return resourceAwsSecurityGroupUpdate(d, meta) } -func resourceAwsSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn - - sgRaw, _, err := SGStateRefreshFunc(ec2conn, d.Id())() - if err != nil { - return err - } - if sgRaw == nil { - d.SetId("") - return nil - } - group := sgRaw.(*ec2.SecurityGroupInfo).SecurityGroup - - if d.HasChange("ingress") { - o, n := d.GetChange("ingress") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) - - remove := expandIPPerms(d.Id(), os.Difference(ns).List()) - add := expandIPPerms(d.Id(), ns.Difference(os).List()) - - // TODO: We need to handle partial state better in the in-between - // in this update. - - // TODO: It'd be nicer to authorize before removing, but then we have - // to deal with complicated unrolling to get individual CIDR blocks - // to avoid authorizing already authorized sources. Removing before - // adding is easier here, and Terraform should be fast enough to - // not have service issues. 
- - if len(remove) > 0 { - // Revoke the old rules - _, err = ec2conn.RevokeSecurityGroup(group, remove) - if err != nil { - return fmt.Errorf("Error authorizing security group ingress rules: %s", err) - } - } - - if len(add) > 0 { - // Authorize the new rules - _, err := ec2conn.AuthorizeSecurityGroup(group, add) - if err != nil { - return fmt.Errorf("Error authorizing security group ingress rules: %s", err) - } - } - } - - if err := setTags(ec2conn, d); err != nil { - return err - } else { - d.SetPartial("tags") - } - - return nil -} - -func resourceAwsSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn - - log.Printf("[DEBUG] Security Group destroy: %v", d.Id()) - - return resource.Retry(5*time.Minute, func() error { - _, err := ec2conn.DeleteSecurityGroup(ec2.SecurityGroup{Id: d.Id()}) - if err != nil { - ec2err, ok := err.(*ec2.Error) - if !ok { - return err - } - - switch ec2err.Code { - case "InvalidGroup.NotFound": - return nil - case "DependencyViolation": - // If it is a dependency violation, we want to retry - return err - default: - // Any other error, we want to quit the retry loop immediately - return resource.RetryError{err} - } - } - - return nil - }) -} - func resourceAwsSecurityGroupRead(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn + ec2conn := meta.(*AWSClient).ec2conn sgRaw, _, err := SGStateRefreshFunc(ec2conn, d.Id())() if err != nil { @@ -348,6 +215,135 @@ func resourceAwsSecurityGroupRead(d *schema.ResourceData, meta interface{}) erro return nil } +func resourceAwsSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn + + sgRaw, _, err := SGStateRefreshFunc(ec2conn, d.Id())() + if err != nil { + return err + } + if sgRaw == nil { + d.SetId("") + return nil + } + group := sgRaw.(*ec2.SecurityGroupInfo).SecurityGroup + + if d.HasChange("ingress") { + o, n := d.GetChange("ingress") + if o == nil { + o = new(schema.Set) + } + if n == nil { + n = new(schema.Set) + } + + os := o.(*schema.Set) + ns := n.(*schema.Set) + + remove := expandIPPerms(d.Id(), os.Difference(ns).List()) + add := expandIPPerms(d.Id(), ns.Difference(os).List()) + + // TODO: We need to handle partial state better in the in-between + // in this update. + + // TODO: It'd be nicer to authorize before removing, but then we have + // to deal with complicated unrolling to get individual CIDR blocks + // to avoid authorizing already authorized sources. Removing before + // adding is easier here, and Terraform should be fast enough to + // not have service issues. 
+ + if len(remove) > 0 { + // Revoke the old rules + _, err = ec2conn.RevokeSecurityGroup(group, remove) + if err != nil { + return fmt.Errorf("Error authorizing security group ingress rules: %s", err) + } + } + + if len(add) > 0 { + // Authorize the new rules + _, err := ec2conn.AuthorizeSecurityGroup(group, add) + if err != nil { + return fmt.Errorf("Error authorizing security group ingress rules: %s", err) + } + } + } + + if err := setTags(ec2conn, d); err != nil { + return err + } else { + d.SetPartial("tags") + } + + return resourceAwsSecurityGroupRead(d, meta) +} + +func resourceAwsSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn + + log.Printf("[DEBUG] Security Group destroy: %v", d.Id()) + + return resource.Retry(5*time.Minute, func() error { + _, err := ec2conn.DeleteSecurityGroup(ec2.SecurityGroup{Id: d.Id()}) + if err != nil { + ec2err, ok := err.(*ec2.Error) + if !ok { + return err + } + + switch ec2err.Code { + case "InvalidGroup.NotFound": + return nil + case "DependencyViolation": + // If it is a dependency violation, we want to retry + return err + default: + // Any other error, we want to quit the retry loop immediately + return resource.RetryError{err} + } + } + + return nil + }) +} + +func resourceAwsSecurityGroupIngressHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%d-", m["from_port"].(int))) + buf.WriteString(fmt.Sprintf("%d-", m["to_port"].(int))) + buf.WriteString(fmt.Sprintf("%s-", m["protocol"].(string))) + + // We need to make sure to sort the strings below so that we always + // generate the same hash code no matter what is in the set. + if v, ok := m["cidr_blocks"]; ok { + vs := v.([]interface{}) + s := make([]string, len(vs)) + for i, raw := range vs { + s[i] = raw.(string) + } + sort.Strings(s) + + for _, v := range s { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + } + if v, ok := m["security_groups"]; ok { + vs := v.(*schema.Set).List() + s := make([]string, len(vs)) + for i, raw := range vs { + s[i] = raw.(string) + } + sort.Strings(s) + + for _, v := range s { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + } + + return hashcode.String(buf.String()) +} + // SGStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch // a security group. 
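Both the route table and security group updates above rely on schema.Set with a custom hash function: identical entries hash identically, so os.Difference(ns) yields what must be removed and ns.Difference(os) what must be created. The sketch below shows that mechanic in isolation; cidrHash and the sample rule maps are invented for illustration, and only the helper/schema and helper/hashcode packages from this tree are assumed.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/hashcode"
	"github.com/hashicorp/terraform/helper/schema"
)

// cidrHash is a toy stand-in for hash functions like the ingress one above:
// maps with the same fields produce the same hash, so the set deduplicates them.
func cidrHash(v interface{}) int {
	m := v.(map[string]interface{})
	return hashcode.String(
		fmt.Sprintf("%s-%d-", m["cidr_block"].(string), m["from_port"].(int)))
}

func main() {
	oldRules := &schema.Set{F: cidrHash}
	oldRules.Add(map[string]interface{}{"cidr_block": "10.0.0.0/16", "from_port": 80})
	oldRules.Add(map[string]interface{}{"cidr_block": "10.0.1.0/24", "from_port": 22})

	newRules := &schema.Set{F: cidrHash}
	newRules.Add(map[string]interface{}{"cidr_block": "10.0.0.0/16", "from_port": 80})
	newRules.Add(map[string]interface{}{"cidr_block": "192.168.0.0/16", "from_port": 443})

	// Entries only in the old set get revoked; entries only in the new set
	// get authorized, the same shape as the ingress update above.
	remove := oldRules.Difference(newRules)
	add := newRules.Difference(oldRules)

	fmt.Printf("revoke %d rule(s), authorize %d rule(s)\n",
		len(remove.List()), len(add.List()))
}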
func SGStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { diff --git a/builtin/providers/aws/resource_aws_security_group_test.go b/builtin/providers/aws/resource_aws_security_group_test.go index 5ff4d4c49..8ee38f89e 100644 --- a/builtin/providers/aws/resource_aws_security_group_test.go +++ b/builtin/providers/aws/resource_aws_security_group_test.go @@ -174,7 +174,7 @@ func TestAccAWSSecurityGroup_Change(t *testing.T) { } func testAccCheckAWSSecurityGroupDestroy(s *terraform.State) error { - conn := testAccProvider.ec2conn + conn := testAccProvider.Meta().(*AWSClient).ec2conn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_security_group" { @@ -221,7 +221,7 @@ func testAccCheckAWSSecurityGroupExists(n string, group *ec2.SecurityGroupInfo) return fmt.Errorf("No Security Group is set") } - conn := testAccProvider.ec2conn + conn := testAccProvider.Meta().(*AWSClient).ec2conn sgs := []ec2.SecurityGroup{ ec2.SecurityGroup{ Id: rs.Primary.ID, diff --git a/builtin/providers/aws/resource_aws_subnet.go b/builtin/providers/aws/resource_aws_subnet.go index 249d8bf2c..7bb88f58f 100644 --- a/builtin/providers/aws/resource_aws_subnet.go +++ b/builtin/providers/aws/resource_aws_subnet.go @@ -5,180 +5,162 @@ import ( "log" "time" - "github.com/hashicorp/terraform/helper/diff" "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/helper/schema" "github.com/mitchellh/goamz/ec2" ) -func resource_aws_subnet_create( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn +func resourceAwsSubnet() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSubnetCreate, + Read: resourceAwsSubnetRead, + Update: resourceAwsSubnetUpdate, + Delete: resourceAwsSubnetDelete, - // Merge the diff so that we have all the proper attributes - s = s.MergeDiff(d) + Schema: map[string]*schema.Schema{ + "vpc_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, - // Create the Subnet - createOpts := &ec2.CreateSubnet{ - AvailabilityZone: s.Attributes["availability_zone"], - CidrBlock: s.Attributes["cidr_block"], - VpcId: s.Attributes["vpc_id"], + "cidr_block": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "availability_zone": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "map_public_ip_on_launch": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + + "tags": tagsSchema(), + }, } - log.Printf("[DEBUG] Subnet create config: %#v", createOpts) +} + +func resourceAwsSubnetCreate(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn + + createOpts := &ec2.CreateSubnet{ + AvailabilityZone: d.Get("availability_zone").(string), + CidrBlock: d.Get("cidr_block").(string), + VpcId: d.Get("vpc_id").(string), + } + resp, err := ec2conn.CreateSubnet(createOpts) + if err != nil { - return nil, fmt.Errorf("Error creating subnet: %s", err) + return fmt.Errorf("Error creating subnet: %s", err) } // Get the ID and store it subnet := &resp.Subnet - s.ID = subnet.SubnetId - log.Printf("[INFO] Subnet ID: %s", s.ID) + d.SetId(subnet.SubnetId) + log.Printf("[INFO] Subnet ID: %s", subnet.SubnetId) // Wait for the Subnet to become available - log.Printf( - "[DEBUG] Waiting for subnet (%s) to become available", - s.ID) + 
log.Printf("[DEBUG] Waiting for subnet (%s) to become available", subnet.SubnetId) stateConf := &resource.StateChangeConf{ Pending: []string{"pending"}, Target: "available", - Refresh: SubnetStateRefreshFunc(ec2conn, s.ID), + Refresh: SubnetStateRefreshFunc(ec2conn, subnet.SubnetId), Timeout: 10 * time.Minute, } - subnetRaw, err := stateConf.WaitForState() + + _, err = stateConf.WaitForState() + if err != nil { - return s, fmt.Errorf( - "Error waiting for subnet (%s) to become available: %s", - s.ID, err) + return fmt.Errorf( + "Error waiting for subnet (%s) to become ready: %s", + d.Id(), err) } - // Map public ip on launch must be set in another API call - if attr := s.Attributes["map_public_ip_on_launch"]; attr == "true" { + return resourceAwsSubnetUpdate(d, meta) +} + +func resourceAwsSubnetRead(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn + + resp, err := ec2conn.DescribeSubnets([]string{d.Id()}, ec2.NewFilter()) + + if err != nil { + return err + } + if resp == nil { + return nil + } + + subnet := &resp.Subnets[0] + + d.Set("vpc_id", subnet.VpcId) + d.Set("availability_zone", subnet.AvailabilityZone) + d.Set("cidr_block", subnet.CidrBlock) + d.Set("map_public_ip_on_launch", subnet.MapPublicIpOnLaunch) + d.Set("tags", tagsToMap(subnet.Tags)) + + return nil +} + +func resourceAwsSubnetUpdate(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn + + d.Partial(true) + + if err := setTags(ec2conn, d); err != nil { + return err + } else { + d.SetPartial("tags") + } + + if d.HasChange("map_public_ip_on_launch") { modifyOpts := &ec2.ModifySubnetAttribute{ - SubnetId: s.ID, + SubnetId: d.Id(), MapPublicIpOnLaunch: true, } + log.Printf("[DEBUG] Subnet modify attributes: %#v", modifyOpts) + _, err := ec2conn.ModifySubnetAttribute(modifyOpts) + if err != nil { - return nil, fmt.Errorf("Error modify subnet attributes: %s", err) + return err + } else { + d.SetPartial("map_public_ip_on_launch") } } - // Update our attributes and return - return resource_aws_subnet_update_state(s, subnetRaw.(*ec2.Subnet)) + d.Partial(false) + + return resourceAwsSubnetRead(d, meta) } -func resource_aws_subnet_update( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - // This should never be called because we have no update-able - // attributes - panic("Update for subnet is not supported") -} +func resourceAwsSubnetDelete(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn -func resource_aws_subnet_destroy( - s *terraform.InstanceState, - meta interface{}) error { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn - - log.Printf("[INFO] Deleting Subnet: %s", s.ID) - return resource.Retry(5*time.Minute, func() error { - _, err := ec2conn.DeleteSubnet(s.ID) - if err != nil { - ec2err, ok := err.(*ec2.Error) - if !ok { - return err - } - - switch ec2err.Code { - case "InvalidSubnetID.NotFound": - return nil - case "DependencyViolation": - return err // retry - default: - return resource.RetryError{err} - } + log.Printf("[INFO] Deleting subnet: %s", d.Id()) + if _, err := ec2conn.DeleteSubnet(d.Id()); err != nil { + ec2err, ok := err.(*ec2.Error) + if ok && ec2err.Code == "InvalidSubnetID.NotFound" { + return nil } return fmt.Errorf("Error deleting subnet: %s", err) - }) - - // Wait for the Subnet to actually delete - log.Printf("[DEBUG] Waiting for subnet (%s) to delete", s.ID) - stateConf := &resource.StateChangeConf{ - Pending: 
[]string{"available", "pending"}, - Target: "", - Refresh: SubnetStateRefreshFunc(ec2conn, s.ID), - Timeout: 10 * time.Minute, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf( - "Error waiting for subnet (%s) to destroy: %s", - s.ID, err) } return nil } -func resource_aws_subnet_refresh( - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn - - subnetRaw, _, err := SubnetStateRefreshFunc(ec2conn, s.ID)() - if err != nil { - return s, err - } - if subnetRaw == nil { - return nil, nil - } - - subnet := subnetRaw.(*ec2.Subnet) - return resource_aws_subnet_update_state(s, subnet) -} - -func resource_aws_subnet_diff( - s *terraform.InstanceState, - c *terraform.ResourceConfig, - meta interface{}) (*terraform.InstanceDiff, error) { - b := &diff.ResourceBuilder{ - Attrs: map[string]diff.AttrType{ - "availability_zone": diff.AttrTypeCreate, - "cidr_block": diff.AttrTypeCreate, - "vpc_id": diff.AttrTypeCreate, - "map_public_ip_on_launch": diff.AttrTypeCreate, - }, - - ComputedAttrs: []string{ - "availability_zone", - }, - } - - return b.Diff(s, c) -} - -func resource_aws_subnet_update_state( - s *terraform.InstanceState, - subnet *ec2.Subnet) (*terraform.InstanceState, error) { - s.Attributes["availability_zone"] = subnet.AvailabilityZone - s.Attributes["cidr_block"] = subnet.CidrBlock - s.Attributes["vpc_id"] = subnet.VpcId - if subnet.MapPublicIpOnLaunch { - s.Attributes["map_public_ip_on_launch"] = "true" - } - - return s, nil -} - -// SubnetStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch -// a Subnet. +// SubnetStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch a Subnet. func SubnetStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { return func() (interface{}, string, error) { resp, err := conn.DescribeSubnets([]string{id}, ec2.NewFilter()) diff --git a/builtin/providers/aws/resource_aws_subnet_test.go b/builtin/providers/aws/resource_aws_subnet_test.go index 3941891f9..461c27269 100644 --- a/builtin/providers/aws/resource_aws_subnet_test.go +++ b/builtin/providers/aws/resource_aws_subnet_test.go @@ -42,7 +42,7 @@ func TestAccAWSSubnet(t *testing.T) { } func testAccCheckSubnetDestroy(s *terraform.State) error { - conn := testAccProvider.ec2conn + conn := testAccProvider.Meta().(*AWSClient).ec2conn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_subnet" { @@ -84,7 +84,7 @@ func testAccCheckSubnetExists(n string, v *ec2.Subnet) resource.TestCheckFunc { return fmt.Errorf("No ID is set") } - conn := testAccProvider.ec2conn + conn := testAccProvider.Meta().(*AWSClient).ec2conn resp, err := conn.DescribeSubnets( []string{rs.Primary.ID}, ec2.NewFilter()) if err != nil { diff --git a/builtin/providers/aws/resource_aws_vpc.go b/builtin/providers/aws/resource_aws_vpc.go index e399384bd..da04543ff 100644 --- a/builtin/providers/aws/resource_aws_vpc.go +++ b/builtin/providers/aws/resource_aws_vpc.go @@ -47,8 +47,7 @@ func resourceAwsVpc() *schema.Resource { } func resourceAwsVpcCreate(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn + ec2conn := meta.(*AWSClient).ec2conn // Create the VPC createOpts := &ec2.CreateVpc{ @@ -89,73 +88,8 @@ func resourceAwsVpcCreate(d *schema.ResourceData, meta interface{}) error { return resourceAwsVpcUpdate(d, meta) } -func resourceAwsVpcUpdate(d *schema.ResourceData, meta interface{}) error { - p := 
meta.(*ResourceProvider) - ec2conn := p.ec2conn - - // Turn on partial mode - d.Partial(true) - - if d.HasChange("enable_dns_hostnames") { - options := new(ec2.ModifyVpcAttribute) - options.EnableDnsHostnames = d.Get("enable_dns_hostnames").(bool) - options.SetEnableDnsHostnames = true - - log.Printf( - "[INFO] Modifying enable_dns_hostnames vpc attribute for %s: %#v", - d.Id(), options) - if _, err := ec2conn.ModifyVpcAttribute(d.Id(), options); err != nil { - return err - } - - d.SetPartial("enable_dns_hostnames") - } - - if d.HasChange("enable_dns_support") { - options := new(ec2.ModifyVpcAttribute) - options.EnableDnsSupport = d.Get("enable_dns_support").(bool) - options.SetEnableDnsSupport = true - - log.Printf( - "[INFO] Modifying enable_dns_support vpc attribute for %s: %#v", - d.Id(), options) - if _, err := ec2conn.ModifyVpcAttribute(d.Id(), options); err != nil { - return err - } - - d.SetPartial("enable_dns_support") - } - - if err := setTags(ec2conn, d); err != nil { - return err - } else { - d.SetPartial("tags") - } - - d.Partial(false) - return resourceAwsVpcRead(d, meta) -} - -func resourceAwsVpcDelete(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn - - log.Printf("[INFO] Deleting VPC: %s", d.Id()) - if _, err := ec2conn.DeleteVpc(d.Id()); err != nil { - ec2err, ok := err.(*ec2.Error) - if ok && ec2err.Code == "InvalidVpcID.NotFound" { - return nil - } - - return fmt.Errorf("Error deleting VPC: %s", err) - } - - return nil -} - func resourceAwsVpcRead(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - ec2conn := p.ec2conn + ec2conn := meta.(*AWSClient).ec2conn // Refresh the VPC state vpcRaw, _, err := VPCStateRefreshFunc(ec2conn, d.Id())() @@ -201,6 +135,68 @@ func resourceAwsVpcRead(d *schema.ResourceData, meta interface{}) error { return nil } +func resourceAwsVpcUpdate(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn + + // Turn on partial mode + d.Partial(true) + + if d.HasChange("enable_dns_hostnames") { + options := new(ec2.ModifyVpcAttribute) + options.EnableDnsHostnames = d.Get("enable_dns_hostnames").(bool) + options.SetEnableDnsHostnames = true + + log.Printf( + "[INFO] Modifying enable_dns_hostnames vpc attribute for %s: %#v", + d.Id(), options) + if _, err := ec2conn.ModifyVpcAttribute(d.Id(), options); err != nil { + return err + } + + d.SetPartial("enable_dns_hostnames") + } + + if d.HasChange("enable_dns_support") { + options := new(ec2.ModifyVpcAttribute) + options.EnableDnsSupport = d.Get("enable_dns_support").(bool) + options.SetEnableDnsSupport = true + + log.Printf( + "[INFO] Modifying enable_dns_support vpc attribute for %s: %#v", + d.Id(), options) + if _, err := ec2conn.ModifyVpcAttribute(d.Id(), options); err != nil { + return err + } + + d.SetPartial("enable_dns_support") + } + + if err := setTags(ec2conn, d); err != nil { + return err + } else { + d.SetPartial("tags") + } + + d.Partial(false) + return resourceAwsVpcRead(d, meta) +} + +func resourceAwsVpcDelete(d *schema.ResourceData, meta interface{}) error { + ec2conn := meta.(*AWSClient).ec2conn + + log.Printf("[INFO] Deleting VPC: %s", d.Id()) + if _, err := ec2conn.DeleteVpc(d.Id()); err != nil { + ec2err, ok := err.(*ec2.Error) + if ok && ec2err.Code == "InvalidVpcID.NotFound" { + return nil + } + + return fmt.Errorf("Error deleting VPC: %s", err) + } + + return nil +} + // VPCStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch // a VPC. 
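The security group delete above wraps DeleteSecurityGroup in resource.Retry, returning the error as-is to retry on DependencyViolation and wrapping it in resource.RetryError to abort on anything else, while the VPC and subnet deletes in this patch return immediately. Below is a self-contained sketch of that retry contract; the deleteWithDependency helper and its attempt counter are invented for illustration, and only the helper/resource package from this tree is assumed.

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
)

// deleteWithDependency simulates an API delete that fails twice with a
// retryable dependency error before finally succeeding.
func deleteWithDependency() func() (string, error) {
	attempts := 0
	return func() (string, error) {
		attempts++
		if attempts < 3 {
			return "DependencyViolation", errors.New("resource still has dependents")
		}
		return "", nil
	}
}

func main() {
	attempt := deleteWithDependency()

	err := resource.Retry(1*time.Minute, func() error {
		code, err := attempt()
		if err == nil {
			return nil
		}
		switch code {
		case "DependencyViolation":
			// Returning the plain error asks Retry to try again.
			return err
		default:
			// Wrapping it aborts the retry loop immediately.
			return resource.RetryError{err}
		}
	})

	fmt.Println("delete result:", err)
}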
func VPCStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc { diff --git a/builtin/providers/aws/resource_aws_vpc_test.go b/builtin/providers/aws/resource_aws_vpc_test.go index 86c85655e..4ef2e77aa 100644 --- a/builtin/providers/aws/resource_aws_vpc_test.go +++ b/builtin/providers/aws/resource_aws_vpc_test.go @@ -91,7 +91,7 @@ func TestAccVpcUpdate(t *testing.T) { } func testAccCheckVpcDestroy(s *terraform.State) error { - conn := testAccProvider.ec2conn + conn := testAccProvider.Meta().(*AWSClient).ec2conn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_vpc" { @@ -142,7 +142,7 @@ func testAccCheckVpcExists(n string, vpc *ec2.VPC) resource.TestCheckFunc { return fmt.Errorf("No VPC ID is set") } - conn := testAccProvider.ec2conn + conn := testAccProvider.Meta().(*AWSClient).ec2conn resp, err := conn.DescribeVpcs([]string{rs.Primary.ID}, ec2.NewFilter()) if err != nil { return err diff --git a/builtin/providers/aws/resource_provider.go b/builtin/providers/aws/resource_provider.go deleted file mode 100644 index 4f7818743..000000000 --- a/builtin/providers/aws/resource_provider.go +++ /dev/null @@ -1,135 +0,0 @@ -package aws - -import ( - "log" - - "github.com/hashicorp/terraform/helper/config" - "github.com/hashicorp/terraform/helper/multierror" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/goamz/autoscaling" - "github.com/mitchellh/goamz/ec2" - "github.com/mitchellh/goamz/elb" - "github.com/mitchellh/goamz/rds" - "github.com/mitchellh/goamz/route53" - "github.com/mitchellh/goamz/s3" -) - -type ResourceProvider struct { - Config Config - - ec2conn *ec2.EC2 - elbconn *elb.ELB - autoscalingconn *autoscaling.AutoScaling - s3conn *s3.S3 - rdsconn *rds.Rds - route53 *route53.Route53 - - // This is the schema.Provider. Eventually this will replace much - // of this structure. For now it is an element of it for compatiblity. - p *schema.Provider -} - -func (p *ResourceProvider) Input( - input terraform.UIInput, - c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { - return Provider().Input(input, c) -} - -func (p *ResourceProvider) Validate(c *terraform.ResourceConfig) ([]string, []error) { - return Provider().Validate(c) -} - -func (p *ResourceProvider) ValidateResource( - t string, c *terraform.ResourceConfig) ([]string, []error) { - prov := Provider() - if _, ok := prov.ResourcesMap[t]; ok { - return prov.ValidateResource(t, c) - } - - return resourceMap.Validate(t, c) -} - -func (p *ResourceProvider) Configure(c *terraform.ResourceConfig) error { - if _, err := config.Decode(&p.Config, c.Config); err != nil { - return err - } - - // Get the auth and region. This can fail if keys/regions were not - // specified and we're attempting to use the environment. 
- var errs []error - log.Println("[INFO] Building AWS auth structure") - auth, err := p.Config.AWSAuth() - if err != nil { - errs = append(errs, err) - } - - log.Println("[INFO] Building AWS region structure") - region, err := p.Config.AWSRegion() - if err != nil { - errs = append(errs, err) - } - - if len(errs) == 0 { - log.Println("[INFO] Initializing EC2 connection") - p.ec2conn = ec2.New(auth, region) - log.Println("[INFO] Initializing ELB connection") - p.elbconn = elb.New(auth, region) - log.Println("[INFO] Initializing AutoScaling connection") - p.autoscalingconn = autoscaling.New(auth, region) - log.Println("[INFO] Initializing S3 connection") - p.s3conn = s3.New(auth, region) - log.Println("[INFO] Initializing RDS connection") - p.rdsconn = rds.New(auth, region) - log.Println("[INFO] Initializing Route53 connection") - p.route53 = route53.New(auth, region) - } - - if len(errs) > 0 { - return &multierror.Error{Errors: errs} - } - - // Create the provider, set the meta - p.p = Provider() - p.p.SetMeta(p) - - return nil -} - -func (p *ResourceProvider) Apply( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - d *terraform.InstanceDiff) (*terraform.InstanceState, error) { - if _, ok := p.p.ResourcesMap[info.Type]; ok { - return p.p.Apply(info, s, d) - } - - return resourceMap.Apply(info, s, d, p) -} - -func (p *ResourceProvider) Diff( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - if _, ok := p.p.ResourcesMap[info.Type]; ok { - return p.p.Diff(info, s, c) - } - - return resourceMap.Diff(info, s, c, p) -} - -func (p *ResourceProvider) Refresh( - info *terraform.InstanceInfo, - s *terraform.InstanceState) (*terraform.InstanceState, error) { - if _, ok := p.p.ResourcesMap[info.Type]; ok { - return p.p.Refresh(info, s) - } - - return resourceMap.Refresh(info, s, p) -} - -func (p *ResourceProvider) Resources() []terraform.ResourceType { - result := resourceMap.Resources() - result = append(result, Provider().Resources()...) 
- return result -} diff --git a/builtin/providers/aws/resource_provider_test.go b/builtin/providers/aws/resource_provider_test.go deleted file mode 100644 index b376f62b8..000000000 --- a/builtin/providers/aws/resource_provider_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package aws - -import ( - "log" - "os" - "reflect" - "testing" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *ResourceProvider - -func init() { - testAccProvider = new(ResourceProvider) - testAccProviders = map[string]terraform.ResourceProvider{ - "aws": testAccProvider, - } -} - -func TestResourceProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = new(ResourceProvider) -} - -func TestResourceProvider_Configure(t *testing.T) { - rp := new(ResourceProvider) - - raw := map[string]interface{}{ - "access_key": "foo", - "secret_key": "bar", - "region": "us-east-1", - } - - rawConfig, err := config.NewRawConfig(raw) - if err != nil { - t.Fatalf("err: %s", err) - } - - err = rp.Configure(terraform.NewResourceConfig(rawConfig)) - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := Config{ - AccessKey: "foo", - SecretKey: "bar", - Region: "us-east-1", - } - - if !reflect.DeepEqual(rp.Config, expected) { - t.Fatalf("bad: %#v", rp.Config) - } - - if rp.p == nil { - t.Fatal("provider should be set") - } - if !reflect.DeepEqual(rp, rp.p.Meta()) { - t.Fatalf("meta should be set") - } -} - -func TestResourceProvider_ConfigureBadRegion(t *testing.T) { - rp := new(ResourceProvider) - - raw := map[string]interface{}{ - "access_key": "foo", - "secret_key": "bar", - "region": "blah", - } - - rawConfig, err := config.NewRawConfig(raw) - if err != nil { - t.Fatalf("err: %s", err) - } - - err = rp.Configure(terraform.NewResourceConfig(rawConfig)) - if err == nil { - t.Fatalf("should have err: bad region") - } -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("AWS_ACCESS_KEY"); v == "" { - t.Fatal("AWS_ACCESS_KEY must be set for acceptance tests") - } - if v := os.Getenv("AWS_SECRET_KEY"); v == "" { - t.Fatal("AWS_SECRET_KEY must be set for acceptance tests") - } - if v := os.Getenv("AWS_REGION"); v == "" { - log.Println("[INFO] Test: Using us-west-2 as test region") - os.Setenv("AWS_REGION", "us-west-2") - } -} diff --git a/builtin/providers/aws/resources.go b/builtin/providers/aws/resources.go deleted file mode 100644 index 891ef6556..000000000 --- a/builtin/providers/aws/resources.go +++ /dev/null @@ -1,105 +0,0 @@ -package aws - -import ( - "github.com/hashicorp/terraform/helper/config" - "github.com/hashicorp/terraform/helper/resource" -) - -// resourceMap is the mapping of resources we support to their basic -// operations. This makes it easy to implement new resource types. 
-var resourceMap *resource.Map - -func init() { - resourceMap = &resource.Map{ - Mapping: map[string]resource.Resource{ - "aws_db_instance": resource.Resource{ - ConfigValidator: resource_aws_db_instance_validation(), - Create: resource_aws_db_instance_create, - Destroy: resource_aws_db_instance_destroy, - Diff: resource_aws_db_instance_diff, - Refresh: resource_aws_db_instance_refresh, - Update: resource_aws_db_instance_update, - }, - - "aws_db_security_group": resource.Resource{ - ConfigValidator: resource_aws_db_security_group_validation(), - Create: resource_aws_db_security_group_create, - Destroy: resource_aws_db_security_group_destroy, - Diff: resource_aws_db_security_group_diff, - Refresh: resource_aws_db_security_group_refresh, - }, - - "aws_internet_gateway": resource.Resource{ - Create: resource_aws_internet_gateway_create, - Destroy: resource_aws_internet_gateway_destroy, - Diff: resource_aws_internet_gateway_diff, - Refresh: resource_aws_internet_gateway_refresh, - Update: resource_aws_internet_gateway_update, - }, - - "aws_route_table": resource.Resource{ - ConfigValidator: &config.Validator{ - Required: []string{ - "vpc_id", - "route.*.cidr_block", - }, - Optional: []string{ - "route.*.gateway_id", - "route.*.instance_id", - }, - }, - Create: resource_aws_route_table_create, - Destroy: resource_aws_route_table_destroy, - Diff: resource_aws_route_table_diff, - Refresh: resource_aws_route_table_refresh, - Update: resource_aws_route_table_update, - }, - - "aws_route_table_association": resource.Resource{ - ConfigValidator: &config.Validator{ - Required: []string{ - "route_table_id", - "subnet_id", - }, - }, - Create: resource_aws_route_table_association_create, - Destroy: resource_aws_route_table_association_destroy, - Diff: resource_aws_route_table_association_diff, - Refresh: resource_aws_route_table_association_refresh, - Update: resource_aws_route_table_association_update, - }, - - "aws_route53_record": resource.Resource{ - ConfigValidator: resource_aws_r53_record_validation(), - Create: resource_aws_r53_record_create, - Destroy: resource_aws_r53_record_destroy, - Diff: resource_aws_r53_record_diff, - Refresh: resource_aws_r53_record_refresh, - Update: resource_aws_r53_record_create, - }, - - "aws_route53_zone": resource.Resource{ - ConfigValidator: resource_aws_r53_zone_validation(), - Create: resource_aws_r53_zone_create, - Destroy: resource_aws_r53_zone_destroy, - Diff: resource_aws_r53_zone_diff, - Refresh: resource_aws_r53_zone_refresh, - }, - - "aws_s3_bucket": resource.Resource{ - ConfigValidator: resource_aws_s3_bucket_validation(), - Create: resource_aws_s3_bucket_create, - Destroy: resource_aws_s3_bucket_destroy, - Diff: resource_aws_s3_bucket_diff, - Refresh: resource_aws_s3_bucket_refresh, - }, - - "aws_subnet": resource.Resource{ - Create: resource_aws_subnet_create, - Destroy: resource_aws_subnet_destroy, - Diff: resource_aws_subnet_diff, - Refresh: resource_aws_subnet_refresh, - }, - }, - } -} diff --git a/builtin/providers/aws/structure.go b/builtin/providers/aws/structure.go index 3af176c2a..2e634931b 100644 --- a/builtin/providers/aws/structure.go +++ b/builtin/providers/aws/structure.go @@ -6,6 +6,7 @@ import ( "github.com/hashicorp/terraform/helper/schema" "github.com/mitchellh/goamz/ec2" "github.com/mitchellh/goamz/elb" + "github.com/mitchellh/goamz/rds" ) // Takes the result of flatmap.Expand for an array of listeners and @@ -87,6 +88,30 @@ func expandIPPerms(id string, configured []interface{}) []ec2.IPPerm { return perms } +// Takes the result of 
flatmap.Expand for an array of parameters and +// returns Parameter API compatible objects +func expandParameters(configured []interface{}) ([]rds.Parameter, error) { + parameters := make([]rds.Parameter, 0, len(configured)) + + // Loop over our configured parameters and create + // an array of goamz compatabile objects + for _, pRaw := range configured { + data := pRaw.(map[string]interface{}) + + p := rds.Parameter{ + // Only immediate is supported for now; should add in pending-reboot at some point + // but gets tricky as the DescribeParameterGroups AWS call doesn't return this data + ApplyMethod: "immediate", + ParameterName: data["name"].(string), + ParameterValue: data["value"].(string), + } + + parameters = append(parameters, p) + } + + return parameters, nil +} + // Flattens an array of ipPerms into a list of primitives that // flatmap.Flatten() can handle func flattenIPPerms(list []ec2.IPPerm) []map[string]interface{} { @@ -162,6 +187,18 @@ func flattenListeners(list []elb.Listener) []map[string]interface{} { return result } +// Flattens an array of Parameters into a []map[string]interface{} +func flattenParameters(list []rds.Parameter) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(list)) + for _, i := range list { + result = append(result, map[string]interface{}{ + "name": strings.ToLower(i.ParameterName), + "value": strings.ToLower(i.ParameterValue), + }) + } + return result +} + // Takes the result of flatmap.Expand for an array of strings // and returns a []string func expandStringList(configured []interface{}) []string { diff --git a/builtin/providers/aws/structure_test.go b/builtin/providers/aws/structure_test.go index e230cdf91..09ae987c8 100644 --- a/builtin/providers/aws/structure_test.go +++ b/builtin/providers/aws/structure_test.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/terraform/helper/schema" "github.com/mitchellh/goamz/ec2" "github.com/mitchellh/goamz/elb" + "github.com/mitchellh/goamz/rds" ) // Returns test configuration @@ -254,3 +255,59 @@ func Test_expandStringList(t *testing.T) { } } + +func Test_expandParameters(t *testing.T) { + expanded := []interface{}{ + map[string]interface{}{ + "name": "character_set_client", + "value": "utf8", + "apply_method": "immediate", + }, + } + parameters, err := expandParameters(expanded) + if err != nil { + t.Fatalf("bad: %#v", err) + } + + expected := rds.Parameter{ + ParameterName: "character_set_client", + ParameterValue: "utf8", + ApplyMethod: "immediate", + } + + if !reflect.DeepEqual(parameters[0], expected) { + t.Fatalf( + "Got:\n\n%#v\n\nExpected:\n\n%#v\n", + parameters[0], + expected) + } +} + +func Test_flattenParameters(t *testing.T) { + cases := []struct { + Input []rds.Parameter + Output []map[string]interface{} + }{ + { + Input: []rds.Parameter{ + rds.Parameter{ + ParameterName: "character_set_client", + ParameterValue: "utf8", + }, + }, + Output: []map[string]interface{}{ + map[string]interface{}{ + "name": "character_set_client", + "value": "utf8", + }, + }, + }, + } + + for _, tc := range cases { + output := flattenParameters(tc.Input) + if !reflect.DeepEqual(output, tc.Output) { + t.Fatalf("Got:\n\n%#v\n\nExpected:\n\n%#v", output, tc.Output) + } + } +} diff --git a/builtin/providers/aws/tags_test.go b/builtin/providers/aws/tags_test.go index eb30f346e..6e89492ca 100644 --- a/builtin/providers/aws/tags_test.go +++ b/builtin/providers/aws/tags_test.go @@ -53,10 +53,10 @@ func TestDiffTags(t *testing.T) { cm := tagsToMap(c) rm := tagsToMap(r) if !reflect.DeepEqual(cm, 
tc.Create) { - t.Fatalf("%i: bad create: %#v", i, cm) + t.Fatalf("%d: bad create: %#v", i, cm) } if !reflect.DeepEqual(rm, tc.Remove) { - t.Fatalf("%i: bad remove: %#v", i, rm) + t.Fatalf("%d: bad remove: %#v", i, rm) } } } diff --git a/builtin/providers/cloudflare/config.go b/builtin/providers/cloudflare/config.go index 2aa020b62..4e4bb6d2f 100644 --- a/builtin/providers/cloudflare/config.go +++ b/builtin/providers/cloudflare/config.go @@ -8,12 +8,11 @@ import ( ) type Config struct { - Token string `mapstructure:"token"` - Email string `mapstructure:"email"` + Email string + Token string } // Client() returns a new client for accessing cloudflare. -// func (c *Config) Client() (*cloudflare.Client, error) { client, err := cloudflare.NewClient(c.Email, c.Token) diff --git a/builtin/providers/cloudflare/provider.go b/builtin/providers/cloudflare/provider.go new file mode 100644 index 000000000..f92e67673 --- /dev/null +++ b/builtin/providers/cloudflare/provider.go @@ -0,0 +1,54 @@ +package cloudflare + +import ( + "os" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +// Provider returns a terraform.ResourceProvider. +func Provider() terraform.ResourceProvider { + return &schema.Provider{ + Schema: map[string]*schema.Schema{ + "email": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: envDefaultFunc("CLOUDFLARE_EMAIL"), + Description: "A registered CloudFlare email address.", + }, + + "token": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: envDefaultFunc("CLOUDFLARE_TOKEN"), + Description: "The token key for API operations.", + }, + }, + + ResourcesMap: map[string]*schema.Resource{ + "cloudflare_record": resourceCloudFlareRecord(), + }, + + ConfigureFunc: providerConfigure, + } +} + +func envDefaultFunc(k string) schema.SchemaDefaultFunc { + return func() (interface{}, error) { + if v := os.Getenv(k); v != "" { + return v, nil + } + + return nil, nil + } +} + +func providerConfigure(d *schema.ResourceData) (interface{}, error) { + config := Config{ + Email: d.Get("email").(string), + Token: d.Get("token").(string), + } + + return config.Client() +} diff --git a/builtin/providers/cloudflare/provider_test.go b/builtin/providers/cloudflare/provider_test.go new file mode 100644 index 000000000..3306633cf --- /dev/null +++ b/builtin/providers/cloudflare/provider_test.go @@ -0,0 +1,43 @@ +package cloudflare + +import ( + "os" + "testing" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +var testAccProviders map[string]terraform.ResourceProvider +var testAccProvider *schema.Provider + +func init() { + testAccProvider = Provider().(*schema.Provider) + testAccProviders = map[string]terraform.ResourceProvider{ + "cloudflare": testAccProvider, + } +} + +func TestProvider(t *testing.T) { + if err := Provider().(*schema.Provider).InternalValidate(); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestProvider_impl(t *testing.T) { + var _ terraform.ResourceProvider = Provider() +} + +func testAccPreCheck(t *testing.T) { + if v := os.Getenv("CLOUDFLARE_EMAIL"); v == "" { + t.Fatal("CLOUDFLARE_EMAIL must be set for acceptance tests") + } + + if v := os.Getenv("CLOUDFLARE_TOKEN"); v == "" { + t.Fatal("CLOUDFLARE_TOKEN must be set for acceptance tests") + } + + if v := os.Getenv("CLOUDFLARE_DOMAIN"); v == "" { + t.Fatal("CLOUDFLARE_DOMAIN must be set for acceptance tests. 
The domain is used to ` and destroy record against.") + } +} diff --git a/builtin/providers/cloudflare/resource_cloudflare_record.go b/builtin/providers/cloudflare/resource_cloudflare_record.go index 8f9b964a5..c1a547959 100644 --- a/builtin/providers/cloudflare/resource_cloudflare_record.go +++ b/builtin/providers/cloudflare/resource_cloudflare_record.go @@ -4,96 +4,140 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/helper/config" - "github.com/hashicorp/terraform/helper/diff" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/helper/schema" "github.com/pearkes/cloudflare" ) -func resource_cloudflare_record_create( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - client := p.client +func resourceCloudFlareRecord() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudFlareRecordCreate, + Read: resourceCloudFlareRecordRead, + Update: resourceCloudFlareRecordUpdate, + Delete: resourceCloudFlareRecordDelete, - // Merge the diff into the state so that we have all the attributes - // properly. - rs := s.MergeDiff(d) + Schema: map[string]*schema.Schema{ + "domain": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, - var err error + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, - newRecord := cloudflare.CreateRecord{ - Name: rs.Attributes["name"], - Priority: rs.Attributes["priority"], - Type: rs.Attributes["type"], - Content: rs.Attributes["value"], - Ttl: rs.Attributes["ttl"], + "hostname": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "value": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "ttl": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "priority": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func resourceCloudFlareRecordCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*cloudflare.Client) + + // Create the new record + newRecord := &cloudflare.CreateRecord{ + Name: d.Get("name").(string), + Type: d.Get("type").(string), + Content: d.Get("value").(string), + } + + if ttl, ok := d.GetOk("ttl"); ok { + newRecord.Ttl = ttl.(string) + } + + if priority, ok := d.GetOk("priority"); ok { + newRecord.Priority = priority.(string) } log.Printf("[DEBUG] record create configuration: %#v", newRecord) - rec, err := client.CreateRecord(rs.Attributes["domain"], &newRecord) + rec, err := client.CreateRecord(d.Get("domain").(string), newRecord) if err != nil { - return nil, fmt.Errorf("Failed to create record: %s", err) + return fmt.Errorf("Failed to create record: %s", err) } - rs.ID = rec.Id - log.Printf("[INFO] record ID: %s", rs.ID) + d.SetId(rec.Id) + log.Printf("[INFO] record ID: %s", d.Id()) - record, err := resource_cloudflare_record_retrieve(rs.Attributes["domain"], rs.ID, client) - if err != nil { - return nil, fmt.Errorf("Couldn't find record: %s", err) - } - - return resource_cloudflare_record_update_state(rs, record) + return resourceCloudFlareRecordRead(d, meta) } -func resource_cloudflare_record_update( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - client := p.client - rs := s.MergeDiff(d) +func resourceCloudFlareRecordRead(d *schema.ResourceData, meta interface{}) error { + client := 
meta.(*cloudflare.Client) - // Cloudflare requires we send all values - // for an update request, so we just - // merge out diff and send the current - // state of affairs to them - updateRecord := cloudflare.UpdateRecord{ - Name: rs.Attributes["name"], - Content: rs.Attributes["value"], - Type: rs.Attributes["type"], - Ttl: rs.Attributes["ttl"], - Priority: rs.Attributes["priority"], + rec, err := client.RetrieveRecord(d.Get("domain").(string), d.Id()) + if err != nil { + return fmt.Errorf("Couldn't find record: %s", err) + } + + d.Set("name", rec.Name) + d.Set("hostname", rec.FullName) + d.Set("type", rec.Type) + d.Set("value", rec.Value) + d.Set("ttl", rec.Ttl) + d.Set("priority", rec.Priority) + + return nil +} + +func resourceCloudFlareRecordUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*cloudflare.Client) + + // CloudFlare requires we send all values for an update request + updateRecord := &cloudflare.UpdateRecord{ + Name: d.Get("name").(string), + Type: d.Get("type").(string), + Content: d.Get("value").(string), + } + + if ttl, ok := d.GetOk("ttl"); ok { + updateRecord.Ttl = ttl.(string) + } + + if priority, ok := d.GetOk("priority"); ok { + updateRecord.Priority = priority.(string) } log.Printf("[DEBUG] record update configuration: %#v", updateRecord) - err := client.UpdateRecord(rs.Attributes["domain"], rs.ID, &updateRecord) + err := client.UpdateRecord(d.Get("domain").(string), d.Id(), updateRecord) if err != nil { - return rs, fmt.Errorf("Failed to update record: %s", err) + return fmt.Errorf("Failed to update record: %s", err) } - record, err := resource_cloudflare_record_retrieve(rs.Attributes["domain"], rs.ID, client) - if err != nil { - return rs, fmt.Errorf("Couldn't find record: %s", err) - } - - return resource_cloudflare_record_update_state(rs, record) + return resourceCloudFlareRecordRead(d, meta) } -func resource_cloudflare_record_destroy( - s *terraform.InstanceState, - meta interface{}) error { - p := meta.(*ResourceProvider) - client := p.client +func resourceCloudFlareRecordDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*cloudflare.Client) - log.Printf("[INFO] Deleting record: %s, %s", s.Attributes["domain"], s.ID) + log.Printf("[INFO] Deleting record: %s, %s", d.Get("domain").(string), d.Id()) - err := client.DestroyRecord(s.Attributes["domain"], s.ID) + err := client.DestroyRecord(d.Get("domain").(string), d.Id()) if err != nil { return fmt.Errorf("Error deleting record: %s", err) @@ -101,82 +145,3 @@ func resource_cloudflare_record_destroy( return nil } - -func resource_cloudflare_record_refresh( - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - client := p.client - - rec, err := resource_cloudflare_record_retrieve(s.Attributes["domain"], s.ID, client) - if err != nil { - return nil, err - } - - return resource_cloudflare_record_update_state(s, rec) -} - -func resource_cloudflare_record_diff( - s *terraform.InstanceState, - c *terraform.ResourceConfig, - meta interface{}) (*terraform.InstanceDiff, error) { - - b := &diff.ResourceBuilder{ - Attrs: map[string]diff.AttrType{ - "domain": diff.AttrTypeCreate, - "name": diff.AttrTypeUpdate, - "value": diff.AttrTypeUpdate, - "ttl": diff.AttrTypeUpdate, - "type": diff.AttrTypeUpdate, - "priority": diff.AttrTypeUpdate, - }, - - ComputedAttrs: []string{ - "priority", - "ttl", - "hostname", - }, - - ComputedAttrsUpdate: []string{}, - } - - return b.Diff(s, c) -} - -func 
resource_cloudflare_record_update_state( - s *terraform.InstanceState, - rec *cloudflare.Record) (*terraform.InstanceState, error) { - - s.Attributes["name"] = rec.Name - s.Attributes["value"] = rec.Value - s.Attributes["type"] = rec.Type - s.Attributes["ttl"] = rec.Ttl - s.Attributes["priority"] = rec.Priority - s.Attributes["hostname"] = rec.FullName - - return s, nil -} - -func resource_cloudflare_record_retrieve(domain string, id string, client *cloudflare.Client) (*cloudflare.Record, error) { - record, err := client.RetrieveRecord(domain, id) - if err != nil { - return nil, err - } - - return record, nil -} - -func resource_cloudflare_record_validation() *config.Validator { - return &config.Validator{ - Required: []string{ - "domain", - "name", - "value", - "type", - }, - Optional: []string{ - "ttl", - "priority", - }, - } -} diff --git a/builtin/providers/cloudflare/resource_cloudflare_record_test.go b/builtin/providers/cloudflare/resource_cloudflare_record_test.go index 9ebfaa5a5..8a15fdca1 100644 --- a/builtin/providers/cloudflare/resource_cloudflare_record_test.go +++ b/builtin/providers/cloudflare/resource_cloudflare_record_test.go @@ -76,7 +76,7 @@ func TestAccCLOudflareRecord_Updated(t *testing.T) { } func testAccCheckCLOudflareRecordDestroy(s *terraform.State) error { - client := testAccProvider.client + client := testAccProvider.Meta().(*cloudflare.Client) for _, rs := range s.RootModule().Resources { if rs.Type != "cloudflare_record" { @@ -127,7 +127,7 @@ func testAccCheckCLOudflareRecordExists(n string, record *cloudflare.Record) res return fmt.Errorf("No Record ID is set") } - client := testAccProvider.client + client := testAccProvider.Meta().(*cloudflare.Client) foundRecord, err := client.RetrieveRecord(rs.Primary.Attributes["domain"], rs.Primary.ID) diff --git a/builtin/providers/cloudflare/resource_provider.go b/builtin/providers/cloudflare/resource_provider.go deleted file mode 100644 index 307498b4d..000000000 --- a/builtin/providers/cloudflare/resource_provider.go +++ /dev/null @@ -1,77 +0,0 @@ -package cloudflare - -import ( - "log" - - "github.com/hashicorp/terraform/helper/config" - "github.com/hashicorp/terraform/terraform" - "github.com/pearkes/cloudflare" -) - -type ResourceProvider struct { - Config Config - - client *cloudflare.Client -} - -func (p *ResourceProvider) Input( - input terraform.UIInput, - c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { - return c, nil -} - -func (p *ResourceProvider) Validate(c *terraform.ResourceConfig) ([]string, []error) { - v := &config.Validator{ - Required: []string{ - "token", - "email", - }, - } - - return v.Validate(c) -} - -func (p *ResourceProvider) ValidateResource( - t string, c *terraform.ResourceConfig) ([]string, []error) { - return resourceMap.Validate(t, c) -} - -func (p *ResourceProvider) Configure(c *terraform.ResourceConfig) error { - if _, err := config.Decode(&p.Config, c.Config); err != nil { - return err - } - - log.Println("[INFO] Initializing CloudFlare client") - var err error - p.client, err = p.Config.Client() - - if err != nil { - return err - } - - return nil -} - -func (p *ResourceProvider) Apply( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - d *terraform.InstanceDiff) (*terraform.InstanceState, error) { - return resourceMap.Apply(info, s, d, p) -} - -func (p *ResourceProvider) Diff( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - return resourceMap.Diff(info, s, c, p) -} - -func 
(p *ResourceProvider) Refresh( - info *terraform.InstanceInfo, - s *terraform.InstanceState) (*terraform.InstanceState, error) { - return resourceMap.Refresh(info, s, p) -} - -func (p *ResourceProvider) Resources() []terraform.ResourceType { - return resourceMap.Resources() -} diff --git a/builtin/providers/cloudflare/resource_provider_test.go b/builtin/providers/cloudflare/resource_provider_test.go deleted file mode 100644 index ab2d7f995..000000000 --- a/builtin/providers/cloudflare/resource_provider_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package cloudflare - -import ( - "os" - "reflect" - "testing" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *ResourceProvider - -func init() { - testAccProvider = new(ResourceProvider) - testAccProviders = map[string]terraform.ResourceProvider{ - "cloudflare": testAccProvider, - } -} - -func TestResourceProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = new(ResourceProvider) -} - -func TestResourceProvider_Configure(t *testing.T) { - rp := new(ResourceProvider) - var expectedToken string - var expectedEmail string - - if v := os.Getenv("CLOUDFLARE_EMAIL"); v != "" { - expectedEmail = v - } else { - expectedEmail = "foo" - } - - if v := os.Getenv("CLOUDFLARE_TOKEN"); v != "" { - expectedToken = v - } else { - expectedToken = "foo" - } - - raw := map[string]interface{}{ - "token": expectedToken, - "email": expectedEmail, - } - - rawConfig, err := config.NewRawConfig(raw) - if err != nil { - t.Fatalf("err: %s", err) - } - - err = rp.Configure(terraform.NewResourceConfig(rawConfig)) - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := Config{ - Token: expectedToken, - Email: expectedEmail, - } - - if !reflect.DeepEqual(rp.Config, expected) { - t.Fatalf("bad: %#v", rp.Config) - } -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("CLOUDFLARE_EMAIL"); v == "" { - t.Fatal("CLOUDFLARE_EMAIL must be set for acceptance tests") - } - - if v := os.Getenv("CLOUDFLARE_TOKEN"); v == "" { - t.Fatal("CLOUDFLARE_TOKEN must be set for acceptance tests") - } - - if v := os.Getenv("CLOUDFLARE_DOMAIN"); v == "" { - t.Fatal("CLOUDFLARE_DOMAIN must be set for acceptance tests. The domain is used to ` and destroy record against.") - } -} diff --git a/builtin/providers/cloudflare/resources.go b/builtin/providers/cloudflare/resources.go deleted file mode 100644 index 3701f6273..000000000 --- a/builtin/providers/cloudflare/resources.go +++ /dev/null @@ -1,24 +0,0 @@ -package cloudflare - -import ( - "github.com/hashicorp/terraform/helper/resource" -) - -// resourceMap is the mapping of resources we support to their basic -// operations. This makes it easy to implement new resource types. 
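
// A minimal sketch of the registration pattern this diff migrates to: the
// hand-rolled resource.Map deleted here (and in the other resources.go files)
// is replaced by a schema.Provider whose ResourcesMap points at
// schema.Resource values, as the new cloudflare/digitalocean/dnsimple
// provider.go files in this diff do. All "example" names below are
// illustrative only, not part of this change.
package example

import (
	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/terraform"
)

// Provider returns a terraform.ResourceProvider backed by schema.Provider.
func Provider() terraform.ResourceProvider {
	return &schema.Provider{
		ResourcesMap: map[string]*schema.Resource{
			// Each entry replaces one resource.Resource registration from
			// the deleted resources.go files.
			"example_record": exampleRecordResource(),
		},
	}
}

func exampleRecordResource() *schema.Resource {
	return &schema.Resource{
		Create: exampleNoop,
		Read:   exampleNoop,
		Delete: exampleNoop,
		Schema: map[string]*schema.Schema{
			"name": &schema.Schema{Type: schema.TypeString, Required: true},
		},
	}
}

func exampleNoop(d *schema.ResourceData, meta interface{}) error { return nil }
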
-var resourceMap *resource.Map - -func init() { - resourceMap = &resource.Map{ - Mapping: map[string]resource.Resource{ - "cloudflare_record": resource.Resource{ - ConfigValidator: resource_cloudflare_record_validation(), - Create: resource_cloudflare_record_create, - Destroy: resource_cloudflare_record_destroy, - Diff: resource_cloudflare_record_diff, - Update: resource_cloudflare_record_update, - Refresh: resource_cloudflare_record_refresh, - }, - }, - } -} diff --git a/builtin/providers/digitalocean/config.go b/builtin/providers/digitalocean/config.go index 8e1df5389..c9a43bc09 100644 --- a/builtin/providers/digitalocean/config.go +++ b/builtin/providers/digitalocean/config.go @@ -7,12 +7,10 @@ import ( ) type Config struct { - Token string `mapstructure:"token"` + Token string } -// Client() returns a new client for accessing digital -// ocean. -// +// Client() returns a new client for accessing digital ocean. func (c *Config) Client() (*digitalocean.Client, error) { client, err := digitalocean.NewClient(c.Token) diff --git a/builtin/providers/digitalocean/provider.go b/builtin/providers/digitalocean/provider.go index 19629c487..a2dc7651c 100644 --- a/builtin/providers/digitalocean/provider.go +++ b/builtin/providers/digitalocean/provider.go @@ -1,29 +1,48 @@ package digitalocean import ( + "os" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" ) // Provider returns a schema.Provider for DigitalOcean. -// -// NOTE: schema.Provider became available long after the DO provider -// was started, so resources may not be converted to this new structure -// yet. This is a WIP. To assist with the migration, make sure any resources -// you migrate are acceptance tested, then perform the migration. -func Provider() *schema.Provider { - // TODO: Move the configuration to this - +func Provider() terraform.ResourceProvider { return &schema.Provider{ Schema: map[string]*schema.Schema{ "token": &schema.Schema{ - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + DefaultFunc: envDefaultFunc("DIGITALOCEAN_TOKEN"), + Description: "The token key for API operations.", }, }, ResourcesMap: map[string]*schema.Resource{ - "digitalocean_domain": resourceDomain(), - "digitalocean_record": resourceRecord(), + "digitalocean_domain": resourceDigitalOceanDomain(), + "digitalocean_droplet": resourceDigitalOceanDroplet(), + "digitalocean_record": resourceDigitalOceanRecord(), }, + + ConfigureFunc: providerConfigure, } } + +func envDefaultFunc(k string) schema.SchemaDefaultFunc { + return func() (interface{}, error) { + if v := os.Getenv(k); v != "" { + return v, nil + } + + return nil, nil + } +} + +func providerConfigure(d *schema.ResourceData) (interface{}, error) { + config := Config{ + Token: d.Get("token").(string), + } + + return config.Client() +} diff --git a/builtin/providers/digitalocean/provider_test.go b/builtin/providers/digitalocean/provider_test.go index b1751e54f..fc5f78a2b 100644 --- a/builtin/providers/digitalocean/provider_test.go +++ b/builtin/providers/digitalocean/provider_test.go @@ -1,11 +1,35 @@ package digitalocean import ( + "os" "testing" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" ) +var testAccProviders map[string]terraform.ResourceProvider +var testAccProvider *schema.Provider + +func init() { + testAccProvider = Provider().(*schema.Provider) + testAccProviders = map[string]terraform.ResourceProvider{ + "digitalocean": testAccProvider, + } +} + func 
TestProvider(t *testing.T) { - if err := Provider().InternalValidate(); err != nil { + if err := Provider().(*schema.Provider).InternalValidate(); err != nil { t.Fatalf("err: %s", err) } } + +func TestProvider_impl(t *testing.T) { + var _ terraform.ResourceProvider = Provider() +} + +func testAccPreCheck(t *testing.T) { + if v := os.Getenv("DIGITALOCEAN_TOKEN"); v == "" { + t.Fatal("DIGITALOCEAN_TOKEN must be set for acceptance tests") + } +} diff --git a/builtin/providers/digitalocean/resource_digitalocean_domain.go b/builtin/providers/digitalocean/resource_digitalocean_domain.go index 865f6167e..eecdcd7dc 100644 --- a/builtin/providers/digitalocean/resource_digitalocean_domain.go +++ b/builtin/providers/digitalocean/resource_digitalocean_domain.go @@ -9,11 +9,11 @@ import ( "github.com/pearkes/digitalocean" ) -func resourceDomain() *schema.Resource { +func resourceDigitalOceanDomain() *schema.Resource { return &schema.Resource{ - Create: resourceDomainCreate, - Read: resourceDomainRead, - Delete: resourceDomainDelete, + Create: resourceDigitalOceanDomainCreate, + Read: resourceDigitalOceanDomainRead, + Delete: resourceDigitalOceanDomainDelete, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -31,18 +31,17 @@ func resourceDomain() *schema.Resource { } } -func resourceDomainCreate(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - client := p.client +func resourceDigitalOceanDomainCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*digitalocean.Client) // Build up our creation options - opts := digitalocean.CreateDomain{ + opts := &digitalocean.CreateDomain{ Name: d.Get("name").(string), IPAddress: d.Get("ip_address").(string), } log.Printf("[DEBUG] Domain create configuration: %#v", opts) - name, err := client.CreateDomain(&opts) + name, err := client.CreateDomain(opts) if err != nil { return fmt.Errorf("Error creating Domain: %s", err) } @@ -50,26 +49,11 @@ func resourceDomainCreate(d *schema.ResourceData, meta interface{}) error { d.SetId(name) log.Printf("[INFO] Domain Name: %s", name) - return nil + return resourceDigitalOceanDomainRead(d, meta) } -func resourceDomainDelete(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - client := p.client - - log.Printf("[INFO] Deleting Domain: %s", d.Id()) - err := client.DestroyDomain(d.Id()) - if err != nil { - return fmt.Errorf("Error deleting Domain: %s", err) - } - - d.SetId("") - return nil -} - -func resourceDomainRead(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - client := p.client +func resourceDigitalOceanDomainRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*digitalocean.Client) domain, err := client.RetrieveDomain(d.Id()) if err != nil { @@ -87,3 +71,16 @@ func resourceDomainRead(d *schema.ResourceData, meta interface{}) error { return nil } + +func resourceDigitalOceanDomainDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*digitalocean.Client) + + log.Printf("[INFO] Deleting Domain: %s", d.Id()) + err := client.DestroyDomain(d.Id()) + if err != nil { + return fmt.Errorf("Error deleting Domain: %s", err) + } + + d.SetId("") + return nil +} diff --git a/builtin/providers/digitalocean/resource_digitalocean_domain_test.go b/builtin/providers/digitalocean/resource_digitalocean_domain_test.go index ffd8be696..918eea155 100644 --- a/builtin/providers/digitalocean/resource_digitalocean_domain_test.go +++ 
b/builtin/providers/digitalocean/resource_digitalocean_domain_test.go @@ -33,7 +33,7 @@ func TestAccDigitalOceanDomain_Basic(t *testing.T) { } func testAccCheckDigitalOceanDomainDestroy(s *terraform.State) error { - client := testAccProvider.client + client := testAccProvider.Meta().(*digitalocean.Client) for _, rs := range s.RootModule().Resources { if rs.Type != "digitalocean_domain" { @@ -74,7 +74,7 @@ func testAccCheckDigitalOceanDomainExists(n string, domain *digitalocean.Domain) return fmt.Errorf("No Record ID is set") } - client := testAccProvider.client + client := testAccProvider.Meta().(*digitalocean.Client) foundDomain, err := client.RetrieveDomain(rs.Primary.ID) diff --git a/builtin/providers/digitalocean/resource_digitalocean_droplet.go b/builtin/providers/digitalocean/resource_digitalocean_droplet.go index 712f94a82..7dbc15bb2 100644 --- a/builtin/providers/digitalocean/resource_digitalocean_droplet.go +++ b/builtin/providers/digitalocean/resource_digitalocean_droplet.go @@ -6,202 +6,335 @@ import ( "strings" "time" - "github.com/hashicorp/terraform/flatmap" - "github.com/hashicorp/terraform/helper/config" - "github.com/hashicorp/terraform/helper/diff" "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/helper/schema" "github.com/pearkes/digitalocean" ) -func resource_digitalocean_droplet_create( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - client := p.client +func resourceDigitalOceanDroplet() *schema.Resource { + return &schema.Resource{ + Create: resourceDigitalOceanDropletCreate, + Read: resourceDigitalOceanDropletRead, + Update: resourceDigitalOceanDropletUpdate, + Delete: resourceDigitalOceanDropletDelete, - // Merge the diff into the state so that we have all the attributes - // properly. 
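
// The old-style handlers being deleted below received *terraform.InstanceState
// plus *terraform.InstanceDiff and had to merge them (s.MergeDiff(d)) and work
// with flat string attribute maps. In the schema-based replacements added in
// this diff, the framework does that bookkeeping and the handler reads typed
// values from *schema.ResourceData. A rough sketch; exampleClient and
// exampleCreate are invented for illustration, not part of this change.
package example

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/schema"
)

// exampleClient stands in for a real API client such as *digitalocean.Client.
type exampleClient struct{}

func (c *exampleClient) Create(name, size string) (string, error) { return "id-123", nil }

func exampleCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*exampleClient)

	// Typed reads replace rs.Attributes["..."] lookups on a merged state.
	id, err := client.Create(d.Get("name").(string), d.Get("size").(string))
	if err != nil {
		return fmt.Errorf("Error creating resource: %s", err)
	}

	// d.SetId replaces assigning rs.ID by hand.
	d.SetId(id)
	return nil
}
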
- rs := s.MergeDiff(d) + Schema: map[string]*schema.Schema{ + "image": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "size": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "status": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "locked": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "backups": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "ipv6": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + + "ipv6_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "ipv6_address_private": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "private_networking": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + + "ipv4_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "ipv4_address_private": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "ssh_keys": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "user_data": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func resourceDigitalOceanDropletCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*digitalocean.Client) // Build up our creation options - opts := digitalocean.CreateDroplet{ - Backups: rs.Attributes["backups"], - Image: rs.Attributes["image"], - IPV6: rs.Attributes["ipv6"], - Name: rs.Attributes["name"], - PrivateNetworking: rs.Attributes["private_networking"], - Region: rs.Attributes["region"], - Size: rs.Attributes["size"], - UserData: rs.Attributes["user_data"], + opts := &digitalocean.CreateDroplet{ + Image: d.Get("image").(string), + Name: d.Get("name").(string), + Region: d.Get("region").(string), + Size: d.Get("size").(string), } - // Only expand ssh_keys if we have them - if _, ok := rs.Attributes["ssh_keys.#"]; ok { - v := flatmap.Expand(rs.Attributes, "ssh_keys").([]interface{}) - if len(v) > 0 { - vs := make([]string, 0, len(v)) + if attr, ok := d.GetOk("backups"); ok { + opts.Backups = attr.(string) + } - // here we special case the * expanded lists. 
For example: - // - // ssh_keys = ["${digitalocean_key.foo.*.id}"] - // - if len(v) == 1 && strings.Contains(v[0].(string), ",") { - vs = strings.Split(v[0].(string), ",") - } + if attr, ok := d.GetOk("ipv6"); ok && attr.(bool) { + opts.IPV6 = "true" + } - for _, v := range v { - vs = append(vs, v.(string)) - } + if attr, ok := d.GetOk("private_networking"); ok && attr.(bool) { + opts.PrivateNetworking = "true" + } - opts.SSHKeys = vs + if attr, ok := d.GetOk("user_data"); ok { + opts.UserData = attr.(string) + } + + // Get configured ssh_keys + ssh_keys := d.Get("ssh_keys.#").(int) + if ssh_keys > 0 { + opts.SSHKeys = make([]string, 0, ssh_keys) + for i := 0; i < ssh_keys; i++ { + key := fmt.Sprintf("ssh_keys.%d", i) + opts.SSHKeys = append(opts.SSHKeys, d.Get(key).(string)) } } log.Printf("[DEBUG] Droplet create configuration: %#v", opts) - id, err := client.CreateDroplet(&opts) + id, err := client.CreateDroplet(opts) if err != nil { - return nil, fmt.Errorf("Error creating Droplet: %s", err) + return fmt.Errorf("Error creating droplet: %s", err) } // Assign the droplets id - rs.ID = id + d.SetId(id) - log.Printf("[INFO] Droplet ID: %s", id) + log.Printf("[INFO] Droplet ID: %s", d.Id()) - dropletRaw, err := WaitForDropletAttribute(id, "active", []string{"new"}, "status", client) + _, err = WaitForDropletAttribute(d, "active", []string{"new"}, "status", meta) if err != nil { - return rs, fmt.Errorf( - "Error waiting for droplet (%s) to become ready: %s", - id, err) + return fmt.Errorf( + "Error waiting for droplet (%s) to become ready: %s", d.Id(), err) } - droplet := dropletRaw.(*digitalocean.Droplet) - - // Initialize the connection info - rs.Ephemeral.ConnInfo["type"] = "ssh" - rs.Ephemeral.ConnInfo["host"] = droplet.IPV4Address("public") - - return resource_digitalocean_droplet_update_state(rs, droplet) + return resourceDigitalOceanDropletRead(d, meta) } -func resource_digitalocean_droplet_update( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - client := p.client - rs := s.MergeDiff(d) +func resourceDigitalOceanDropletRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*digitalocean.Client) - var err error + // Retrieve the droplet properties for updating the state + droplet, err := client.RetrieveDroplet(d.Id()) - if attr, ok := d.Attributes["size"]; ok { - err = client.PowerOff(rs.ID) + if err != nil { + return fmt.Errorf("Error retrieving droplet: %s", err) + } + + if droplet.ImageSlug() == "" && droplet.ImageId() != "" { + d.Set("image", droplet.ImageId()) + } else { + d.Set("image", droplet.ImageSlug()) + } + + d.Set("name", droplet.Name) + d.Set("region", droplet.RegionSlug()) + d.Set("size", droplet.SizeSlug) + d.Set("status", droplet.Status) + d.Set("locked", droplet.IsLocked()) + + if droplet.IPV6Address("public") != "" { + d.Set("ipv6", true) + d.Set("ipv6_address", droplet.IPV6Address("public")) + d.Set("ipv6_address_private", droplet.IPV6Address("private")) + } + + d.Set("ipv4_address", droplet.IPV4Address("public")) + + if droplet.NetworkingType() == "private" { + d.Set("private_networking", true) + d.Set("ipv4_address_private", droplet.IPV4Address("private")) + } + + // Initialize the connection info + d.SetConnInfo(map[string]string{ + "type": "ssh", + "host": droplet.IPV4Address("public"), + }) + + return nil +} + +func resourceDigitalOceanDropletUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*digitalocean.Client) + + if 
d.HasChange("size") { + oldSize, newSize := d.GetChange("size") + + err := client.PowerOff(d.Id()) if err != nil && !strings.Contains(err.Error(), "Droplet is already powered off") { - return s, err + return fmt.Errorf( + "Error powering off droplet (%s): %s", d.Id(), err) } // Wait for power off - _, err = WaitForDropletAttribute( - rs.ID, "off", []string{"active"}, "status", client) - - err = client.Resize(rs.ID, attr.New) + _, err = WaitForDropletAttribute(d, "off", []string{"active"}, "status", client) if err != nil { - newErr := power_on_and_wait(rs.ID, client) + return fmt.Errorf( + "Error waiting for droplet (%s) to become powered off: %s", d.Id(), err) + } + + // Resize the droplet + err = client.Resize(d.Id(), newSize.(string)) + + if err != nil { + newErr := power_on_and_wait(d, meta) if newErr != nil { - return rs, newErr + return fmt.Errorf( + "Error powering on droplet (%s) after failed resize: %s", d.Id(), err) } - return rs, err + return fmt.Errorf( + "Error resizing droplet (%s): %s", d.Id(), err) } // Wait for the size to change _, err = WaitForDropletAttribute( - rs.ID, attr.New, []string{"", attr.Old}, "size", client) + d, newSize.(string), []string{"", oldSize.(string)}, "size", meta) if err != nil { - newErr := power_on_and_wait(rs.ID, client) + newErr := power_on_and_wait(d, meta) if newErr != nil { - return rs, newErr + return fmt.Errorf( + "Error powering on droplet (%s) after waiting for resize to finish: %s", d.Id(), err) } - return s, err + return fmt.Errorf( + "Error waiting for resize droplet (%s) to finish: %s", d.Id(), err) } - err = client.PowerOn(rs.ID) + err = client.PowerOn(d.Id()) if err != nil { - return s, err + return fmt.Errorf( + "Error powering on droplet (%s) after resize: %s", d.Id(), err) } // Wait for power off - _, err = WaitForDropletAttribute( - rs.ID, "active", []string{"off"}, "status", client) + _, err = WaitForDropletAttribute(d, "active", []string{"off"}, "status", meta) if err != nil { - return s, err + return err } } - if attr, ok := d.Attributes["name"]; ok { - err = client.Rename(rs.ID, attr.New) + if d.HasChange("name") { + oldName, newName := d.GetChange("name") + + // Rename the droplet + err := client.Rename(d.Id(), newName.(string)) if err != nil { - return s, err + return fmt.Errorf( + "Error renaming droplet (%s): %s", d.Id(), err) } // Wait for the name to change _, err = WaitForDropletAttribute( - rs.ID, attr.New, []string{"", attr.Old}, "name", client) - } - - if attr, ok := d.Attributes["private_networking"]; ok { - err = client.Rename(rs.ID, attr.New) + d, newName.(string), []string{"", oldName.(string)}, "name", meta) if err != nil { - return s, err + return fmt.Errorf( + "Error waiting for rename droplet (%s) to finish: %s", d.Id(), err) } - - // Wait for the private_networking to turn on/off - _, err = WaitForDropletAttribute( - rs.ID, attr.New, []string{"", attr.Old}, "private_networking", client) } - if attr, ok := d.Attributes["ipv6"]; ok { - err = client.Rename(rs.ID, attr.New) + // As there is no way to disable private networking, + // we only check if it needs to be enabled + if d.HasChange("private_networking") && d.Get("private_networking").(bool) { + err := client.EnablePrivateNetworking(d.Id()) if err != nil { - return s, err + return fmt.Errorf( + "Error enabling private networking for droplet (%s): %s", d.Id(), err) } - // Wait for ipv6 to turn on/off + // Wait for the private_networking to turn on _, err = WaitForDropletAttribute( - rs.ID, attr.New, []string{"", attr.Old}, "ipv6", client) + d, "true", 
[]string{"", "false"}, "private_networking", meta) + + return fmt.Errorf( + "Error waiting for private networking to be enabled on for droplet (%s): %s", d.Id(), err) } - droplet, err := resource_digitalocean_droplet_retrieve(rs.ID, client) + // As there is no way to disable IPv6, we only check if it needs to be enabled + if d.HasChange("ipv6") && d.Get("ipv6").(bool) { + err := client.EnableIPV6s(d.Id()) - if err != nil { - return s, err + if err != nil { + return fmt.Errorf( + "Error turning on ipv6 for droplet (%s): %s", d.Id(), err) + } + + // Wait for ipv6 to turn on + _, err = WaitForDropletAttribute( + d, "true", []string{"", "false"}, "ipv6", meta) + + if err != nil { + return fmt.Errorf( + "Error waiting for ipv6 to be turned on for droplet (%s): %s", d.Id(), err) + } } - return resource_digitalocean_droplet_update_state(rs, droplet) + return resourceDigitalOceanDropletRead(d, meta) } -func resource_digitalocean_droplet_destroy( - s *terraform.InstanceState, - meta interface{}) error { - p := meta.(*ResourceProvider) - client := p.client +func resourceDigitalOceanDropletDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*digitalocean.Client) - log.Printf("[INFO] Deleting Droplet: %s", s.ID) + log.Printf("[INFO] Deleting droplet: %s", d.Id()) // Destroy the droplet - err := client.DestroyDroplet(s.ID) + err := client.DestroyDroplet(d.Id()) // Handle remotely destroyed droplets if err != nil && strings.Contains(err.Error(), "404 Not Found") { @@ -209,140 +342,24 @@ func resource_digitalocean_droplet_destroy( } if err != nil { - return fmt.Errorf("Error deleting Droplet: %s", err) + return fmt.Errorf("Error deleting droplet: %s", err) } return nil } -func resource_digitalocean_droplet_refresh( - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - client := p.client - - droplet, err := resource_digitalocean_droplet_retrieve(s.ID, client) - - // Handle remotely destroyed droplets - if err != nil && strings.Contains(err.Error(), "404 Not Found") { - return nil, nil - } - - if err != nil { - return nil, err - } - - return resource_digitalocean_droplet_update_state(s, droplet) -} - -func resource_digitalocean_droplet_diff( - s *terraform.InstanceState, - c *terraform.ResourceConfig, - meta interface{}) (*terraform.InstanceDiff, error) { - - b := &diff.ResourceBuilder{ - Attrs: map[string]diff.AttrType{ - "backups": diff.AttrTypeUpdate, - "image": diff.AttrTypeCreate, - "ipv6": diff.AttrTypeUpdate, - "name": diff.AttrTypeUpdate, - "private_networking": diff.AttrTypeUpdate, - "region": diff.AttrTypeCreate, - "size": diff.AttrTypeUpdate, - "ssh_keys": diff.AttrTypeCreate, - "user_data": diff.AttrTypeCreate, - }, - - ComputedAttrs: []string{ - "backups", - "ipv4_address", - "ipv4_address_private", - "ipv6", - "ipv6_address", - "ipv6_address_private", - "locked", - "private_networking", - "status", - }, - } - - return b.Diff(s, c) -} - -func resource_digitalocean_droplet_update_state( - s *terraform.InstanceState, - droplet *digitalocean.Droplet) (*terraform.InstanceState, error) { - - s.Attributes["name"] = droplet.Name - s.Attributes["region"] = droplet.RegionSlug() - - if droplet.ImageSlug() == "" && droplet.ImageId() != "" { - s.Attributes["image"] = droplet.ImageId() - } else { - s.Attributes["image"] = droplet.ImageSlug() - } - - if droplet.IPV6Address("public") != "" { - s.Attributes["ipv6"] = "true" - s.Attributes["ipv6_address"] = droplet.IPV6Address("public") - 
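
// The new Update function above relies on d.HasChange/d.GetChange instead of
// inspecting attribute entries on an InstanceDiff. A compact illustration of
// that idiom; the renamer interface and exampleUpdate are stand-ins (compare
// client.Rename above), not part of this change.
package example

import "github.com/hashicorp/terraform/helper/schema"

// renamer stands in for the provider client used by the real Update.
type renamer interface {
	Rename(id, name string) error
}

func exampleUpdate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(renamer)

	if d.HasChange("name") {
		// GetChange returns the old and the new value of the attribute.
		oldName, newName := d.GetChange("name")
		_ = oldName // the old value is useful when waiting for the change to settle
		if err := client.Rename(d.Id(), newName.(string)); err != nil {
			return err
		}
	}

	return nil
}
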
s.Attributes["ipv6_address_private"] = droplet.IPV6Address("private") - } - - s.Attributes["ipv4_address"] = droplet.IPV4Address("public") - s.Attributes["locked"] = droplet.IsLocked() - - if droplet.NetworkingType() == "private" { - s.Attributes["private_networking"] = "true" - s.Attributes["ipv4_address_private"] = droplet.IPV4Address("private") - } - - s.Attributes["size"] = droplet.SizeSlug() - s.Attributes["status"] = droplet.Status - - return s, nil -} - -// retrieves an ELB by its ID -func resource_digitalocean_droplet_retrieve(id string, client *digitalocean.Client) (*digitalocean.Droplet, error) { - // Retrieve the ELB properties for updating the state - droplet, err := client.RetrieveDroplet(id) - - if err != nil { - return nil, fmt.Errorf("Error retrieving droplet: %s", err) - } - - return &droplet, nil -} - -func resource_digitalocean_droplet_validation() *config.Validator { - return &config.Validator{ - Required: []string{ - "image", - "name", - "region", - "size", - }, - Optional: []string{ - "backups", - "user_data", - "ipv6", - "private_networking", - "ssh_keys.*", - }, - } -} - -func WaitForDropletAttribute(id string, target string, pending []string, attribute string, client *digitalocean.Client) (interface{}, error) { +func WaitForDropletAttribute( + d *schema.ResourceData, target string, pending []string, attribute string, meta interface{}) (interface{}, error) { // Wait for the droplet so we can get the networking attributes // that show up after a while log.Printf( - "[INFO] Waiting for Droplet (%s) to have %s of %s", - id, attribute, target) + "[INFO] Waiting for droplet (%s) to have %s of %s", + d.Id(), attribute, target) stateConf := &resource.StateChangeConf{ Pending: pending, Target: target, - Refresh: new_droplet_state_refresh_func(id, attribute, client), + Refresh: new_droplet_state_refresh_func(d, attribute, meta), Timeout: 10 * time.Minute, Delay: 10 * time.Second, MinTimeout: 3 * time.Second, @@ -351,37 +368,36 @@ func WaitForDropletAttribute(id string, target string, pending []string, attribu return stateConf.WaitForState() } -func new_droplet_state_refresh_func(id string, attribute string, client *digitalocean.Client) resource.StateRefreshFunc { +// TODO This function still needs a little more refactoring to make it +// cleaner and more efficient +func new_droplet_state_refresh_func( + d *schema.ResourceData, attribute string, meta interface{}) resource.StateRefreshFunc { + client := meta.(*digitalocean.Client) return func() (interface{}, string, error) { - // Retrieve the ELB properties for updating the state - droplet, err := client.RetrieveDroplet(id) + err := resourceDigitalOceanDropletRead(d, meta) if err != nil { - log.Printf("Error on retrieving droplet when waiting: %s", err) return nil, "", err } // If the droplet is locked, continue waiting. 
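
// WaitForDropletAttribute above is built on resource.StateChangeConf: a
// refresh function reports the current state string, and WaitForState polls
// until the Target state is reached or the Timeout expires. A self-contained
// sketch of the same pattern; waitUntilActive and the check callback are
// illustrative, not part of this change.
package example

import (
	"time"

	"github.com/hashicorp/terraform/helper/resource"
)

// waitUntilActive polls check() until it reports "active" or the timeout hits.
func waitUntilActive(check func() (string, error)) (interface{}, error) {
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"new", "off"},
		Target:     "active",
		Timeout:    10 * time.Minute,
		Delay:      10 * time.Second,
		MinTimeout: 3 * time.Second,
		Refresh: func() (interface{}, string, error) {
			status, err := check()
			if err != nil {
				return nil, "", err
			}
			// Return the status as both the result and the current state string.
			return status, status, nil
		},
	}

	return stateConf.WaitForState()
}
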
We can // only perform actions on unlocked droplets, so it's // pointless to look at that status - if droplet.IsLocked() == "true" { + if d.Get("locked").(string) == "true" { log.Println("[DEBUG] Droplet is locked, skipping status check and retrying") return nil, "", nil } - // Use our mapping to get back a map of the - // droplet properties - resourceMap, err := resource_digitalocean_droplet_update_state( - &terraform.InstanceState{Attributes: map[string]string{}}, &droplet) - - if err != nil { - log.Printf("Error creating map from droplet: %s", err) - return nil, "", err - } - // See if we can access our attribute - if attr, ok := resourceMap.Attributes[attribute]; ok { - return &droplet, attr, nil + if attr, ok := d.GetOk(attribute); ok { + // Retrieve the droplet properties + droplet, err := client.RetrieveDroplet(d.Id()) + + if err != nil { + return nil, "", fmt.Errorf("Error retrieving droplet: %s", err) + } + + return &droplet, attr.(string), nil } return nil, "", nil @@ -389,16 +405,16 @@ func new_droplet_state_refresh_func(id string, attribute string, client *digital } // Powers on the droplet and waits for it to be active -func power_on_and_wait(id string, client *digitalocean.Client) error { - err := client.PowerOn(id) +func power_on_and_wait(d *schema.ResourceData, meta interface{}) error { + client := meta.(*digitalocean.Client) + err := client.PowerOn(d.Id()) if err != nil { return err } // Wait for power on - _, err = WaitForDropletAttribute( - id, "active", []string{"off"}, "status", client) + _, err = WaitForDropletAttribute(d, "active", []string{"off"}, "status", client) if err != nil { return err diff --git a/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go b/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go index 12d8a362e..587612e01 100644 --- a/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go +++ b/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go @@ -94,7 +94,7 @@ func TestAccDigitalOceanDroplet_PrivateNetworkingIpv6(t *testing.T) { } func testAccCheckDigitalOceanDropletDestroy(s *terraform.State) error { - client := testAccProvider.client + client := testAccProvider.Meta().(*digitalocean.Client) for _, rs := range s.RootModule().Resources { if rs.Type != "digitalocean_droplet" { @@ -123,8 +123,8 @@ func testAccCheckDigitalOceanDropletAttributes(droplet *digitalocean.Droplet) re return fmt.Errorf("Bad image_slug: %s", droplet.ImageSlug()) } - if droplet.SizeSlug() != "512mb" { - return fmt.Errorf("Bad size_slug: %s", droplet.SizeSlug()) + if droplet.SizeSlug != "512mb" { + return fmt.Errorf("Bad size_slug: %s", droplet.SizeSlug) } if droplet.RegionSlug() != "nyc3" { @@ -141,8 +141,8 @@ func testAccCheckDigitalOceanDropletAttributes(droplet *digitalocean.Droplet) re func testAccCheckDigitalOceanDropletRenamedAndResized(droplet *digitalocean.Droplet) resource.TestCheckFunc { return func(s *terraform.State) error { - if droplet.SizeSlug() != "1gb" { - return fmt.Errorf("Bad size_slug: %s", droplet.SizeSlug()) + if droplet.SizeSlug != "1gb" { + return fmt.Errorf("Bad size_slug: %s", droplet.SizeSlug) } if droplet.Name != "baz" { @@ -160,8 +160,8 @@ func testAccCheckDigitalOceanDropletAttributes_PrivateNetworkingIpv6(droplet *di return fmt.Errorf("Bad image_slug: %s", droplet.ImageSlug()) } - if droplet.SizeSlug() != "1gb" { - return fmt.Errorf("Bad size_slug: %s", droplet.SizeSlug()) + if droplet.SizeSlug != "1gb" { + return fmt.Errorf("Bad size_slug: %s", droplet.SizeSlug) } if 
droplet.RegionSlug() != "sgp1" { @@ -207,7 +207,7 @@ func testAccCheckDigitalOceanDropletExists(n string, droplet *digitalocean.Dropl return fmt.Errorf("No Droplet ID is set") } - client := testAccProvider.client + client := testAccProvider.Meta().(*digitalocean.Client) retrieveDroplet, err := client.RetrieveDroplet(rs.Primary.ID) @@ -225,19 +225,23 @@ func testAccCheckDigitalOceanDropletExists(n string, droplet *digitalocean.Dropl } } -func Test_new_droplet_state_refresh_func(t *testing.T) { - droplet := digitalocean.Droplet{ - Name: "foobar", - } - resourceMap, _ := resource_digitalocean_droplet_update_state( - &terraform.InstanceState{Attributes: map[string]string{}}, &droplet) - - // See if we can access our attribute - if _, ok := resourceMap.Attributes["name"]; !ok { - t.Fatalf("bad name: %s", resourceMap.Attributes) - } - -} +// Not sure if this check should remain here as the underlaying +// function is changed and is tested indirectly by almost all +// other test already +// +//func Test_new_droplet_state_refresh_func(t *testing.T) { +// droplet := digitalocean.Droplet{ +// Name: "foobar", +// } +// resourceMap, _ := resource_digitalocean_droplet_update_state( +// &terraform.InstanceState{Attributes: map[string]string{}}, &droplet) +// +// // See if we can access our attribute +// if _, ok := resourceMap.Attributes["name"]; !ok { +// t.Fatalf("bad name: %s", resourceMap.Attributes) +// } +// +//} const testAccCheckDigitalOceanDropletConfig_basic = ` resource "digitalocean_droplet" "foobar" { diff --git a/builtin/providers/digitalocean/resource_digitalocean_record.go b/builtin/providers/digitalocean/resource_digitalocean_record.go index 0ad08265e..d365e4706 100644 --- a/builtin/providers/digitalocean/resource_digitalocean_record.go +++ b/builtin/providers/digitalocean/resource_digitalocean_record.go @@ -9,12 +9,12 @@ import ( "github.com/pearkes/digitalocean" ) -func resourceRecord() *schema.Resource { +func resourceDigitalOceanRecord() *schema.Resource { return &schema.Resource{ - Create: resourceRecordCreate, - Read: resourceRecordRead, - Update: resourceRecordUpdate, - Delete: resourceRecordDelete, + Create: resourceDigitalOceanRecordCreate, + Read: resourceDigitalOceanRecordRead, + Update: resourceDigitalOceanRecordUpdate, + Delete: resourceDigitalOceanRecordDelete, Schema: map[string]*schema.Schema{ "type": &schema.Schema{ @@ -65,9 +65,8 @@ func resourceRecord() *schema.Resource { } } -func resourceRecordCreate(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - client := p.client +func resourceDigitalOceanRecordCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*digitalocean.Client) newRecord := digitalocean.CreateRecord{ Type: d.Get("type").(string), @@ -87,50 +86,11 @@ func resourceRecordCreate(d *schema.ResourceData, meta interface{}) error { d.SetId(recId) log.Printf("[INFO] Record ID: %s", d.Id()) - return resourceRecordRead(d, meta) + return resourceDigitalOceanRecordRead(d, meta) } -func resourceRecordUpdate(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - client := p.client - - var updateRecord digitalocean.UpdateRecord - if v, ok := d.GetOk("name"); ok { - updateRecord.Name = v.(string) - } - - log.Printf("[DEBUG] record update configuration: %#v", updateRecord) - err := client.UpdateRecord(d.Get("domain").(string), d.Id(), &updateRecord) - if err != nil { - return fmt.Errorf("Failed to update record: %s", err) - } - - return resourceRecordRead(d, meta) -} - -func 
resourceRecordDelete(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - client := p.client - - log.Printf( - "[INFO] Deleting record: %s, %s", d.Get("domain").(string), d.Id()) - err := client.DestroyRecord(d.Get("domain").(string), d.Id()) - if err != nil { - // If the record is somehow already destroyed, mark as - // succesfully gone - if strings.Contains(err.Error(), "404 Not Found") { - return nil - } - - return fmt.Errorf("Error deleting record: %s", err) - } - - return nil -} - -func resourceRecordRead(d *schema.ResourceData, meta interface{}) error { - p := meta.(*ResourceProvider) - client := p.client +func resourceDigitalOceanRecordRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*digitalocean.Client) rec, err := client.RetrieveRecord(d.Get("domain").(string), d.Id()) if err != nil { @@ -153,3 +113,39 @@ func resourceRecordRead(d *schema.ResourceData, meta interface{}) error { return nil } + +func resourceDigitalOceanRecordUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*digitalocean.Client) + + var updateRecord digitalocean.UpdateRecord + if v, ok := d.GetOk("name"); ok { + updateRecord.Name = v.(string) + } + + log.Printf("[DEBUG] record update configuration: %#v", updateRecord) + err := client.UpdateRecord(d.Get("domain").(string), d.Id(), &updateRecord) + if err != nil { + return fmt.Errorf("Failed to update record: %s", err) + } + + return resourceDigitalOceanRecordRead(d, meta) +} + +func resourceDigitalOceanRecordDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*digitalocean.Client) + + log.Printf( + "[INFO] Deleting record: %s, %s", d.Get("domain").(string), d.Id()) + err := client.DestroyRecord(d.Get("domain").(string), d.Id()) + if err != nil { + // If the record is somehow already destroyed, mark as + // succesfully gone + if strings.Contains(err.Error(), "404 Not Found") { + return nil + } + + return fmt.Errorf("Error deleting record: %s", err) + } + + return nil +} diff --git a/builtin/providers/digitalocean/resource_digitalocean_record_test.go b/builtin/providers/digitalocean/resource_digitalocean_record_test.go index 59a0bb4a4..66ac2bb5f 100644 --- a/builtin/providers/digitalocean/resource_digitalocean_record_test.go +++ b/builtin/providers/digitalocean/resource_digitalocean_record_test.go @@ -77,7 +77,7 @@ func TestAccDigitalOceanRecord_Updated(t *testing.T) { } func testAccCheckDigitalOceanRecordDestroy(s *terraform.State) error { - client := testAccProvider.client + client := testAccProvider.Meta().(*digitalocean.Client) for _, rs := range s.RootModule().Resources { if rs.Type != "digitalocean_record" { @@ -128,7 +128,7 @@ func testAccCheckDigitalOceanRecordExists(n string, record *digitalocean.Record) return fmt.Errorf("No Record ID is set") } - client := testAccProvider.client + client := testAccProvider.Meta().(*digitalocean.Client) foundRecord, err := client.RetrieveRecord(rs.Primary.Attributes["domain"], rs.Primary.ID) diff --git a/builtin/providers/digitalocean/resource_provider.go b/builtin/providers/digitalocean/resource_provider.go deleted file mode 100644 index 6ded1018b..000000000 --- a/builtin/providers/digitalocean/resource_provider.go +++ /dev/null @@ -1,99 +0,0 @@ -package digitalocean - -import ( - "log" - - "github.com/hashicorp/terraform/helper/config" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - "github.com/pearkes/digitalocean" -) - -type ResourceProvider struct { - Config Config - - 
client *digitalocean.Client - - // This is the schema.Provider. Eventually this will replace much - // of this structure. For now it is an element of it for compatiblity. - p *schema.Provider -} - -func (p *ResourceProvider) Input( - input terraform.UIInput, - c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { - return Provider().Input(input, c) -} - -func (p *ResourceProvider) Validate(c *terraform.ResourceConfig) ([]string, []error) { - prov := Provider() - return prov.Validate(c) -} - -func (p *ResourceProvider) ValidateResource( - t string, c *terraform.ResourceConfig) ([]string, []error) { - prov := Provider() - if _, ok := prov.ResourcesMap[t]; ok { - return prov.ValidateResource(t, c) - } - - return resourceMap.Validate(t, c) -} - -func (p *ResourceProvider) Configure(c *terraform.ResourceConfig) error { - if _, err := config.Decode(&p.Config, c.Config); err != nil { - return err - } - - log.Println("[INFO] Initializing DigitalOcean client") - var err error - p.client, err = p.Config.Client() - - if err != nil { - return err - } - - // Create the provider, set the meta - p.p = Provider() - p.p.SetMeta(p) - - return nil -} - -func (p *ResourceProvider) Apply( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - d *terraform.InstanceDiff) (*terraform.InstanceState, error) { - if _, ok := p.p.ResourcesMap[info.Type]; ok { - return p.p.Apply(info, s, d) - } - - return resourceMap.Apply(info, s, d, p) -} - -func (p *ResourceProvider) Diff( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - if _, ok := p.p.ResourcesMap[info.Type]; ok { - return p.p.Diff(info, s, c) - } - - return resourceMap.Diff(info, s, c, p) -} - -func (p *ResourceProvider) Refresh( - info *terraform.InstanceInfo, - s *terraform.InstanceState) (*terraform.InstanceState, error) { - if _, ok := p.p.ResourcesMap[info.Type]; ok { - return p.p.Refresh(info, s) - } - - return resourceMap.Refresh(info, s, p) -} - -func (p *ResourceProvider) Resources() []terraform.ResourceType { - result := resourceMap.Resources() - result = append(result, Provider().Resources()...) 
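
// With the ResourceProvider shim deleted in this hunk, acceptance tests no
// longer reach into a provider struct field (testAccProvider.client); they
// pull the configured client back out of the schema.Provider via Meta(), as
// the updated *_test.go files in this diff do. A sketch; exampleClient and
// clientFromProvider are illustrative, not part of this change.
package example

import "github.com/hashicorp/terraform/helper/schema"

// exampleClient stands in for the concrete type returned by the provider's
// ConfigureFunc, e.g. *digitalocean.Client in this diff.
type exampleClient struct{}

var testAccProvider *schema.Provider

func clientFromProvider() *exampleClient {
	// Meta() hands back whatever ConfigureFunc returned once Configure has run.
	return testAccProvider.Meta().(*exampleClient)
}
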
- return result -} diff --git a/builtin/providers/digitalocean/resource_provider_test.go b/builtin/providers/digitalocean/resource_provider_test.go deleted file mode 100644 index 464fd6cf0..000000000 --- a/builtin/providers/digitalocean/resource_provider_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package digitalocean - -import ( - "os" - "reflect" - "testing" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *ResourceProvider - -func init() { - testAccProvider = new(ResourceProvider) - testAccProviders = map[string]terraform.ResourceProvider{ - "digitalocean": testAccProvider, - } -} - -func TestResourceProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = new(ResourceProvider) -} - -func TestResourceProvider_Configure(t *testing.T) { - rp := new(ResourceProvider) - - raw := map[string]interface{}{ - "token": "foo", - } - - rawConfig, err := config.NewRawConfig(raw) - if err != nil { - t.Fatalf("err: %s", err) - } - - err = rp.Configure(terraform.NewResourceConfig(rawConfig)) - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := Config{ - Token: "foo", - } - - if !reflect.DeepEqual(rp.Config, expected) { - t.Fatalf("bad: %#v", rp.Config) - } -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("DIGITALOCEAN_TOKEN"); v == "" { - t.Fatal("DIGITALOCEAN_TOKEN must be set for acceptance tests") - } -} diff --git a/builtin/providers/digitalocean/resources.go b/builtin/providers/digitalocean/resources.go deleted file mode 100644 index 75b396c52..000000000 --- a/builtin/providers/digitalocean/resources.go +++ /dev/null @@ -1,24 +0,0 @@ -package digitalocean - -import ( - "github.com/hashicorp/terraform/helper/resource" -) - -// resourceMap is the mapping of resources we support to their basic -// operations. This makes it easy to implement new resource types. -var resourceMap *resource.Map - -func init() { - resourceMap = &resource.Map{ - Mapping: map[string]resource.Resource{ - "digitalocean_droplet": resource.Resource{ - ConfigValidator: resource_digitalocean_droplet_validation(), - Create: resource_digitalocean_droplet_create, - Destroy: resource_digitalocean_droplet_destroy, - Diff: resource_digitalocean_droplet_diff, - Refresh: resource_digitalocean_droplet_refresh, - Update: resource_digitalocean_droplet_update, - }, - }, - } -} diff --git a/builtin/providers/dnsimple/config.go b/builtin/providers/dnsimple/config.go index 6be132f2f..e8843277d 100644 --- a/builtin/providers/dnsimple/config.go +++ b/builtin/providers/dnsimple/config.go @@ -3,29 +3,17 @@ package dnsimple import ( "fmt" "log" - "os" "github.com/pearkes/dnsimple" ) type Config struct { - Token string `mapstructure:"token"` - Email string `mapstructure:"email"` + Email string + Token string } // Client() returns a new client for accessing dnsimple. -// func (c *Config) Client() (*dnsimple.Client, error) { - - // If we have env vars set (like in the acc) tests, - // we need to override the values passed in here. 
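
// The environment-variable fallbacks being deleted from Config.Client() below
// move into the provider schema: each field gets a DefaultFunc that consults
// the environment, as the new dnsimple provider.go later in this diff does.
// A minimal sketch of that pattern; the emailSchema helper is illustrative,
// not part of this change.
package example

import (
	"os"

	"github.com/hashicorp/terraform/helper/schema"
)

func envDefaultFunc(k string) schema.SchemaDefaultFunc {
	return func() (interface{}, error) {
		if v := os.Getenv(k); v != "" {
			return v, nil
		}

		return nil, nil
	}
}

func emailSchema() *schema.Schema {
	return &schema.Schema{
		Type:        schema.TypeString,
		Required:    true,
		DefaultFunc: envDefaultFunc("DNSIMPLE_EMAIL"),
		Description: "A registered DNSimple email address.",
	}
}
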
- if v := os.Getenv("DNSIMPLE_EMAIL"); v != "" { - c.Email = v - } - if v := os.Getenv("DNSIMPLE_TOKEN"); v != "" { - c.Token = v - } - client, err := dnsimple.NewClient(c.Email, c.Token) if err != nil { diff --git a/builtin/providers/dnsimple/provider.go b/builtin/providers/dnsimple/provider.go new file mode 100644 index 000000000..30b8b5976 --- /dev/null +++ b/builtin/providers/dnsimple/provider.go @@ -0,0 +1,54 @@ +package dnsimple + +import ( + "os" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +// Provider returns a terraform.ResourceProvider. +func Provider() terraform.ResourceProvider { + return &schema.Provider{ + Schema: map[string]*schema.Schema{ + "email": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: envDefaultFunc("DNSIMPLE_EMAIL"), + Description: "A registered DNSimple email address.", + }, + + "token": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: envDefaultFunc("DNSIMPLE_TOKEN"), + Description: "The token key for API operations.", + }, + }, + + ResourcesMap: map[string]*schema.Resource{ + "dnsimple_record": resourceDNSimpleRecord(), + }, + + ConfigureFunc: providerConfigure, + } +} + +func envDefaultFunc(k string) schema.SchemaDefaultFunc { + return func() (interface{}, error) { + if v := os.Getenv(k); v != "" { + return v, nil + } + + return nil, nil + } +} + +func providerConfigure(d *schema.ResourceData) (interface{}, error) { + config := Config{ + Email: d.Get("email").(string), + Token: d.Get("token").(string), + } + + return config.Client() +} diff --git a/builtin/providers/dnsimple/provider_test.go b/builtin/providers/dnsimple/provider_test.go new file mode 100644 index 000000000..506efdc6f --- /dev/null +++ b/builtin/providers/dnsimple/provider_test.go @@ -0,0 +1,43 @@ +package dnsimple + +import ( + "os" + "testing" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +var testAccProviders map[string]terraform.ResourceProvider +var testAccProvider *schema.Provider + +func init() { + testAccProvider = Provider().(*schema.Provider) + testAccProviders = map[string]terraform.ResourceProvider{ + "dnsimple": testAccProvider, + } +} + +func TestProvider(t *testing.T) { + if err := Provider().(*schema.Provider).InternalValidate(); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestProvider_impl(t *testing.T) { + var _ terraform.ResourceProvider = Provider() +} + +func testAccPreCheck(t *testing.T) { + if v := os.Getenv("DNSIMPLE_EMAIL"); v == "" { + t.Fatal("DNSIMPLE_EMAIL must be set for acceptance tests") + } + + if v := os.Getenv("DNSIMPLE_TOKEN"); v == "" { + t.Fatal("DNSIMPLE_TOKEN must be set for acceptance tests") + } + + if v := os.Getenv("DNSIMPLE_DOMAIN"); v == "" { + t.Fatal("DNSIMPLE_DOMAIN must be set for acceptance tests. 
The domain is used to ` and destroy record against.") + } +} diff --git a/builtin/providers/dnsimple/resource_dnsimple_record.go b/builtin/providers/dnsimple/resource_dnsimple_record.go index 36b0a058a..e3669c3c3 100644 --- a/builtin/providers/dnsimple/resource_dnsimple_record.go +++ b/builtin/providers/dnsimple/resource_dnsimple_record.go @@ -4,104 +4,150 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/helper/config" - "github.com/hashicorp/terraform/helper/diff" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/helper/schema" "github.com/pearkes/dnsimple" ) -func resource_dnsimple_record_create( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - client := p.client +func resourceDNSimpleRecord() *schema.Resource { + return &schema.Resource{ + Create: resourceDNSimpleRecordCreate, + Read: resourceDNSimpleRecordRead, + Update: resourceDNSimpleRecordUpdate, + Delete: resourceDNSimpleRecordDelete, - // Merge the diff into the state so that we have all the attributes - // properly. - rs := s.MergeDiff(d) + Schema: map[string]*schema.Schema{ + "domain": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, - var err error + "domain_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, - newRecord := dnsimple.ChangeRecord{ - Name: rs.Attributes["name"], - Value: rs.Attributes["value"], - Type: rs.Attributes["type"], + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "hostname": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "value": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "ttl": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "priority": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceDNSimpleRecordCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*dnsimple.Client) + + // Create the new record + newRecord := &dnsimple.ChangeRecord{ + Name: d.Get("name").(string), + Type: d.Get("type").(string), + Value: d.Get("value").(string), } - if attr, ok := rs.Attributes["ttl"]; ok { - newRecord.Ttl = attr + if ttl, ok := d.GetOk("ttl"); ok { + newRecord.Ttl = ttl.(string) } log.Printf("[DEBUG] record create configuration: %#v", newRecord) - recId, err := client.CreateRecord(rs.Attributes["domain"], &newRecord) + recId, err := client.CreateRecord(d.Get("domain").(string), newRecord) if err != nil { - return nil, fmt.Errorf("Failed to create record: %s", err) + return fmt.Errorf("Failed to create record: %s", err) } - rs.ID = recId - log.Printf("[INFO] record ID: %s", rs.ID) + d.SetId(recId) + log.Printf("[INFO] record ID: %s", d.Id()) - record, err := resource_dnsimple_record_retrieve(rs.Attributes["domain"], rs.ID, client) - if err != nil { - return nil, fmt.Errorf("Couldn't find record: %s", err) - } - - return resource_dnsimple_record_update_state(rs, record) + return resourceDNSimpleRecordRead(d, meta) } -func resource_dnsimple_record_update( - s *terraform.InstanceState, - d *terraform.InstanceDiff, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - client := p.client - rs := s.MergeDiff(d) +func resourceDNSimpleRecordRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*dnsimple.Client) - updateRecord := dnsimple.ChangeRecord{} - - 
if attr, ok := d.Attributes["name"]; ok { - updateRecord.Name = attr.New + rec, err := client.RetrieveRecord(d.Get("domain").(string), d.Id()) + if err != nil { + return fmt.Errorf("Couldn't find record: %s", err) } - if attr, ok := d.Attributes["value"]; ok { - updateRecord.Value = attr.New + d.Set("domain_id", rec.StringDomainId()) + d.Set("name", rec.Name) + d.Set("type", rec.RecordType) + d.Set("value", rec.Content) + d.Set("ttl", rec.StringTtl()) + d.Set("priority", rec.StringPrio()) + + if rec.Name == "" { + d.Set("hostname", d.Get("domain").(string)) + } else { + d.Set("hostname", fmt.Sprintf("%s.%s", rec.Name, d.Get("domain").(string))) } - if attr, ok := d.Attributes["type"]; ok { - updateRecord.Type = attr.New + return nil +} + +func resourceDNSimpleRecordUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*dnsimple.Client) + + updateRecord := &dnsimple.ChangeRecord{} + + if attr, ok := d.GetOk("name"); ok { + updateRecord.Name = attr.(string) } - if attr, ok := d.Attributes["ttl"]; ok { - updateRecord.Ttl = attr.New + if attr, ok := d.GetOk("type"); ok { + updateRecord.Type = attr.(string) + } + + if attr, ok := d.GetOk("value"); ok { + updateRecord.Value = attr.(string) + } + + if attr, ok := d.GetOk("ttl"); ok { + updateRecord.Ttl = attr.(string) } log.Printf("[DEBUG] record update configuration: %#v", updateRecord) - _, err := client.UpdateRecord(rs.Attributes["domain"], rs.ID, &updateRecord) + _, err := client.UpdateRecord(d.Get("domain").(string), d.Id(), updateRecord) if err != nil { - return rs, fmt.Errorf("Failed to update record: %s", err) + return fmt.Errorf("Failed to update record: %s", err) } - record, err := resource_dnsimple_record_retrieve(rs.Attributes["domain"], rs.ID, client) - if err != nil { - return rs, fmt.Errorf("Couldn't find record: %s", err) - } - - return resource_dnsimple_record_update_state(rs, record) + return resourceDNSimpleRecordRead(d, meta) } -func resource_dnsimple_record_destroy( - s *terraform.InstanceState, - meta interface{}) error { - p := meta.(*ResourceProvider) - client := p.client +func resourceDNSimpleRecordDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*dnsimple.Client) - log.Printf("[INFO] Deleting record: %s, %s", s.Attributes["domain"], s.ID) + log.Printf("[INFO] Deleting record: %s, %s", d.Get("domain").(string), d.Id()) - err := client.DestroyRecord(s.Attributes["domain"], s.ID) + err := client.DestroyRecord(d.Get("domain").(string), d.Id()) if err != nil { return fmt.Errorf("Error deleting record: %s", err) @@ -109,88 +155,3 @@ func resource_dnsimple_record_destroy( return nil } - -func resource_dnsimple_record_refresh( - s *terraform.InstanceState, - meta interface{}) (*terraform.InstanceState, error) { - p := meta.(*ResourceProvider) - client := p.client - - rec, err := resource_dnsimple_record_retrieve(s.Attributes["domain"], s.ID, client) - if err != nil { - return nil, err - } - - return resource_dnsimple_record_update_state(s, rec) -} - -func resource_dnsimple_record_diff( - s *terraform.InstanceState, - c *terraform.ResourceConfig, - meta interface{}) (*terraform.InstanceDiff, error) { - - b := &diff.ResourceBuilder{ - Attrs: map[string]diff.AttrType{ - "domain": diff.AttrTypeCreate, - "name": diff.AttrTypeUpdate, - "value": diff.AttrTypeUpdate, - "ttl": diff.AttrTypeUpdate, - "type": diff.AttrTypeUpdate, - }, - - ComputedAttrs: []string{ - "priority", - "domain_id", - "ttl", - }, - - ComputedAttrsUpdate: []string{ - "hostname", - }, - } - - return b.Diff(s, c) -} - 
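// Editorial aside, not part of the patch: the helpers removed below
// (resource_dnsimple_record_update_state, _retrieve and _validation) have no
// one-to-one replacements in the schema-based resource added above. State is
// now written with d.Set(...) inside the Read function, retrieval is a plain
// client.RetrieveRecord call, and the Required/Optional declarations on each
// attribute take over the role of the hand-written config.Validator, just as
// the diff.ResourceBuilder removed immediately above is subsumed by the
// schema's Required/Optional/Computed flags.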
-func resource_dnsimple_record_update_state( - s *terraform.InstanceState, - rec *dnsimple.Record) (*terraform.InstanceState, error) { - - s.Attributes["name"] = rec.Name - s.Attributes["value"] = rec.Content - s.Attributes["type"] = rec.RecordType - s.Attributes["ttl"] = rec.StringTtl() - s.Attributes["priority"] = rec.StringPrio() - s.Attributes["domain_id"] = rec.StringDomainId() - - if rec.Name == "" { - s.Attributes["hostname"] = s.Attributes["domain"] - } else { - s.Attributes["hostname"] = fmt.Sprintf("%s.%s", rec.Name, s.Attributes["domain"]) - } - - return s, nil -} - -func resource_dnsimple_record_retrieve(domain string, id string, client *dnsimple.Client) (*dnsimple.Record, error) { - record, err := client.RetrieveRecord(domain, id) - if err != nil { - return nil, err - } - - return record, nil -} - -func resource_dnsimple_record_validation() *config.Validator { - return &config.Validator{ - Required: []string{ - "domain", - "name", - "value", - "type", - }, - Optional: []string{ - "ttl", - }, - } -} diff --git a/builtin/providers/dnsimple/resource_dnsimple_record_test.go b/builtin/providers/dnsimple/resource_dnsimple_record_test.go index 78d825a0a..a07792a54 100644 --- a/builtin/providers/dnsimple/resource_dnsimple_record_test.go +++ b/builtin/providers/dnsimple/resource_dnsimple_record_test.go @@ -76,7 +76,7 @@ func TestAccDNSimpleRecord_Updated(t *testing.T) { } func testAccCheckDNSimpleRecordDestroy(s *terraform.State) error { - client := testAccProvider.client + client := testAccProvider.Meta().(*dnsimple.Client) for _, rs := range s.RootModule().Resources { if rs.Type != "dnsimple_record" { @@ -127,7 +127,7 @@ func testAccCheckDNSimpleRecordExists(n string, record *dnsimple.Record) resourc return fmt.Errorf("No Record ID is set") } - client := testAccProvider.client + client := testAccProvider.Meta().(*dnsimple.Client) foundRecord, err := client.RetrieveRecord(rs.Primary.Attributes["domain"], rs.Primary.ID) diff --git a/builtin/providers/dnsimple/resource_provider.go b/builtin/providers/dnsimple/resource_provider.go deleted file mode 100644 index a9d0f3159..000000000 --- a/builtin/providers/dnsimple/resource_provider.go +++ /dev/null @@ -1,77 +0,0 @@ -package dnsimple - -import ( - "log" - - "github.com/hashicorp/terraform/helper/config" - "github.com/hashicorp/terraform/terraform" - "github.com/pearkes/dnsimple" -) - -type ResourceProvider struct { - Config Config - - client *dnsimple.Client -} - -func (p *ResourceProvider) Input( - input terraform.UIInput, - c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { - return c, nil -} - -func (p *ResourceProvider) Validate(c *terraform.ResourceConfig) ([]string, []error) { - v := &config.Validator{ - Required: []string{ - "token", - "email", - }, - } - - return v.Validate(c) -} - -func (p *ResourceProvider) ValidateResource( - t string, c *terraform.ResourceConfig) ([]string, []error) { - return resourceMap.Validate(t, c) -} - -func (p *ResourceProvider) Configure(c *terraform.ResourceConfig) error { - if _, err := config.Decode(&p.Config, c.Config); err != nil { - return err - } - - log.Println("[INFO] Initializing DNSimple client") - var err error - p.client, err = p.Config.Client() - - if err != nil { - return err - } - - return nil -} - -func (p *ResourceProvider) Apply( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - d *terraform.InstanceDiff) (*terraform.InstanceState, error) { - return resourceMap.Apply(info, s, d, p) -} - -func (p *ResourceProvider) Diff( - info *terraform.InstanceInfo, - s 
*terraform.InstanceState, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - return resourceMap.Diff(info, s, c, p) -} - -func (p *ResourceProvider) Refresh( - info *terraform.InstanceInfo, - s *terraform.InstanceState) (*terraform.InstanceState, error) { - return resourceMap.Refresh(info, s, p) -} - -func (p *ResourceProvider) Resources() []terraform.ResourceType { - return resourceMap.Resources() -} diff --git a/builtin/providers/dnsimple/resource_provider_test.go b/builtin/providers/dnsimple/resource_provider_test.go deleted file mode 100644 index 4867c1ebd..000000000 --- a/builtin/providers/dnsimple/resource_provider_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package dnsimple - -import ( - "os" - "reflect" - "testing" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *ResourceProvider - -func init() { - testAccProvider = new(ResourceProvider) - testAccProviders = map[string]terraform.ResourceProvider{ - "dnsimple": testAccProvider, - } -} - -func TestResourceProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = new(ResourceProvider) -} - -func TestResourceProvider_Configure(t *testing.T) { - rp := new(ResourceProvider) - var expectedToken string - var expectedEmail string - - if v := os.Getenv("DNSIMPLE_EMAIL"); v != "" { - expectedEmail = v - } else { - expectedEmail = "foo" - } - - if v := os.Getenv("DNSIMPLE_TOKEN"); v != "" { - expectedToken = v - } else { - expectedToken = "foo" - } - - raw := map[string]interface{}{ - "token": expectedToken, - "email": expectedEmail, - } - - rawConfig, err := config.NewRawConfig(raw) - if err != nil { - t.Fatalf("err: %s", err) - } - - err = rp.Configure(terraform.NewResourceConfig(rawConfig)) - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := Config{ - Token: expectedToken, - Email: expectedEmail, - } - - if !reflect.DeepEqual(rp.Config, expected) { - t.Fatalf("bad: %#v", rp.Config) - } -} - -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("DNSIMPLE_EMAIL"); v == "" { - t.Fatal("DNSIMPLE_EMAIL must be set for acceptance tests") - } - - if v := os.Getenv("DNSIMPLE_TOKEN"); v == "" { - t.Fatal("DNSIMPLE_TOKEN must be set for acceptance tests") - } - - if v := os.Getenv("DNSIMPLE_DOMAIN"); v == "" { - t.Fatal("DNSIMPLE_DOMAIN must be set for acceptance tests. The domain is used to ` and destroy record against.") - } -} diff --git a/builtin/providers/dnsimple/resources.go b/builtin/providers/dnsimple/resources.go deleted file mode 100644 index 7cbd5db91..000000000 --- a/builtin/providers/dnsimple/resources.go +++ /dev/null @@ -1,24 +0,0 @@ -package dnsimple - -import ( - "github.com/hashicorp/terraform/helper/resource" -) - -// resourceMap is the mapping of resources we support to their basic -// operations. This makes it easy to implement new resource types. 
-var resourceMap *resource.Map - -func init() { - resourceMap = &resource.Map{ - Mapping: map[string]resource.Resource{ - "dnsimple_record": resource.Resource{ - ConfigValidator: resource_dnsimple_record_validation(), - Create: resource_dnsimple_record_create, - Destroy: resource_dnsimple_record_destroy, - Diff: resource_dnsimple_record_diff, - Update: resource_dnsimple_record_update, - Refresh: resource_dnsimple_record_refresh, - }, - }, - } -} diff --git a/builtin/providers/google/config.go b/builtin/providers/google/config.go index 91f8992a0..54c115b4b 100644 --- a/builtin/providers/google/config.go +++ b/builtin/providers/google/config.go @@ -29,20 +29,6 @@ func (c *Config) loadAndValidate() error { var account accountFile var secrets clientSecretsFile - // TODO: validation that it isn't blank - if c.AccountFile == "" { - c.AccountFile = os.Getenv("GOOGLE_ACCOUNT_FILE") - } - if c.ClientSecretsFile == "" { - c.ClientSecretsFile = os.Getenv("GOOGLE_CLIENT_FILE") - } - if c.Project == "" { - c.Project = os.Getenv("GOOGLE_PROJECT") - } - if c.Region == "" { - c.Region = os.Getenv("GOOGLE_REGION") - } - if err := loadJSON(&account, c.AccountFile); err != nil { return fmt.Errorf( "Error loading account file '%s': %s", diff --git a/builtin/providers/google/provider.go b/builtin/providers/google/provider.go index 593b8559b..ea630bbfe 100644 --- a/builtin/providers/google/provider.go +++ b/builtin/providers/google/provider.go @@ -1,6 +1,8 @@ package google import ( + "os" + "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" ) @@ -10,23 +12,27 @@ func Provider() terraform.ResourceProvider { return &schema.Provider{ Schema: map[string]*schema.Schema{ "account_file": &schema.Schema{ - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + DefaultFunc: envDefaultFunc("GOOGLE_ACCOUNT_FILE"), }, "client_secrets_file": &schema.Schema{ - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + DefaultFunc: envDefaultFunc("GOOGLE_CLIENT_FILE"), }, "project": &schema.Schema{ - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + DefaultFunc: envDefaultFunc("GOOGLE_PROJECT"), }, "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + DefaultFunc: envDefaultFunc("GOOGLE_REGION"), }, }, @@ -43,6 +49,16 @@ func Provider() terraform.ResourceProvider { } } +func envDefaultFunc(k string) schema.SchemaDefaultFunc { + return func() (interface{}, error) { + if v := os.Getenv(k); v != "" { + return v, nil + } + + return nil, nil + } +} + func providerConfigure(d *schema.ResourceData) (interface{}, error) { config := Config{ AccountFile: d.Get("account_file").(string), diff --git a/builtin/providers/google/provider_test.go b/builtin/providers/google/provider_test.go index f4903bd27..d5a32be33 100644 --- a/builtin/providers/google/provider_test.go +++ b/builtin/providers/google/provider_test.go @@ -40,4 +40,8 @@ func testAccPreCheck(t *testing.T) { if v := os.Getenv("GOOGLE_PROJECT"); v == "" { t.Fatal("GOOGLE_PROJECT must be set for acceptance tests") } + + if v := os.Getenv("GOOGLE_REGION"); v != "us-central1" { + t.Fatal("GOOGLE_REGION must be set to us-central1 for acceptance tests") + } } diff --git a/builtin/providers/google/resource_compute_instance.go b/builtin/providers/google/resource_compute_instance.go index f6b0fde7a..92065fb68 100644 --- a/builtin/providers/google/resource_compute_instance.go +++ 
b/builtin/providers/google/resource_compute_instance.go @@ -514,10 +514,10 @@ func resourceInstanceTags(d *schema.ResourceData) *compute.Tags { // Calculate the tags var tags *compute.Tags if v := d.Get("tags"); v != nil { - vs := v.(*schema.Set).List() + vs := v.(*schema.Set) tags = new(compute.Tags) - tags.Items = make([]string, len(vs)) - for i, v := range v.(*schema.Set).List() { + tags.Items = make([]string, vs.Len()) + for i, v := range vs.List() { tags.Items[i] = v.(string) } diff --git a/builtin/providers/google/resource_compute_instance_test.go b/builtin/providers/google/resource_compute_instance_test.go index 78c01e04e..f765a44c4 100644 --- a/builtin/providers/google/resource_compute_instance_test.go +++ b/builtin/providers/google/resource_compute_instance_test.go @@ -52,9 +52,6 @@ func TestAccComputeInstance_IP(t *testing.T) { }) } -//!NB requires that disk with name terraform-test-disk is present in gce, -//if created as dependency then it tries to remove it while it is still attached -//to instance and that fails with an error func TestAccComputeInstance_disks(t *testing.T) { var instance compute.Instance @@ -66,6 +63,8 @@ func TestAccComputeInstance_disks(t *testing.T) { resource.TestStep{ Config: testAccComputeInstance_disks, Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true), testAccCheckComputeInstanceDisk(&instance, "terraform-test-disk", false, false), ), @@ -287,6 +286,13 @@ resource "google_compute_instance" "foobar" { }` const testAccComputeInstance_disks = ` +resource "google_compute_disk" "foobar" { + name = "terraform-test-disk" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + resource "google_compute_instance" "foobar" { name = "terraform-test" machine_type = "n1-standard-1" @@ -297,9 +303,8 @@ resource "google_compute_instance" "foobar" { } disk { - disk = "terraform-test-disk" + disk = "${google_compute_disk.foobar.name}" auto_delete = false - type = "pd-ssd" } network { diff --git a/builtin/providers/heroku/config.go b/builtin/providers/heroku/config.go index ebaf6380e..2ae7d74fc 100644 --- a/builtin/providers/heroku/config.go +++ b/builtin/providers/heroku/config.go @@ -3,29 +3,18 @@ package heroku import ( "log" "net/http" - "os" "github.com/cyberdelia/heroku-go/v3" ) type Config struct { - APIKey string `mapstructure:"api_key"` - Email string `mapstructure:"email"` + Email string + APIKey string } // Client() returns a new Service for accessing Heroku. // func (c *Config) Client() (*heroku.Service, error) { - - // If we have env vars set (like in the acc) tests, - // we need to override the values passed in here. - if v := os.Getenv("HEROKU_EMAIL"); v != "" { - c.Email = v - } - if v := os.Getenv("HEROKU_API_KEY"); v != "" { - c.APIKey = v - } - service := heroku.NewService(&http.Client{ Transport: &heroku.Transport{ Username: c.Email, diff --git a/builtin/providers/heroku/provider.go b/builtin/providers/heroku/provider.go index dabd9cda5..d8c6ced23 100644 --- a/builtin/providers/heroku/provider.go +++ b/builtin/providers/heroku/provider.go @@ -2,10 +2,10 @@ package heroku import ( "log" + "os" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/mapstructure" ) // Provider returns a terraform.ResourceProvider. 
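// Editorial aside, not part of the patch: several providers in this change
// (dnsimple, google, mailgun, and heroku in the hunk that follows) swap their
// ad-hoc os.Getenv overrides for a schema DefaultFunc built by the small
// envDefaultFunc closure shown in those hunks. Below is a minimal,
// self-contained sketch of that pattern; the defaultFunc type merely stands
// in for helper/schema's SchemaDefaultFunc, and the example keys are for
// illustration only.
package main

import (
	"fmt"
	"os"
)

// defaultFunc mirrors the shape of schema.SchemaDefaultFunc.
type defaultFunc func() (interface{}, error)

// envDefault returns a closure that supplies the named environment variable
// as the field's default, or nil (no default) when it is unset.
func envDefault(k string) defaultFunc {
	return func() (interface{}, error) {
		if v := os.Getenv(k); v != "" {
			return v, nil
		}
		return nil, nil
	}
}

func main() {
	os.Setenv("HEROKU_EMAIL", "ops@example.com") // pretend the env is configured
	for _, k := range []string{"HEROKU_EMAIL", "HEROKU_API_KEY"} {
		v, _ := envDefault(k)()
		// Prints the e-mail for HEROKU_EMAIL, then <nil> if HEROKU_API_KEY is unset.
		fmt.Printf("%s default: %v\n", k, v)
	}
}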
@@ -13,13 +13,15 @@ func Provider() terraform.ResourceProvider { return &schema.Provider{ Schema: map[string]*schema.Schema{ "email": &schema.Schema{ - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + DefaultFunc: envDefaultFunc("HEROKU_EMAIL"), }, "api_key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + DefaultFunc: envDefaultFunc("HEROKU_API_KEY"), }, }, @@ -34,11 +36,20 @@ func Provider() terraform.ResourceProvider { } } +func envDefaultFunc(k string) schema.SchemaDefaultFunc { + return func() (interface{}, error) { + if v := os.Getenv(k); v != "" { + return v, nil + } + + return nil, nil + } +} + func providerConfigure(d *schema.ResourceData) (interface{}, error) { - var config Config - configRaw := d.Get("").(map[string]interface{}) - if err := mapstructure.Decode(configRaw, &config); err != nil { - return nil, err + config := Config{ + Email: d.Get("email").(string), + APIKey: d.Get("api_key").(string), } log.Println("[INFO] Initializing Heroku client") diff --git a/builtin/providers/heroku/provider_test.go b/builtin/providers/heroku/provider_test.go index 6517198b5..189d39d8e 100644 --- a/builtin/providers/heroku/provider_test.go +++ b/builtin/providers/heroku/provider_test.go @@ -4,7 +4,6 @@ import ( "os" "testing" - "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" ) @@ -29,39 +28,6 @@ func TestProvider_impl(t *testing.T) { var _ terraform.ResourceProvider = Provider() } -func TestProviderConfigure(t *testing.T) { - var expectedKey string - var expectedEmail string - - if v := os.Getenv("HEROKU_EMAIL"); v != "" { - expectedEmail = v - } else { - expectedEmail = "foo" - } - - if v := os.Getenv("HEROKU_API_KEY"); v != "" { - expectedKey = v - } else { - expectedKey = "foo" - } - - raw := map[string]interface{}{ - "api_key": expectedKey, - "email": expectedEmail, - } - - rawConfig, err := config.NewRawConfig(raw) - if err != nil { - t.Fatalf("err: %s", err) - } - - rp := Provider() - err = rp.Configure(terraform.NewResourceConfig(rawConfig)) - if err != nil { - t.Fatalf("err: %s", err) - } -} - func testAccPreCheck(t *testing.T) { if v := os.Getenv("HEROKU_EMAIL"); v == "" { t.Fatal("HEROKU_EMAIL must be set for acceptance tests") diff --git a/builtin/providers/heroku/resource_heroku_app_test.go b/builtin/providers/heroku/resource_heroku_app_test.go index 05162ee44..185d4b7d7 100644 --- a/builtin/providers/heroku/resource_heroku_app_test.go +++ b/builtin/providers/heroku/resource_heroku_app_test.go @@ -128,7 +128,7 @@ func testAccCheckHerokuAppAttributes(app *heroku.App) resource.TestCheckFunc { return fmt.Errorf("Bad region: %s", app.Region.Name) } - if app.Stack.Name != "cedar" { + if app.Stack.Name != "cedar-14" { return fmt.Errorf("Bad stack: %s", app.Stack.Name) } diff --git a/builtin/providers/mailgun/config.go b/builtin/providers/mailgun/config.go index 4c9d6d279..1ec3ddb4c 100644 --- a/builtin/providers/mailgun/config.go +++ b/builtin/providers/mailgun/config.go @@ -2,25 +2,18 @@ package mailgun import ( "log" - "os" "github.com/pearkes/mailgun" ) type Config struct { - APIKey string `mapstructure:"api_key"` + APIKey string } // Client() returns a new client for accessing mailgun. // func (c *Config) Client() (*mailgun.Client, error) { - // If we have env vars set (like in the acc) tests, - // we need to override the values passed in here. 
- if v := os.Getenv("MAILGUN_API_KEY"); v != "" { - c.APIKey = v - } - // We don't set a domain right away client, err := mailgun.NewClient(c.APIKey) diff --git a/builtin/providers/mailgun/provider.go b/builtin/providers/mailgun/provider.go index 833e682ad..6f3fe4fb3 100644 --- a/builtin/providers/mailgun/provider.go +++ b/builtin/providers/mailgun/provider.go @@ -2,10 +2,10 @@ package mailgun import ( "log" + "os" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/mapstructure" ) // Provider returns a terraform.ResourceProvider. @@ -13,8 +13,9 @@ func Provider() terraform.ResourceProvider { return &schema.Provider{ Schema: map[string]*schema.Schema{ "api_key": &schema.Schema{ - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + DefaultFunc: envDefaultFunc("MAILGUN_API_KEY"), }, }, @@ -26,14 +27,21 @@ func Provider() terraform.ResourceProvider { } } +func envDefaultFunc(k string) schema.SchemaDefaultFunc { + return func() (interface{}, error) { + if v := os.Getenv(k); v != "" { + return v, nil + } + + return nil, nil + } +} + func providerConfigure(d *schema.ResourceData) (interface{}, error) { - var config Config - configRaw := d.Get("").(map[string]interface{}) - if err := mapstructure.Decode(configRaw, &config); err != nil { - return nil, err + config := Config{ + APIKey: d.Get("api_key").(string), } log.Println("[INFO] Initializing Mailgun client") - return config.Client() } diff --git a/builtin/providers/mailgun/provider_test.go b/builtin/providers/mailgun/provider_test.go index 9b70fdbba..f0e8b5d0a 100644 --- a/builtin/providers/mailgun/provider_test.go +++ b/builtin/providers/mailgun/provider_test.go @@ -4,10 +4,8 @@ import ( "os" "testing" - "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" - "github.com/pearkes/mailgun" ) var testAccProviders map[string]terraform.ResourceProvider @@ -30,36 +28,6 @@ func TestProvider_impl(t *testing.T) { var _ terraform.ResourceProvider = Provider() } -func TestProviderConfigure(t *testing.T) { - var expectedKey string - - if v := os.Getenv("MAILGUN_API_KEY"); v != "" { - expectedKey = v - } else { - expectedKey = "foo" - } - - raw := map[string]interface{}{ - "api_key": expectedKey, - } - - rawConfig, err := config.NewRawConfig(raw) - if err != nil { - t.Fatalf("err: %s", err) - } - - rp := Provider().(*schema.Provider) - err = rp.Configure(terraform.NewResourceConfig(rawConfig)) - if err != nil { - t.Fatalf("err: %s", err) - } - - config := rp.Meta().(*mailgun.Client) - if config.ApiKey != expectedKey { - t.Fatalf("bad: %#v", config) - } -} - func testAccPreCheck(t *testing.T) { if v := os.Getenv("MAILGUN_API_KEY"); v == "" { t.Fatal("MAILGUN_API_KEY must be set for acceptance tests") diff --git a/command/init_test.go b/command/init_test.go index 2db54b047..ebd05e0f2 100644 --- a/command/init_test.go +++ b/command/init_test.go @@ -100,3 +100,45 @@ func TestInit_noArgs(t *testing.T) { t.Fatalf("bad: \n%s", ui.OutputWriter.String()) } } + +// https://github.com/hashicorp/terraform/issues/518 +func TestInit_dstInSrc(t *testing.T) { + dir := tempDir(t) + if err := os.MkdirAll(dir, 0755); err != nil { + t.Fatalf("err: %s", err) + } + + // Change to the temporary directory + cwd, err := os.Getwd() + if err != nil { + t.Fatalf("err: %s", err) + } + if err := os.Chdir(dir); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Chdir(cwd) + + if _, err := 
os.Create("issue518.tf"); err != nil { + t.Fatalf("err: %s", err) + } + + ui := new(cli.MockUi) + c := &InitCommand{ + Meta: Meta{ + ContextOpts: testCtxConfig(testProvider()), + Ui: ui, + }, + } + + args := []string{ + ".", + "foo", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + if _, err := os.Stat(filepath.Join(dir, "foo", "issue518.tf")); err != nil { + t.Fatalf("err: %s", err) + } +} diff --git a/config/config_test.go b/config/config_test.go index 1574ba83a..7b7348fd9 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -98,7 +98,7 @@ func TestConfigValidate_countUserVar(t *testing.T) { func TestConfigValidate_countVar(t *testing.T) { c := testConfig(t, "validate-count-var") if err := c.Validate(); err != nil { - t.Fatal("err: %s", err) + t.Fatalf("err: %s", err) } } @@ -154,7 +154,7 @@ func TestConfigValidate_outputBadField(t *testing.T) { func TestConfigValidate_pathVar(t *testing.T) { c := testConfig(t, "validate-path-var") if err := c.Validate(); err != nil { - t.Fatal("err: %s", err) + t.Fatalf("err: %s", err) } } diff --git a/config/interpolate_funcs.go b/config/interpolate_funcs.go index 2a94a8ad6..cbba62d0d 100644 --- a/config/interpolate_funcs.go +++ b/config/interpolate_funcs.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "io/ioutil" + "strconv" "strings" ) @@ -16,6 +17,7 @@ func init() { "file": interpolationFuncFile, "join": interpolationFuncJoin, "lookup": interpolationFuncLookup, + "element": interpolationFuncElement, } } @@ -87,3 +89,26 @@ func interpolationFuncLookup( return v, nil } + +// interpolationFuncElement implements the "element" function that allows +// a specific index to be looked up in a multi-variable value. Note that this will +// wrap if the index is larger than the number of elements in the multi-variable value. +func interpolationFuncElement( + vs map[string]string, args ...string) (string, error) { + if len(args) != 2 { + return "", fmt.Errorf( + "element expects 2 arguments, got %d", len(args)) + } + + list := strings.Split(args[0], InterpSplitDelim) + + index, err := strconv.Atoi(args[1]) + if err != nil { + return "", fmt.Errorf( + "invalid number for index, got %s", args[1]) + } + + v := list[index % len(list)] + + return v, nil +} diff --git a/config/interpolate_funcs_test.go b/config/interpolate_funcs_test.go index 93bb979c9..332b9af4b 100644 --- a/config/interpolate_funcs_test.go +++ b/config/interpolate_funcs_test.go @@ -189,3 +189,48 @@ func TestInterpolateFuncLookup(t *testing.T) { } } } + +func TestInterpolateFuncElement(t *testing.T) { + cases := []struct { + Args []string + Result string + Error bool + }{ + { + []string{"foo" + InterpSplitDelim + "baz", "1"}, + "baz", + false, + }, + + { + []string{"foo", "0"}, + "foo", + false, + }, + + // Invalid index should wrap vs. out-of-bounds + { + []string{"foo" + InterpSplitDelim + "baz", "2"}, + "foo", + false, + }, + + // Too many args + { + []string{"foo" + InterpSplitDelim + "baz", "0", "1"}, + "", + true, + }, + } + + for i, tc := range cases { + actual, err := interpolationFuncElement(nil, tc.Args...) 
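		// Illustrative aside, not part of the patch: interpolationFuncElement
		// wraps out-of-range indexes via list[index%len(list)], so for the
		// two-element list "foo"/"baz" above, an index of "2" yields "foo"
		// again (2 % 2 == 0) -- exactly what the "wrap vs. out-of-bounds"
		// case in this table asserts.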
+ if (err != nil) != tc.Error { + t.Fatalf("%d: err: %s", i, err) + } + + if actual != tc.Result { + t.Fatalf("%d: bad: %#v", i, actual) + } + } +} diff --git a/config/module/copy_dir.go b/config/module/copy_dir.go index a6628d1d7..f2ae63b77 100644 --- a/config/module/copy_dir.go +++ b/config/module/copy_dir.go @@ -39,6 +39,11 @@ func copyDir(dst, src string) error { // If we have a directory, make that subdirectory, then continue // the walk. if info.IsDir() { + if path == filepath.Join(src, dst) { + // dst is in src; don't walk it. + return nil + } + if err := os.MkdirAll(dstPath, 0755); err != nil { return err } diff --git a/config/module/get_file_test.go b/config/module/get_file_test.go index 7cc69bccb..4c9f6126a 100644 --- a/config/module/get_file_test.go +++ b/config/module/get_file_test.go @@ -88,7 +88,7 @@ func TestFileGetter_dirSymlink(t *testing.T) { // Make a symlink if err := os.Symlink(dst2, dst); err != nil { - t.Fatalf("err: %s") + t.Fatalf("err: %s", err) } // With a dir that exists that isn't a symlink diff --git a/helper/schema/schema.go b/helper/schema/schema.go index 7cd0fe711..be5a56aa3 100644 --- a/helper/schema/schema.go +++ b/helper/schema/schema.go @@ -338,7 +338,7 @@ func (m schemaMap) Input( case TypeString: value, err = m.inputString(input, k, v) default: - panic(fmt.Sprintf("Unknown type for input: %s", v.Type)) + panic(fmt.Sprintf("Unknown type for input: %#v", v.Type)) } if err != nil { @@ -653,7 +653,7 @@ func (m schemaMap) diffString( var err error n, err = schema.DefaultFunc() if err != nil { - return fmt.Errorf("%s, error loading default: %s", err) + return fmt.Errorf("%s, error loading default: %s", k, err) } } } @@ -909,7 +909,7 @@ func (m schemaMap) validatePrimitive( return nil, []error{err} } default: - panic(fmt.Sprintf("Unknown validation type: %s", schema.Type)) + panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) } return nil, nil diff --git a/terraform/context.go b/terraform/context.go index e2d8bbd33..d481bdc17 100644 --- a/terraform/context.go +++ b/terraform/context.go @@ -175,7 +175,7 @@ func (c *Context) Input(mode InputMode) error { case config.VariableTypeString: // Good! 
default: - panic(fmt.Sprintf("Unknown variable type: %s", v.Type())) + panic(fmt.Sprintf("Unknown variable type: %#v", v.Type())) } var defaultString string @@ -483,7 +483,7 @@ func (c *walkContext) Walk() error { case walkValidate: walkFn = c.validateWalkFn() default: - panic(fmt.Sprintf("unknown operation: %s", c.Operation)) + panic(fmt.Sprintf("unknown operation: %#v", c.Operation)) } if err := g.Walk(walkFn); err != nil { @@ -523,7 +523,7 @@ func (c *walkContext) Walk() error { // On Apply, we prune so that we don't do outputs if we destroyed mod.prune() } - if len(mod.Resources) == 0 { + if len(mod.Resources) == 0 && len(conf.Resources) != 0 { mod.Outputs = nil return nil } @@ -550,7 +550,14 @@ func (c *walkContext) Walk() error { } } if vraw != nil { - outputs[o.Name] = vraw.(string) + if list, ok := vraw.([]interface{}); ok { + vraw = list[0] + } + if s, ok := vraw.(string); ok { + outputs[o.Name] = s + } else { + return fmt.Errorf("Type of output '%s' is not a string: %#v", o.Name, vraw) + } } } @@ -922,6 +929,13 @@ func (c *walkContext) planDestroyWalkFn() depgraph.WalkFunc { walkFn = func(n *depgraph.Noun) error { switch m := n.Meta.(type) { case *GraphNodeModule: + // Set the destroy bool on the module + md := result.Diff.ModuleByPath(m.Path) + if md == nil { + md = result.Diff.AddModule(m.Path) + } + md.Destroy = true + // Build another walkContext for this module and walk it. wc := c.Context.walkContext(c.Operation, m.Path) diff --git a/terraform/context_test.go b/terraform/context_test.go index 5ad260763..b19d7945c 100644 --- a/terraform/context_test.go +++ b/terraform/context_test.go @@ -913,6 +913,35 @@ func TestContextApply(t *testing.T) { } } +func TestContextApply_emptyModule(t *testing.T) { + m := testModule(t, "apply-empty-module") + p := testProvider("aws") + p.ApplyFn = testApplyFn + p.DiffFn = testDiffFn + ctx := testContext(t, &ContextOpts{ + Module: m, + Providers: map[string]ResourceProviderFactory{ + "aws": testProviderFuncFixed(p), + }, + }) + + if _, err := ctx.Plan(nil); err != nil { + t.Fatalf("err: %s", err) + } + + state, err := ctx.Apply() + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(state.String()) + actual = strings.Replace(actual, " ", "", -1) + expected := strings.TrimSpace(testTerraformApplyEmptyModuleStr) + if actual != expected { + t.Fatalf("bad: \n%s\nexpect:\n%s", actual, expected) + } +} + func TestContextApply_createBeforeDestroy(t *testing.T) { m := testModule(t, "apply-good-create-before") p := testProvider("aws") @@ -2416,6 +2445,55 @@ func TestContextApply_output(t *testing.T) { } } +func TestContextApply_outputInvalid(t *testing.T) { + m := testModule(t, "apply-output-invalid") + p := testProvider("aws") + p.ApplyFn = testApplyFn + p.DiffFn = testDiffFn + ctx := testContext(t, &ContextOpts{ + Module: m, + Providers: map[string]ResourceProviderFactory{ + "aws": testProviderFuncFixed(p), + }, + }) + + _, err := ctx.Plan(nil) + if err == nil { + t.Fatalf("err: %s", err) + } + if !strings.Contains(err.Error(), "is not a string") { + t.Fatalf("err: %s", err) + } +} + +func TestContextApply_outputList(t *testing.T) { + m := testModule(t, "apply-output-list") + p := testProvider("aws") + p.ApplyFn = testApplyFn + p.DiffFn = testDiffFn + ctx := testContext(t, &ContextOpts{ + Module: m, + Providers: map[string]ResourceProviderFactory{ + "aws": testProviderFuncFixed(p), + }, + }) + + if _, err := ctx.Plan(nil); err != nil { + t.Fatalf("err: %s", err) + } + + state, err := ctx.Apply() + if err != nil { + 
t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(testTerraformApplyOutputListStr) + if actual != expected { + t.Fatalf("bad: \n%s", actual) + } +} + func TestContextApply_outputMulti(t *testing.T) { m := testModule(t, "apply-output-multi") p := testProvider("aws") @@ -2656,7 +2734,7 @@ func TestContextApply_createBefore_depends(t *testing.T) { // Test that things were managed _in the right order_ order := h.States diffs := h.Diffs - if order[0].ID != "bar" || diffs[0].Destroy { + if order[0].ID != "" || diffs[0].Destroy { t.Fatalf("should create new instance first: %#v", order) } @@ -2669,6 +2747,93 @@ func TestContextApply_createBefore_depends(t *testing.T) { } } +func TestContextApply_singleDestroy(t *testing.T) { + m := testModule(t, "apply-depends-create-before") + h := new(HookRecordApplyOrder) + p := testProvider("aws") + + invokeCount := 0 + p.ApplyFn = func(info *InstanceInfo, s *InstanceState, d *InstanceDiff) (*InstanceState, error) { + invokeCount++ + switch invokeCount { + case 1: + if d.Destroy { + t.Fatalf("should not destroy") + } + if s.ID != "" { + t.Fatalf("should not have ID") + } + case 2: + if d.Destroy { + t.Fatalf("should not destroy") + } + if s.ID != "baz" { + t.Fatalf("should have id") + } + case 3: + if !d.Destroy { + t.Fatalf("should destroy") + } + if s.ID == "" { + t.Fatalf("should have ID") + } + default: + t.Fatalf("bad invoke count %d", invokeCount) + } + return testApplyFn(info, s, d) + } + p.DiffFn = testDiffFn + state := &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "aws_instance.web": &ResourceState{ + Type: "aws_instance", + Primary: &InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "require_new": "ami-old", + }, + }, + }, + "aws_instance.lb": &ResourceState{ + Type: "aws_instance", + Primary: &InstanceState{ + ID: "baz", + Attributes: map[string]string{ + "instance": "bar", + }, + }, + }, + }, + }, + }, + } + ctx := testContext(t, &ContextOpts{ + Module: m, + Hooks: []Hook{h}, + Providers: map[string]ResourceProviderFactory{ + "aws": testProviderFuncFixed(p), + }, + State: state, + }) + + if _, err := ctx.Plan(nil); err != nil { + t.Fatalf("err: %s", err) + } + + h.Active = true + state, err := ctx.Apply() + if err != nil { + t.Fatalf("err: %s", err) + } + + if invokeCount != 3 { + t.Fatalf("bad: %d", invokeCount) + } +} + func TestContextPlan(t *testing.T) { m := testModule(t, "plan-good") p := testProvider("aws") diff --git a/terraform/diff.go b/terraform/diff.go index 3770a1d47..c5e821cdf 100644 --- a/terraform/diff.go +++ b/terraform/diff.go @@ -114,6 +114,7 @@ func (d *Diff) init() { type ModuleDiff struct { Path []string Resources map[string]*InstanceDiff + Destroy bool // Set only by the destroy plan } func (d *ModuleDiff) init() { @@ -192,6 +193,10 @@ func (d *ModuleDiff) IsRoot() bool { func (d *ModuleDiff) String() string { var buf bytes.Buffer + if d.Destroy { + buf.WriteString("DESTROY MODULE\n") + } + names := make([]string, 0, len(d.Resources)) for name, _ := range d.Resources { names = append(names, name) diff --git a/terraform/diff_test.go b/terraform/diff_test.go index dcee2b335..47e78c5e8 100644 --- a/terraform/diff_test.go +++ b/terraform/diff_test.go @@ -97,7 +97,7 @@ func TestModuleDiff_ChangeType(t *testing.T) { for i, tc := range cases { actual := tc.Diff.ChangeType() if actual != tc.Result { - t.Fatalf("%d: %s", i, actual) + t.Fatalf("%d: %#v", i, actual) } } } @@ -232,7 +232,7 @@ func 
TestInstanceDiff_ChangeType(t *testing.T) { for i, tc := range cases { actual := tc.Diff.ChangeType() if actual != tc.Result { - t.Fatalf("%d: %s", i, actual) + t.Fatalf("%d: %#v", i, actual) } } } diff --git a/terraform/graph.go b/terraform/graph.go index 0e561c882..1adcfbf3a 100644 --- a/terraform/graph.go +++ b/terraform/graph.go @@ -80,6 +80,8 @@ type GraphNodeModule struct { Config *config.Module Path []string Graph *depgraph.Graph + State *ModuleState + Flags ResourceFlag } // GraphNodeResource is a node type in the graph that represents a resource @@ -246,6 +248,9 @@ func Graph(opts *GraphOpts) (*depgraph.Graph, error) { // Add the orphan dependencies graphAddOrphanDeps(g, modState) + // Add the orphan module dependencies + graphAddOrphanModuleDeps(g, modState) + // Add the provider dependencies graphAddResourceProviderDeps(g) @@ -269,7 +274,7 @@ func Graph(opts *GraphOpts) (*depgraph.Graph, error) { // If we have a diff, then make sure to add that in if modDiff != nil { - if err := graphAddDiff(g, modDiff); err != nil { + if err := graphAddDiff(g, opts.Diff, modDiff); err != nil { return nil, err } } @@ -298,45 +303,70 @@ func Graph(opts *GraphOpts) (*depgraph.Graph, error) { // allows orphaned resources to be destroyed in the proper order. func graphEncodeDependencies(g *depgraph.Graph) { for _, n := range g.Nouns { - // Ignore any non-resource nodes - rn, ok := n.Meta.(*GraphNodeResource) - if !ok { - continue - } - r := rn.Resource + switch rn := n.Meta.(type) { + case *GraphNodeResource: + // If we are using create-before-destroy, there + // are some special depedencies injected on the + // deposed node that would cause a circular depedency + // chain if persisted. We must only handle the new node, + // node the deposed node. + r := rn.Resource + if r.Flags&FlagDeposed != 0 { + continue + } - // If we are using create-before-destroy, there - // are some special depedencies injected on the - // deposed node that would cause a circular depedency - // chain if persisted. We must only handle the new node, - // node the deposed node. 
- if r.Flags&FlagDeposed != 0 { - continue - } + // Update the dependencies + var inject []string + for _, dep := range n.Deps { + switch target := dep.Target.Meta.(type) { + case *GraphNodeModule: + inject = append(inject, dep.Target.Name) - // Update the dependencies - var inject []string - for _, dep := range n.Deps { - switch target := dep.Target.Meta.(type) { - case *GraphNodeModule: - inject = append(inject, dep.Target.Name) + case *GraphNodeResource: + if target.Resource.Id == r.Id { + continue + } + inject = append(inject, target.Resource.Id) - case *GraphNodeResource: - if target.Resource.Id == r.Id { - continue + case *GraphNodeResourceProvider: + // Do nothing + + default: + panic(fmt.Sprintf("Unknown graph node: %#v", dep.Target)) } - inject = append(inject, target.Resource.Id) + } - case *GraphNodeResourceProvider: - // Do nothing + // Update the dependencies + r.Dependencies = inject - default: - panic(fmt.Sprintf("Unknown graph node: %#v", dep.Target)) + case *GraphNodeModule: + // Update the dependencies + var inject []string + for _, dep := range n.Deps { + switch target := dep.Target.Meta.(type) { + case *GraphNodeModule: + if dep.Target.Name == n.Name { + continue + } + inject = append(inject, dep.Target.Name) + + case *GraphNodeResource: + inject = append(inject, target.Resource.Id) + + case *GraphNodeResourceProvider: + // Do nothing + + default: + panic(fmt.Sprintf("Unknown graph node: %#v", dep.Target)) + } + + } + + // Update the dependencies + if rn.State != nil { + rn.State.Dependencies = inject } } - - // Update the dependencies - r.Dependencies = inject } } @@ -357,6 +387,14 @@ func graphAddConfigModules( if n, err := graphModuleNoun(m.Name, m, g, opts); err != nil { return err } else { + // Attach the module state if any + if opts.State != nil { + module := n.Meta.(*GraphNodeModule) + module.State = opts.State.ModuleByPath(module.Path) + if module.State == nil { + module.State = opts.State.AddModule(module.Path) + } + } nounsList = append(nounsList, n) } } @@ -506,10 +544,22 @@ func graphAddConfigResources( // destroying the VPC's subnets first, whereas creating a VPC requires // doing it before the subnets are created. This function handles inserting // these nodes for you. -func graphAddDiff(g *depgraph.Graph, d *ModuleDiff) error { +func graphAddDiff(g *depgraph.Graph, gDiff *Diff, d *ModuleDiff) error { var nlist []*depgraph.Noun + var modules []*depgraph.Noun injected := make(map[*depgraph.Dependency]struct{}) for _, n := range g.Nouns { + // A module is being destroyed if all it's resources are being + // destroyed (via a destroy plan) or if it is orphaned. Only in + // those cases do we need to handle depedency inversion. + if mod, ok := n.Meta.(*GraphNodeModule); ok { + md := gDiff.ModuleByPath(mod.Path) + if mod.Flags&FlagOrphan != 0 || (md != nil && md.Destroy) { + modules = append(modules, n) + } + continue + } + rn, ok := n.Meta.(*GraphNodeResource) if !ok { continue @@ -539,6 +589,8 @@ func graphAddDiff(g *depgraph.Graph, d *ModuleDiff) error { rn.Diff = d } + // If we are not expanding, then we assign the + // instance diff to the resource. var rd *InstanceDiff if rn.ExpandMode == ResourceExpandNone { rd = diffs[0] @@ -653,78 +705,81 @@ func graphAddDiff(g *depgraph.Graph, d *ModuleDiff) error { rn.Resource.Diff = rd } - // Go through each noun and make sure we calculate all the dependencies - // properly. 
- for _, n := range nlist { - deps := n.Deps - num := len(deps) - for i := 0; i < num; i++ { - dep := deps[i] + // Go through each resource and module and make sure we + // calculate all the dependencies properly. + invertDeps := [][]*depgraph.Noun{nlist, modules} + for _, list := range invertDeps { + for _, n := range list { + deps := n.Deps + num := len(deps) + for i := 0; i < num; i++ { + dep := deps[i] - // Check if this dependency was just injected, otherwise - // we will incorrectly flip the depedency twice. - if _, ok := injected[dep]; ok { - continue - } + // Check if this dependency was just injected, otherwise + // we will incorrectly flip the depedency twice. + if _, ok := injected[dep]; ok { + continue + } - switch target := dep.Target.Meta.(type) { - case *GraphNodeResource: - // If the other node is also being deleted, - // we must be deleted first. E.g. if A -> B, - // then when we create, B is created first then A. - // On teardown, A is destroyed first, then B. - // Thus we must flip our depedency and instead inject - // it on B. - for _, n2 := range nlist { - rn2 := n2.Meta.(*GraphNodeResource) - if target.Resource.Id == rn2.Resource.Id { - newDep := &depgraph.Dependency{ - Name: n.Name, - Source: n2, - Target: n, + switch target := dep.Target.Meta.(type) { + case *GraphNodeResource: + // If the other node is also being deleted, + // we must be deleted first. E.g. if A -> B, + // then when we create, B is created first then A. + // On teardown, A is destroyed first, then B. + // Thus we must flip our depedency and instead inject + // it on B. + for _, n2 := range nlist { + rn2 := n2.Meta.(*GraphNodeResource) + if target.Resource.Id == rn2.Resource.Id { + newDep := &depgraph.Dependency{ + Name: n.Name, + Source: n2, + Target: n, + } + injected[newDep] = struct{}{} + n2.Deps = append(n2.Deps, newDep) + break } - injected[newDep] = struct{}{} - n2.Deps = append(n2.Deps, newDep) - break } + + // Drop the dependency. We may have created + // an inverse depedency if the dependent resource + // is also being deleted, but this dependence is + // no longer required. + deps[i], deps[num-1] = deps[num-1], nil + num-- + i-- + + case *GraphNodeModule: + // We invert any module dependencies so we're destroyed + // first, before any modules are applied. + newDep := &depgraph.Dependency{ + Name: n.Name, + Source: dep.Target, + Target: n, + } + dep.Target.Deps = append(dep.Target.Deps, newDep) + + // Drop the dependency. We may have created + // an inverse depedency if the dependent resource + // is also being deleted, but this dependence is + // no longer required. + deps[i], deps[num-1] = deps[num-1], nil + num-- + i-- + case *GraphNodeResourceProvider: + // Keep these around, but fix up the source to be ourselves + // rather than the old node. + newDep := *dep + newDep.Source = n + deps[i] = &newDep + default: + panic(fmt.Errorf("Unhandled depedency type: %#v", dep.Target.Meta)) } - - // Drop the dependency. We may have created - // an inverse depedency if the dependent resource - // is also being deleted, but this dependence is - // no longer required. - deps[i], deps[num-1] = deps[num-1], nil - num-- - i-- - - case *GraphNodeModule: - // We invert any module dependencies so we're destroyed - // first, before any modules are applied. - newDep := &depgraph.Dependency{ - Name: n.Name, - Source: dep.Target, - Target: n, - } - dep.Target.Deps = append(dep.Target.Deps, newDep) - - // Drop the dependency. 
We may have created - // an inverse depedency if the dependent resource - // is also being deleted, but this dependence is - // no longer required. - deps[i], deps[num-1] = deps[num-1], nil - num-- - i-- - case *GraphNodeResourceProvider: - // Keep these around, but fix up the source to be ourselves - // rather than the old node. - newDep := *dep - newDep.Source = n - deps[i] = &newDep - default: - panic(fmt.Errorf("Unhandled depedency type: %#v", dep.Target.Meta)) } + n.Deps = deps[:num] } - n.Deps = deps[:num] } // Add the nouns to the graph @@ -855,6 +910,10 @@ func graphAddModuleOrphans( if n, err := graphModuleNoun(k, nil, g, opts); err != nil { return err } else { + // Mark this module as being an orphan + module := n.Meta.(*GraphNodeModule) + module.Flags |= FlagOrphan + module.State = m nounsList = append(nounsList, n) } } @@ -914,6 +973,56 @@ func graphAddOrphanDeps(g *depgraph.Graph, mod *ModuleState) { } } +// graphAddOrphanModuleDeps adds the dependencies to the orphan +// modules based on their explicit Dependencies state. +func graphAddOrphanModuleDeps(g *depgraph.Graph, mod *ModuleState) { + for _, n := range g.Nouns { + module, ok := n.Meta.(*GraphNodeModule) + if !ok { + continue + } + if module.Flags&FlagOrphan == 0 { + continue + } + + // If we have no dependencies, then just continue + if len(module.State.Dependencies) == 0 { + continue + } + + for _, n2 := range g.Nouns { + // Don't ever depend on ourselves + if n2.Meta == n.Meta { + continue + } + + var compareName string + switch rn2 := n2.Meta.(type) { + case *GraphNodeModule: + compareName = n2.Name + case *GraphNodeResource: + compareName = rn2.Resource.Id + } + if compareName == "" { + continue + } + + for _, depName := range module.State.Dependencies { + if !strings.HasPrefix(depName, compareName) { + continue + } + dep := &depgraph.Dependency{ + Name: depName, + Source: n, + Target: n2, + } + n.Deps = append(n.Deps, dep) + break + } + } + } +} + // graphAddOrphans adds the orphans to the graph. func graphAddOrphans(g *depgraph.Graph, c *config.Config, mod *ModuleState) { meta := g.Meta.(*GraphMeta) @@ -1648,32 +1757,30 @@ func (n *GraphNodeResource) Expand() (*depgraph.Graph, error) { ModulePath: n.Resource.Info.ModulePath, } - // Determine the nodes to create. If we're just looking for the - // nodes to create, return that. - n.expand(g, count) - - // Add in the diff if we have it - if n.Diff != nil { - if err := graphAddDiff(g, n.Diff); err != nil { - return nil, err - } - } + // Do the initial expansion of the nodes, attaching diffs if + // applicable + n.expand(g, count, n.Diff) // Add all the variable dependencies graphAddVariableDeps(g) - // If we're just expanding the apply, then filter those out and - // return them now. - if n.ExpandMode == ResourceExpandApply { - return n.finalizeGraph(g, false) + // Filter the nodes depending on the expansion type + switch n.ExpandMode { + case ResourceExpandApply: + n.filterResources(g, false) + case ResourceExpandDestroy: + n.filterResources(g, true) + default: + panic(fmt.Sprintf("Unhandled expansion mode %d", n.ExpandMode)) } - return n.finalizeGraph(g, true) + // Return the finalized graph + return g, n.finalizeGraph(g) } // expand expands this resource and adds the resources to the graph. It // adds both create and destroy resources. 
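// Illustrative aside, not part of the patch: the reworked expand below also
// attaches instance diffs while handling count transitions. For example, when
// a resource goes from count = 2 back to count = 1, its surviving instance
// may still be keyed "aws_instance.foo.0" in the module diff, so the
// count == 1 path retries with the ".0" suffix; conversely, when count grows
// past 1, instance 0 falls back to the bare "aws_instance.foo" key.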
-func (n *GraphNodeResource) expand(g *depgraph.Graph, count int) { +func (n *GraphNodeResource) expand(g *depgraph.Graph, count int, diff *ModuleDiff) { // Create the list of nouns result := make([]*depgraph.Noun, 0, count) @@ -1727,13 +1834,70 @@ func (n *GraphNodeResource) expand(g *depgraph.Graph, count int) { } } + // Add in the diff if we have it + var inDiff *InstanceDiff + if diff != nil { + // Looup the instance diff + if d, ok := diff.Resources[name]; ok { + inDiff = d + } + + if inDiff == nil { + if count == 1 { + // If the count is one, check the state for ".0" + // appended, which might exist if we go from + // count > 1 to count == 1. + k := r.Id() + ".0" + inDiff = diff.Resources[k] + } else if i == 0 { + // If count is greater than one, check for state + // with just the ID, which might exist if we go + // from count == 1 to count > 1 + inDiff = diff.Resources[r.Id()] + } + } + } + + // Initialize a default state if not available if state == nil { state = &ResourceState{ Type: r.Type, } } - flags := FlagPrimary + // Prepare the diff if it exists + if inDiff != nil { + switch n.ExpandMode { + case ResourceExpandApply: + // Disable Destroy if we aren't doing a destroy expansion. + // There is a seperate expansion for the destruction action. + d := new(InstanceDiff) + *d = *inDiff + inDiff = d + inDiff.Destroy = false + + // If we require a new resource, there is a seperate delete + // phase, so the create phase must not have access to the ID. + if inDiff.RequiresNew() { + s := new(ResourceState) + *s = *state + state = s + state.Primary = nil + } + + case ResourceExpandDestroy: + // If we are doing a destroy, make sure it is exclusively + // a destroy, since there is a seperate expansion for the apply + inDiff = new(InstanceDiff) + inDiff.Destroy = true + + default: + panic(fmt.Sprintf("Unhandled expansion mode %d", n.ExpandMode)) + } + } + + // Inherit the existing flags! + flags := n.Resource.Flags if len(state.Tainted) > 0 { flags |= FlagHasTainted } @@ -1743,6 +1907,7 @@ func (n *GraphNodeResource) expand(g *depgraph.Graph, count int) { resource.CountIndex = i resource.State = state.Primary resource.Flags = flags + resource.Diff = inDiff // Add the result result = append(result, &depgraph.Noun{ @@ -1763,6 +1928,7 @@ func (n *GraphNodeResource) expand(g *depgraph.Graph, count int) { resource.Config = NewResourceConfig(nil) resource.State = rs.Primary resource.Flags = FlagOrphan + resource.Diff = &InstanceDiff{Destroy: true} noun := &depgraph.Noun{ Name: k, @@ -1790,8 +1956,11 @@ func (n *GraphNodeResource) copyResource(id string) *Resource { return &resource } -func (n *GraphNodeResource) finalizeGraph( - g *depgraph.Graph, destroy bool) (*depgraph.Graph, error) { +// filterResources is used to remove resources from the sub-graph based +// on the ExpandMode. This is because there is a Destroy sub-graph, and +// Apply sub-graph, and we cannot includes the same instances in both +// sub-graphs. +func (n *GraphNodeResource) filterResources(g *depgraph.Graph, destroy bool) { result := make([]*depgraph.Noun, 0, len(g.Nouns)) for _, n := range g.Nouns { rn, ok := n.Meta.(*GraphNodeResource) @@ -1799,44 +1968,23 @@ func (n *GraphNodeResource) finalizeGraph( continue } - // If the diff is nil, then we're not destroying, so append only - // in that case. 
- if rn.Resource.Diff == nil { - if !destroy { + if destroy { + if rn.Resource.Diff != nil && rn.Resource.Diff.Destroy { result = append(result, n) } - continue } - // If we are destroying, append it only if we care about destroys - if rn.Resource.Diff.Destroy { - if destroy { - result = append(result, n) - } - - continue - } - - // If this is an oprhan, we only care about it if we're destroying. - if rn.Resource.Flags&FlagOrphan != 0 { - if destroy { - result = append(result, n) - } - - continue - } - - // If we're not destroying, then add it only if we don't - // care about deploys. - if !destroy { + if rn.Resource.Flags&FlagOrphan != 0 || + rn.Resource.Diff == nil || !rn.Resource.Diff.Destroy { result = append(result, n) } } - - // Set the nouns to be only those we care about g.Nouns = result +} +// finalizeGraph is used to ensure the generated graph is valid +func (n *GraphNodeResource) finalizeGraph(g *depgraph.Graph) error { // Remove the dependencies that don't exist graphRemoveInvalidDeps(g) @@ -1845,10 +1993,9 @@ func (n *GraphNodeResource) finalizeGraph( // Validate if err := g.Validate(); err != nil { - return nil, err + return err } - - return g, nil + return nil } // matchingPrefixes takes a resource type and a set of resource diff --git a/terraform/graph_test.go b/terraform/graph_test.go index a9b8194e5..4f87e245b 100644 --- a/terraform/graph_test.go +++ b/terraform/graph_test.go @@ -731,6 +731,61 @@ func TestGraphAddDiff_module(t *testing.T) { } } +func TestGraphAddDiff_module_depends(t *testing.T) { + m := testModule(t, "graph-diff-module-dep") + diff := &Diff{ + Modules: []*ModuleDiff{ + &ModuleDiff{ + Path: rootModulePath, + Resources: map[string]*InstanceDiff{ + "aws_instance.foo": &InstanceDiff{ + Destroy: true, + }, + }, + }, + &ModuleDiff{ + Path: []string{"root", "child"}, + Destroy: true, + Resources: map[string]*InstanceDiff{ + "aws_instance.foo": &InstanceDiff{ + Destroy: true, + }, + }, + }, + }, + } + state := &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: []string{"root", "orphan"}, + Resources: map[string]*ResourceState{ + "aws_instance.dead": &ResourceState{ + Type: "aws_instance", + Primary: &InstanceState{ + ID: "dead", + }, + }, + }, + Dependencies: []string{ + "aws_instance.foo", + "module.child", + }, + }, + }, + } + + g, err := Graph(&GraphOpts{Module: m, Diff: diff, State: state}) + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTerraformGraphDiffModuleDependsStr) + if actual != expected { + t.Fatalf("bad:\n\n%s", actual) + } +} + func TestGraphAddDiff_createBeforeDestroy(t *testing.T) { m := testModule(t, "graph-diff-create-before") diff := &Diff{ @@ -909,7 +964,7 @@ func TestGraphEncodeDependencies_count(t *testing.T) { func TestGraphEncodeDependencies_module(t *testing.T) { m := testModule(t, "graph-modules") - g, err := Graph(&GraphOpts{Module: m}) + g, err := Graph(&GraphOpts{Module: m, State: &State{}}) if err != nil { t.Fatalf("err: %s", err) } @@ -928,6 +983,15 @@ func TestGraphEncodeDependencies_module(t *testing.T) { if web.Dependencies[1] != "module.consul" { t.Fatalf("bad: %#v", web) } + + mod := g.Noun("module.consul").Meta.(*GraphNodeModule) + deps := mod.State.Dependencies + if len(deps) != 1 { + t.Fatalf("Bad: %#v", deps) + } + if deps[0] != "aws_security_group.firewall" { + t.Fatalf("Bad: %#v", deps) + } } func TestGraph_orphan_dependencies(t *testing.T) { @@ -1012,6 +1076,57 @@ func TestGraph_orphanDependenciesModules(t *testing.T) { } } +func 
TestGraph_orphanModules_Dependencies(t *testing.T) { + m := testModule(t, "graph-modules") + state := &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + + Resources: map[string]*ResourceState{ + "aws_instance.foo": &ResourceState{ + Type: "aws_instance", + Primary: &InstanceState{ + ID: "foo", + }, + Dependencies: []string{ + "module.consul", + }, + }, + }, + }, + + // Add an orphan module + &ModuleState{ + Path: []string{"root", "orphan"}, + Resources: map[string]*ResourceState{ + "aws_instance.bar": &ResourceState{ + Type: "aws_instance", + Primary: &InstanceState{ + ID: "bar", + }, + }, + }, + Dependencies: []string{ + "aws_instance.foo", + "aws_instance.web", + }, + }, + }, + } + + g, err := Graph(&GraphOpts{Module: m, State: state}) + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(testTerraformGraphOrphanedModuleDepsStr) + if actual != expected { + t.Fatalf("bad:\n\nactual:\n%s\n\nexpected:\n%s", actual, expected) + } +} + func TestGraphNodeResourceExpand(t *testing.T) { m := testModule(t, "graph-resource-expand") @@ -1214,6 +1329,22 @@ root root -> module.child ` +const testTerraformGraphDiffModuleDependsStr = ` +root: root +aws_instance.foo + aws_instance.foo -> aws_instance.foo (destroy) +aws_instance.foo (destroy) + aws_instance.foo (destroy) -> module.child + aws_instance.foo (destroy) -> module.orphan +module.child + module.child -> module.orphan +module.orphan +root + root -> aws_instance.foo + root -> module.child + root -> module.orphan +` + const testTerraformGraphModulesStr = ` root: root aws_instance.web @@ -1382,6 +1513,33 @@ root root -> module.consul ` +const testTerraformGraphOrphanedModuleDepsStr = ` +root: root +aws_instance.foo + aws_instance.foo -> module.consul + aws_instance.foo -> provider.aws +aws_instance.web + aws_instance.web -> aws_security_group.firewall + aws_instance.web -> module.consul + aws_instance.web -> provider.aws +aws_security_group.firewall + aws_security_group.firewall -> provider.aws +module.consul + module.consul -> aws_security_group.firewall + module.consul -> provider.aws +module.orphan + module.orphan -> aws_instance.foo + module.orphan -> aws_instance.web + module.orphan -> provider.aws +provider.aws +root + root -> aws_instance.foo + root -> aws_instance.web + root -> aws_security_group.firewall + root -> module.consul + root -> module.orphan +` + const testTerraformGraphResourceExpandStr = ` root: root aws_instance.web.0 diff --git a/terraform/state.go b/terraform/state.go index f13ecf109..eb985c37a 100644 --- a/terraform/state.go +++ b/terraform/state.go @@ -185,6 +185,20 @@ type ModuleState struct { // N instances underneath, although a user only needs to think // about the 1:1 case. Resources map[string]*ResourceState `json:"resources"` + + // Dependencies are a list of things that this module relies on + // existing to remain intact. For example: an module may depend + // on a VPC ID given by an aws_vpc resource. + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a module that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on,omitempty"` } // IsRoot says whether or not this module diff is for the root module. 
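The new `Dependencies` field above is persisted under the `depends_on` key of a module's state. A minimal sketch of that serialization, using a simplified stand-in struct rather than the real `terraform.ModuleState` (which carries more fields):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// moduleState is a simplified stand-in for terraform.ModuleState, kept
// only to show how the Dependencies slice serializes as "depends_on".
type moduleState struct {
	Path         []string `json:"path"`
	Dependencies []string `json:"depends_on,omitempty"`
}

func main() {
	// An orphaned module that still depends on a root resource and
	// another module, mirroring the test fixture above.
	m := moduleState{
		Path:         []string{"root", "orphan"},
		Dependencies: []string{"aws_instance.foo", "module.child"},
	}

	out, err := json.MarshalIndent(&m, "", "  ")
	if err != nil {
		panic(err)
	}

	// Prints the module path plus a "depends_on" list containing both
	// dependency names; omitempty drops the key when the list is empty.
	fmt.Println(string(out))
}
```

Because of `omitempty`, state files written before this change simply lack the key and continue to decode with a nil `Dependencies` slice.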
@@ -280,12 +294,12 @@ func (m *ModuleState) GoString() string { } func (m *ModuleState) String() string { - if len(m.Resources) == 0 { - return "" - } - var buf bytes.Buffer + if len(m.Resources) == 0 { + buf.WriteString("") + } + names := make([]string, 0, len(m.Resources)) for name, _ := range m.Resources { names = append(names, name) diff --git a/terraform/state_test.go b/terraform/state_test.go index 9ee251745..57e1308c1 100644 --- a/terraform/state_test.go +++ b/terraform/state_test.go @@ -180,6 +180,9 @@ func TestReadWriteState(t *testing.T) { Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, + Dependencies: []string{ + "aws_instance.bar", + }, Resources: map[string]*ResourceState{ "foo": &ResourceState{ Primary: &InstanceState{ diff --git a/terraform/terraform_test.go b/terraform/terraform_test.go index 543862950..6ebd2f53f 100644 --- a/terraform/terraform_test.go +++ b/terraform/terraform_test.go @@ -164,6 +164,21 @@ aws_instance.foo: type = aws_instance ` +const testTerraformApplyEmptyModuleStr = ` + +Outputs: + +end = XXXX + +module.child: + +Outputs: + +aws_access_key = YYYYY +aws_route53_zone_id = XXXX +aws_secret_key = ZZZZ +` + const testTerraformApplyDependsCreateBeforeStr = ` aws_instance.lb: ID = foo @@ -377,6 +392,29 @@ Outputs: foo_num = 2 ` +const testTerraformApplyOutputListStr = ` +aws_instance.bar.0: + ID = foo + foo = bar + type = aws_instance +aws_instance.bar.1: + ID = foo + foo = bar + type = aws_instance +aws_instance.bar.2: + ID = foo + foo = bar + type = aws_instance +aws_instance.foo: + ID = foo + num = 2 + type = aws_instance + +Outputs: + +foo_num = bar,bar,bar +` + const testTerraformApplyOutputMultiStr = ` aws_instance.bar.0: ID = foo @@ -739,6 +777,7 @@ DIFF: DESTROY: aws_instance.foo module.child: + DESTROY MODULE DESTROY: aws_instance.foo STATE: diff --git a/terraform/test-fixtures/apply-empty-module/child/main.tf b/terraform/test-fixtures/apply-empty-module/child/main.tf new file mode 100644 index 000000000..6db38ea16 --- /dev/null +++ b/terraform/test-fixtures/apply-empty-module/child/main.tf @@ -0,0 +1,11 @@ +output "aws_route53_zone_id" { + value = "XXXX" +} + +output "aws_access_key" { + value = "YYYYY" +} + +output "aws_secret_key" { + value = "ZZZZ" +} diff --git a/terraform/test-fixtures/apply-empty-module/main.tf b/terraform/test-fixtures/apply-empty-module/main.tf new file mode 100644 index 000000000..50ce84f0b --- /dev/null +++ b/terraform/test-fixtures/apply-empty-module/main.tf @@ -0,0 +1,7 @@ +module "child" { + source = "./child" +} + +output "end" { + value = "${module.child.aws_route53_zone_id}" +} diff --git a/terraform/test-fixtures/apply-output-invalid/main.tf b/terraform/test-fixtures/apply-output-invalid/main.tf new file mode 100644 index 000000000..ee7a9048a --- /dev/null +++ b/terraform/test-fixtures/apply-output-invalid/main.tf @@ -0,0 +1,12 @@ +resource "aws_instance" "foo" { + num = "2" +} + +resource "aws_instance" "bar" { + foo = "bar" + count = 3 +} + +output "foo_num" { + value = 42 +} diff --git a/terraform/test-fixtures/apply-output-list/main.tf b/terraform/test-fixtures/apply-output-list/main.tf new file mode 100644 index 000000000..11b8107df --- /dev/null +++ b/terraform/test-fixtures/apply-output-list/main.tf @@ -0,0 +1,12 @@ +resource "aws_instance" "foo" { + num = "2" +} + +resource "aws_instance" "bar" { + foo = "bar" + count = 3 +} + +output "foo_num" { + value = ["${join(",", aws_instance.bar.*.foo)}"] +} diff --git a/terraform/test-fixtures/graph-diff-module-dep/child/main.tf 
b/terraform/test-fixtures/graph-diff-module-dep/child/main.tf new file mode 100644 index 000000000..84d1de905 --- /dev/null +++ b/terraform/test-fixtures/graph-diff-module-dep/child/main.tf @@ -0,0 +1,5 @@ +resource "aws_instance" "foo" {} + +output "bar" { + value = "baz" +} diff --git a/terraform/test-fixtures/graph-diff-module-dep/main.tf b/terraform/test-fixtures/graph-diff-module-dep/main.tf new file mode 100644 index 000000000..2f61386b2 --- /dev/null +++ b/terraform/test-fixtures/graph-diff-module-dep/main.tf @@ -0,0 +1,8 @@ +resource "aws_instance" "foo" {} + +module "child" { + source = "./child" + in = "${aws_instance.foo.id}" +} + + diff --git a/website/Gemfile.lock b/website/Gemfile.lock index 4f883cde3..f30c41661 100644 --- a/website/Gemfile.lock +++ b/website/Gemfile.lock @@ -1,6 +1,6 @@ GIT remote: git://github.com/hashicorp/middleman-hashicorp.git - revision: 66e68bbb66eef195542bfb334bf0942df6da2c6e + revision: b82c2c2fdc244cd0bd529ff27cfab24e43f07708 specs: middleman-hashicorp (0.1.0) bootstrap-sass (~> 3.2) @@ -11,6 +11,8 @@ GIT middleman-minify-html (~> 3.4) middleman-syntax (~> 2.0) rack-contrib (~> 1.1) + rack-rewrite (~> 1.5) + rack-ssl-enforcer (~> 0.2) redcarpet (~> 3.1) therubyracer (~> 0.12) thin (~> 1.6) @@ -18,18 +20,18 @@ GIT GEM remote: https://rubygems.org/ specs: - activesupport (4.1.6) + activesupport (4.1.8) i18n (~> 0.6, >= 0.6.9) json (~> 1.7, >= 1.7.7) minitest (~> 5.1) thread_safe (~> 0.1) tzinfo (~> 1.1) - bootstrap-sass (3.2.0.2) + bootstrap-sass (3.3.1.0) sass (~> 3.2) builder (3.2.2) celluloid (0.16.0) timers (~> 4.0.0) - chunky_png (1.3.2) + chunky_png (1.3.3) coffee-script (2.3.0) coffee-script-source execjs @@ -65,26 +67,26 @@ GEM http_parser.rb (0.6.0) i18n (0.6.11) json (1.8.1) - kramdown (1.4.2) + kramdown (1.5.0) less (2.6.0) commonjs (~> 0.2.7) libv8 (3.16.14.7) - listen (2.7.11) + listen (2.8.0) celluloid (>= 0.15.2) rb-fsevent (>= 0.9.3) rb-inotify (>= 0.9) - middleman (3.3.6) + middleman (3.3.7) coffee-script (~> 2.2) compass (>= 1.0.0, < 2.0.0) compass-import-once (= 1.0.5) execjs (~> 2.0) haml (>= 4.0.5) kramdown (~> 1.2) - middleman-core (= 3.3.6) + middleman-core (= 3.3.7) middleman-sprockets (>= 3.1.2) sass (>= 3.4.0, < 4.0) uglifier (~> 2.5) - middleman-core (3.3.6) + middleman-core (3.3.7) activesupport (~> 4.1.0) bundler (~> 1.1) erubis @@ -111,7 +113,7 @@ GEM middleman-syntax (2.0.0) middleman-core (~> 3.2) rouge (~> 1.0) - minitest (5.4.2) + minitest (5.4.3) multi_json (1.10.1) padrino-helpers (0.12.4) i18n (~> 0.6, >= 0.6.7) @@ -120,10 +122,12 @@ GEM padrino-support (0.12.4) activesupport (>= 3.1) rack (1.5.2) - rack-contrib (1.1.0) + rack-contrib (1.2.0) rack (>= 0.9.1) rack-livereload (0.3.15) rack + rack-rewrite (1.5.0) + rack-ssl-enforcer (0.2.8) rack-test (0.6.2) rack (>= 1.0) rb-fsevent (0.9.4) @@ -131,9 +135,9 @@ GEM ffi (>= 0.5.0) redcarpet (3.2.0) ref (1.0.5) - rouge (1.7.2) - sass (3.4.6) - sprockets (2.12.2) + rouge (1.7.3) + sass (3.4.8) + sprockets (2.12.3) hike (~> 1.2) multi_json (~> 1.0) rack (~> 1.0) @@ -157,7 +161,7 @@ GEM hitimes tzinfo (1.2.2) thread_safe (~> 0.1) - uber (0.0.9) + uber (0.0.11) uglifier (2.5.3) execjs (>= 0.3.0) json (>= 1.8.0) diff --git a/website/source/docs/commands/graph.html.markdown b/website/source/docs/commands/graph.html.markdown index d01c6d769..e0e17b9a6 100644 --- a/website/source/docs/commands/graph.html.markdown +++ b/website/source/docs/commands/graph.html.markdown @@ -37,5 +37,5 @@ Alternatively, the web-based [GraphViz Workspace](http://graphviz-dev.appspot.co can be used 
to quickly render DOT file inputs as well. Here is an example graph output: -![Graph Example](/images/graph-example.png) +![Graph Example](graph-example.png) diff --git a/website/source/docs/commands/output.html.markdown b/website/source/docs/commands/output.html.markdown index 0ac423937..ac1ab5a23 100644 --- a/website/source/docs/commands/output.html.markdown +++ b/website/source/docs/commands/output.html.markdown @@ -15,7 +15,7 @@ an output variable from the state file. Usage: `terraform output [options] NAME` -By default, `plan` requires only a variable name and looks in the +By default, `output` requires only a variable name and looks in the current directory for the state file to query. The command-line flags are all optional. The list of available flags are: diff --git a/website/source/docs/configuration/interpolation.html.md b/website/source/docs/configuration/interpolation.html.md index c0fe3372f..326d155fb 100644 --- a/website/source/docs/configuration/interpolation.html.md +++ b/website/source/docs/configuration/interpolation.html.md @@ -74,3 +74,10 @@ The supported built-in functions are: * `lookup(map, key)` - Performs a dynamic lookup into a mapping variable. + + * `element(list, index)` - Returns a single element from a list + at the given index. If the index is greater than the number of + elements, this function will wrap using a standard mod algorithm. + A list is only possible with splat variables from resources with + a count greater than one. + Example: `element(aws_subnet.foo.*.id, count.index)` diff --git a/website/source/docs/modules/usage.html.markdown b/website/source/docs/modules/usage.html.markdown index fb379cc83..8b671163a 100644 --- a/website/source/docs/modules/usage.html.markdown +++ b/website/source/docs/modules/usage.html.markdown @@ -92,13 +92,13 @@ For example, with a configuration similar to what we've built above, here is what the graph output looks like by default:
-![Terraform Module Graph](images/docs/module_graph.png) +![Terraform Module Graph](docs/module_graph.png)
But if we set `-module-depth=-1`, the graph will look like this:
-![Terraform Expanded Module Graph](images/docs/module_graph_expand.png) +![Terraform Expanded Module Graph](docs/module_graph_expand.png)
Other commands work similarly with modules. Note that the `-module-depth` diff --git a/website/source/docs/providers/aws/r/db_instance.html.markdown b/website/source/docs/providers/aws/r/db_instance.html.markdown index d0e96c952..167c8823b 100644 --- a/website/source/docs/providers/aws/r/db_instance.html.markdown +++ b/website/source/docs/providers/aws/r/db_instance.html.markdown @@ -24,6 +24,7 @@ resource "aws_db_instance" "default" { password = "bar" security_group_names = ["${aws_db_security_group.bar.name}"] db_subnet_group_name = "my_database_subnet_group" + parameter_group_name = "default.mysql5.6" } ``` @@ -53,6 +54,7 @@ The following arguments are supported: * `skip_final_snapshot` - (Optional) Enables skipping the final snapshot on deletion. * `security_group_names` - (Optional) List of DB Security Groups to associate. * `db_subnet_group_name` - (Optional) Name of DB subnet group +* `parameter_group_name` - (Optional) Name of the DB parameter group to associate. ## Attributes Reference diff --git a/website/source/docs/providers/aws/r/db_parameter_group.html.markdown b/website/source/docs/providers/aws/r/db_parameter_group.html.markdown new file mode 100644 index 000000000..855665f66 --- /dev/null +++ b/website/source/docs/providers/aws/r/db_parameter_group.html.markdown @@ -0,0 +1,49 @@ +--- +layout: "aws" +page_title: "AWS: aws_db_parameter_group" +sidebar_current: "docs-aws-resource-db-parameter-group" +--- + +# aws\_db\_parameter\_group + +Provides an RDS DB parameter group resource. + +## Example Usage + +``` +resource "aws_db_parameter_group" "default" { + name = "rds_pg" + family = "mysql5.6" + description = "RDS default parameter group" + + parameter { + name = "character_set_server" + value = "utf8" + } + + parameter { + name = "character_set_client" + value = "utf8" + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the DB parameter group. +* `family` - (Required) The family of the DB parameter group. +* `description` - (Required) The description of the DB parameter group. +* `parameter` - (Optional) A list of DB parameters to apply. + +Parameter blocks support the following: + +* `name` - (Required) The name of the DB parameter. +* `value` - (Required) The value of the DB parameter. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The db parameter group name. diff --git a/website/source/docs/providers/aws/r/instance.html.markdown b/website/source/docs/providers/aws/r/instance.html.markdown index d7eeef904..8ad171d8e 100644 --- a/website/source/docs/providers/aws/r/instance.html.markdown +++ b/website/source/docs/providers/aws/r/instance.html.markdown @@ -47,6 +47,16 @@ The following arguments are supported: * `iam_instance_profile` - (Optional) The IAM Instance Profile to launch the instance with. * `tags` - (Optional) A mapping of tags to assign to the resource. +* `block_device` - (Optional) A list of block devices to add. Their keys are documented below. + +Each `block_device` supports the following: + +* `device_name` - The name of the device to mount. +* `snapshot_id` - (Optional) The Snapshot ID to mount. +* `volume_type` - (Optional) The type of volume. Can be standard, gp2, or io1. Defaults to standard. +* `volume_size` - (Optional) The size of the volume in gigabytes. +* `delete_on_termination` - (Optional) Should the volume be destroyed on instance termination (defaults true). +* `encrypted` - (Optional) Should encryption be enabled (defaults false). 
## Attributes Reference diff --git a/website/source/docs/providers/aws/r/subnet.html.markdown b/website/source/docs/providers/aws/r/subnet.html.markdown index 7ce52ba99..3737ed380 100644 --- a/website/source/docs/providers/aws/r/subnet.html.markdown +++ b/website/source/docs/providers/aws/r/subnet.html.markdown @@ -16,6 +16,10 @@ Provides an VPC subnet resource. resource "aws_subnet" "main" { vpc_id = "${aws_vpc.main.id}" cidr_block = "10.0.1.0/24" + + tags { + Name = "Main" + } } ``` @@ -29,6 +33,7 @@ The following arguments are supported: that instances launched into the subnet should be assigned a public IP address. * `vpc_id` - (Required) The VPC ID. +* `tags` - (Optional) A mapping of tags to assign to the resource. ## Attributes Reference diff --git a/website/source/intro/examples/consul.html.markdown b/website/source/intro/examples/consul.html.markdown index 0f00f1b25..56e708603 100644 --- a/website/source/intro/examples/consul.html.markdown +++ b/website/source/intro/examples/consul.html.markdown @@ -51,7 +51,7 @@ infrastructure to be decoupled from its overall architecture. This enables details to be changed without updating the Terraform configuration. Outputs from Terraform can also be easily stored in Consul. One powerful -features this enables is using Consul for inventory management. If an +feature this enables is using Consul for inventory management. If an application relies on ELB for routing, Terraform can update the application's configuration directly by setting the ELB address into Consul. Any resource attribute can be stored in Consul, allowing an operator to capture anything diff --git a/website/source/intro/getting-started/modules.html.md b/website/source/intro/getting-started/modules.html.md index d7008dd12..d774ae0f6 100644 --- a/website/source/intro/getting-started/modules.html.md +++ b/website/source/intro/getting-started/modules.html.md @@ -62,6 +62,20 @@ of sources including Git, Mercurial, HTTP, and file paths. The other configurations are parameters to our module. Please fill them in with the proper values. +Prior to running any command such as `plan` with a configuration that +uses modules, you'll have to [get](/docs/commands/get.html) the modules. +This is done using the [get command](/docs/commands/get.html). + +``` +$ terraform get +... +``` + +This command will download the modules if they haven't been already. +By default, the command will not check for updates, so it is safe (and fast) +to run multiple times. You can use the `-u` flag to check and download +updates. + ## Planning and Apply Modules With the modules downloaded, we can now plan and apply it. If you run diff --git a/website/source/intro/getting-started/variables.html.md b/website/source/intro/getting-started/variables.html.md index e0c385509..e1f779c8b 100644 --- a/website/source/intro/getting-started/variables.html.md +++ b/website/source/intro/getting-started/variables.html.md @@ -56,7 +56,7 @@ the AWS provider with the given variables. There are three ways to assign variables. First, if you execute `terraform plan` or apply without doing -anythiing, Terraform will ask you to input the variables interactively. +anything, Terraform will ask you to input the variables interactively. These variables are not saved, but provides a nice user experience for getting started with Terraform. 
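For reference, the orphan-module dependency wiring added to `terraform/graph.go` earlier in this diff links an orphaned module to other graph nodes by prefix-matching the names recorded in its state `Dependencies` list. A small self-contained sketch of that matching rule, using plain strings in place of the real `depgraph` noun types:

```go
package main

import (
	"fmt"
	"strings"
)

// dependsOn reports whether a recorded dependency name (for example
// "aws_instance.foo" or "module.child") refers to a candidate node,
// using the same prefix rule as graphAddOrphanModuleDeps.
func dependsOn(depName, nodeName string) bool {
	return nodeName != "" && strings.HasPrefix(depName, nodeName)
}

func main() {
	// Dependency names an orphaned module might record in its state.
	deps := []string{"aws_instance.foo", "module.child"}

	// Candidate node names from the rest of the graph.
	nodes := []string{"aws_instance.foo", "aws_instance.bar", "module.child"}

	for _, node := range nodes {
		for _, dep := range deps {
			if dependsOn(dep, node) {
				fmt.Printf("orphan module -> %s (via %q)\n", node, dep)
				break
			}
		}
	}
}
```

Matching on prefix rather than exact equality means a dependency recorded with a count suffix (e.g. `aws_instance.foo.0`) can still resolve to the `aws_instance.foo` node.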
diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb index 2fe57f0d1..50bf07ffb 100644 --- a/website/source/layouts/aws.erb +++ b/website/source/layouts/aws.erb @@ -29,6 +29,10 @@ aws_db_subnet_group + > + aws_db_parameter_group + + > aws_eip