Merge branch 'master' of github.com:hashicorp/terraform

JT 2014-07-22 21:28:49 -07:00
commit 1fc73c2ad3
92 changed files with 5100 additions and 878 deletions

.gitignore

@ -4,4 +4,6 @@ example.tf
terraform.tfplan
terraform.tfstate
bin/
+config/y.go
+config/y.output
vendor/

Godeps

@ -0,0 +1,47 @@
{
"ImportPath": "github.com/hashicorp/terraform",
"GoVersion": "go1.3",
"Deps": [
{
"ImportPath": "code.google.com/p/go.crypto/ssh/terminal",
"Comment": "null-213",
"Rev": "aa2644fe4aa50e3b38d75187b4799b1f0c9ddcef"
},
{
"ImportPath": "github.com/mitchellh/cli",
"Rev": "eaf0e415fc517a431dca53c7b2e7559d42238ebe"
},
{
"ImportPath": "github.com/mitchellh/colorstring",
"Rev": "9797cb57e314a5a2c4d735ff060d5c6dcba57749"
},
{
"ImportPath": "github.com/mitchellh/copystructure",
"Rev": "d8968bce41ab42f33aeb143afa95e331cb1ba886"
},
{
"ImportPath": "github.com/mitchellh/go-libucl",
"Rev": "8da0f346ecb82446ebceafd6e8e9d9be4d549745"
},
{
"ImportPath": "github.com/mitchellh/mapstructure",
"Rev": "740c764bc6149d3f1806231418adb9f52c11bcbf"
},
{
"ImportPath": "github.com/mitchellh/osext",
"Rev": "0dd3f918b21bec95ace9dc86c7e70266cfc5c702"
},
{
"ImportPath": "github.com/mitchellh/panicwrap",
"Rev": "45cbfd3bae250c7676c077fb275be1a2968e066a"
},
{
"ImportPath": "github.com/mitchellh/prefixedio",
"Rev": "64119910ab902e336f308a1ed751a3721ba24c23"
},
{
"ImportPath": "github.com/mitchellh/reflectwalk",
"Rev": "e311b7bc81996c7e4cb570b2cbfd6448eba094b2"
}
]
}


@ -16,27 +16,31 @@ export CGO_CFLAGS CGO_LDFLAGS PATH
default: test
-dev: libucl
+dev: config/y.go libucl
@sh -c "$(CURDIR)/scripts/build.sh"
libucl: vendor/libucl/$(LIBUCL_NAME)
-test: libucl
+test: config/y.go libucl
TF_ACC= go test $(TEST) $(TESTARGS) -timeout=10s
-testacc: libucl
+testacc: config/y.go libucl
@if [ "$(TEST)" = "./..." ]; then \
echo "ERROR: Set TEST to a specific package"; \
exit 1; \
fi
TF_ACC=1 go test $(TEST) -v $(TESTARGS)
-testrace: libucl
+testrace: config/y.go libucl
TF_ACC= go test -race $(TEST) $(TESTARGS)
-updatedeps:
+updatedeps: config/y.go
go get -u -v ./...
+config/y.go: config/expr.y
+cd config/ && \
+go tool yacc -p "expr" expr.y
vendor/libucl/libucl.a: vendor/libucl
cd vendor/libucl && \
cmake cmake/ && \
@ -51,7 +55,9 @@ vendor/libucl/libucl.dll: vendor/libucl
vendor/libucl:
rm -rf vendor/libucl
mkdir -p vendor/libucl
-git clone https://github.com/vstakhov/libucl.git vendor/libucl
+git clone https://github.com/hashicorp/libucl.git vendor/libucl
+cd vendor/libucl && \
+git checkout fix-win32-compile
clean:
rm -rf vendor


@ -6,4 +6,4 @@ This is just to keep track of what we need to do in the short term.
* Providers/AWS: `aws_security_group` needs an update func
* Providers/AWS: `aws_eip` needs an update func (re-association, I think this
  is possible in the AWS API). `aws_eip` also needs improved tests
-* JSON overrides
+* Providers/AWS: `aws_rds` needs an update func (+goamz changes)


@ -0,0 +1,10 @@
package main
import (
"github.com/hashicorp/terraform/builtin/providers/heroku"
"github.com/hashicorp/terraform/plugin"
)
func main() {
plugin.Serve(new(heroku.ResourceProvider))
}


@ -0,0 +1 @@
package main


@ -0,0 +1,335 @@
package aws
import (
"fmt"
"log"
"strconv"
"time"
"github.com/hashicorp/terraform/flatmap"
"github.com/hashicorp/terraform/helper/config"
"github.com/hashicorp/terraform/helper/diff"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/goamz/rds"
)
func resource_aws_db_instance_create(
s *terraform.ResourceState,
d *terraform.ResourceDiff,
meta interface{}) (*terraform.ResourceState, error) {
p := meta.(*ResourceProvider)
conn := p.rdsconn
// Merge the diff into the state so that we have all the attributes
// properly.
rs := s.MergeDiff(d)
var err error
var attr string
opts := rds.CreateDBInstance{}
if attr = rs.Attributes["allocated_storage"]; attr != "" {
opts.AllocatedStorage, err = strconv.Atoi(attr)
opts.SetAllocatedStorage = true
}
if attr = rs.Attributes["backup_retention_period"]; attr != "" {
opts.BackupRetentionPeriod, err = strconv.Atoi(attr)
opts.SetBackupRetentionPeriod = true
}
if attr = rs.Attributes["iops"]; attr != "" {
opts.Iops, err = strconv.Atoi(attr)
opts.SetIops = true
}
if attr = rs.Attributes["port"]; attr != "" {
opts.Port, err = strconv.Atoi(attr)
opts.SetPort = true
}
if attr = rs.Attributes["availability_zone"]; attr != "" {
opts.AvailabilityZone = attr
}
if attr = rs.Attributes["instance_class"]; attr != "" {
opts.DBInstanceClass = attr
}
if attr = rs.Attributes["maintenance_window"]; attr != "" {
opts.PreferredMaintenanceWindow = attr
}
if attr = rs.Attributes["backup_window"]; attr != "" {
opts.PreferredBackupWindow = attr
}
if attr = rs.Attributes["multi_az"]; attr == "true" {
opts.MultiAZ = true
}
if attr = rs.Attributes["publicly_accessible"]; attr == "true" {
opts.PubliclyAccessible = true
}
if err != nil {
return nil, fmt.Errorf("Error parsing configuration: %s", err)
}
if _, ok := rs.Attributes["vpc_security_group_ids.#"]; ok {
opts.VpcSecurityGroupIds = expandStringList(flatmap.Expand(
rs.Attributes, "vpc_security_group_ids").([]interface{}))
}
if _, ok := rs.Attributes["security_group_names.#"]; ok {
opts.DBSecurityGroupNames = expandStringList(flatmap.Expand(
rs.Attributes, "security_group_names").([]interface{}))
}
opts.DBInstanceIdentifier = rs.Attributes["identifier"]
opts.DBName = rs.Attributes["name"]
opts.MasterUsername = rs.Attributes["username"]
opts.MasterUserPassword = rs.Attributes["password"]
opts.EngineVersion = rs.Attributes["engine_version"]
opts.EngineVersion = rs.Attributes["engine"]
// Don't keep the password around in the state
delete(rs.Attributes, "password")
log.Printf("[DEBUG] DB Instance create configuration: %#v", opts)
_, err = conn.CreateDBInstance(&opts)
if err != nil {
return nil, fmt.Errorf("Error creating DB Instance: %s", err)
}
rs.ID = rs.Attributes["identifier"]
log.Printf("[INFO] DB Instance ID: %s", rs.ID)
log.Println(
"[INFO] Waiting for DB Instance to be available")
stateConf := &resource.StateChangeConf{
Pending: []string{"creating", "backing-up"},
Target: "available",
Refresh: DBInstanceStateRefreshFunc(rs.ID, conn),
Timeout: 10 * time.Minute,
MinTimeout: 10 * time.Second,
Delay: 30 * time.Second, // Wait 30 secs before starting
}
// Wait, catching any errors
_, err = stateConf.WaitForState()
if err != nil {
return rs, err
}
v, err := resource_aws_db_instance_retrieve(rs.ID, conn)
if err != nil {
return rs, err
}
return resource_aws_db_instance_update_state(rs, v)
}
func resource_aws_db_instance_update(
s *terraform.ResourceState,
d *terraform.ResourceDiff,
meta interface{}) (*terraform.ResourceState, error) {
panic("Cannot update DB")
return nil, nil
}
func resource_aws_db_instance_destroy(
s *terraform.ResourceState,
meta interface{}) error {
p := meta.(*ResourceProvider)
conn := p.rdsconn
log.Printf("[DEBUG] DB Instance destroy: %v", s.ID)
opts := rds.DeleteDBInstance{DBInstanceIdentifier: s.ID}
if s.Attributes["skip_final_snapshot"] == "true" {
opts.SkipFinalSnapshot = true
}
log.Printf("[DEBUG] DB Instance destroy configuration: %v", opts)
_, err := conn.DeleteDBInstance(&opts)
if err != nil {
newerr, ok := err.(*rds.Error)
if ok && newerr.Code == "InvalidDBInstance.NotFound" {
return nil
}
return err
}
return nil
}
func resource_aws_db_instance_refresh(
s *terraform.ResourceState,
meta interface{}) (*terraform.ResourceState, error) {
p := meta.(*ResourceProvider)
conn := p.rdsconn
v, err := resource_aws_db_instance_retrieve(s.ID, conn)
if err != nil {
return s, err
}
return resource_aws_db_instance_update_state(s, v)
}
func resource_aws_db_instance_diff(
s *terraform.ResourceState,
c *terraform.ResourceConfig,
meta interface{}) (*terraform.ResourceDiff, error) {
b := &diff.ResourceBuilder{
Attrs: map[string]diff.AttrType{
"allocated_storage": diff.AttrTypeCreate,
"availability_zone": diff.AttrTypeCreate,
"backup_retention_period": diff.AttrTypeCreate,
"backup_window": diff.AttrTypeCreate,
"engine": diff.AttrTypeCreate,
"engine_version": diff.AttrTypeCreate,
"identifier": diff.AttrTypeCreate,
"instance_class": diff.AttrTypeCreate,
"iops": diff.AttrTypeCreate,
"maintenance_window": diff.AttrTypeCreate,
"multi_az": diff.AttrTypeCreate,
"name": diff.AttrTypeCreate,
"password": diff.AttrTypeCreate,
"port": diff.AttrTypeCreate,
"publicly_accessible": diff.AttrTypeCreate,
"username": diff.AttrTypeCreate,
"vpc_security_group_ids": diff.AttrTypeCreate,
"security_group_names": diff.AttrTypeCreate,
},
ComputedAttrs: []string{
"address",
"availability_zone",
"backup_retention_period",
"backup_window",
"engine_version",
"maintenance_window",
"endpoint",
"status",
"multi_az",
"port",
"address",
},
}
return b.Diff(s, c)
}
func resource_aws_db_instance_update_state(
s *terraform.ResourceState,
v *rds.DBInstance) (*terraform.ResourceState, error) {
s.Attributes["address"] = v.Address
s.Attributes["allocated_storage"] = strconv.Itoa(v.AllocatedStorage)
s.Attributes["availability_zone"] = v.AvailabilityZone
s.Attributes["backup_retention_period"] = strconv.Itoa(v.BackupRetentionPeriod)
s.Attributes["backup_window"] = v.PreferredBackupWindow
s.Attributes["endpoint"] = fmt.Sprintf("%s:%s", s.Attributes["address"], s.Attributes["port"])
s.Attributes["engine"] = v.Engine
s.Attributes["engine_version"] = v.EngineVersion
s.Attributes["instance_class"] = v.DBInstanceClass
s.Attributes["maintenance_window"] = v.PreferredMaintenanceWindow
s.Attributes["multi_az"] = strconv.FormatBool(v.MultiAZ)
s.Attributes["name"] = v.DBName
s.Attributes["port"] = strconv.Itoa(v.Port)
s.Attributes["status"] = v.DBInstanceStatus
s.Attributes["username"] = v.MasterUsername
// Flatten our group values
toFlatten := make(map[string]interface{})
if len(v.DBSecurityGroupNames) > 0 && v.DBSecurityGroupNames[0] != "" {
toFlatten["security_group_names"] = v.DBSecurityGroupNames
}
if len(v.VpcSecurityGroupIds) > 0 && v.VpcSecurityGroupIds[0] != "" {
toFlatten["vpc_security_group_ids"] = v.VpcSecurityGroupIds
}
for k, v := range flatmap.Flatten(toFlatten) {
s.Attributes[k] = v
}
return s, nil
}
func resource_aws_db_instance_retrieve(id string, conn *rds.Rds) (*rds.DBInstance, error) {
opts := rds.DescribeDBInstances{
DBInstanceIdentifier: id,
}
log.Printf("[DEBUG] DB Instance describe configuration: %#v", opts)
resp, err := conn.DescribeDBInstances(&opts)
if err != nil {
return nil, fmt.Errorf("Error retrieving DB Instances: %s", err)
}
if len(resp.DBInstances) != 1 ||
resp.DBInstances[0].DBInstanceIdentifier != id {
return nil, fmt.Errorf("Unable to find DB Instance: %#v", resp.DBInstances)
}
v := resp.DBInstances[0]
return &v, nil
}
func resource_aws_db_instance_validation() *config.Validator {
return &config.Validator{
Required: []string{
"allocated_storage",
"engine",
"engine_version",
"identifier",
"instance_class",
"name",
"password",
"username",
},
Optional: []string{
"availability_zone",
"backup_retention_period",
"backup_window",
"iops",
"maintenance_window",
"multi_az",
"port",
"publicly_accessible",
"vpc_security_group_ids.*",
"skip_final_snapshot",
"security_group_names.*",
},
}
}
func DBInstanceStateRefreshFunc(id string, conn *rds.Rds) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
v, err := resource_aws_db_instance_retrieve(id, conn)
if err != nil {
log.Printf("Error on retrieving DB Instance when waiting: %s", err)
return nil, "", err
}
return v, v.DBInstanceStatus, nil
}
}
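For context on the `vpc_security_group_ids.#` and `security_group_names.#` checks above: Terraform's flatmap package encodes list attributes as flat string keys, with `<name>.#` holding the element count and `<name>.<index>` holding each value. A minimal sketch of that round trip (not part of this commit; it just mirrors the flatmap calls already used in the resource code):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/flatmap"
)

func main() {
	// Flatten turns nested values into flat string keys, e.g.
	// "vpc_security_group_ids.#" = "2", "vpc_security_group_ids.0" = "sg-123", ...
	flat := flatmap.Flatten(map[string]interface{}{
		"vpc_security_group_ids": []interface{}{"sg-123", "sg-456"},
	})
	fmt.Println(flat["vpc_security_group_ids.#"], flat["vpc_security_group_ids.0"])

	// Expand reverses the encoding, which is what the create function above
	// does before handing the values to expandStringList.
	ids := flatmap.Expand(flat, "vpc_security_group_ids").([]interface{})
	fmt.Println(ids)
}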


@ -0,0 +1,163 @@
package aws
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/goamz/rds"
)
func TestAccAWSDBInstance(t *testing.T) {
var v rds.DBInstance
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSDBInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSDBInstanceConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSDBInstanceExists("aws_db_instance.bar", &v),
testAccCheckAWSDBInstanceAttributes(&v),
resource.TestCheckResourceAttr(
"aws_db_instance.bar", "instance_identifier", "foobarbaz-test-terraform"),
resource.TestCheckResourceAttr(
"aws_db_instance.bar", "allocated_storage", "foobarbaz-test-terraform"),
resource.TestCheckResourceAttr(
"aws_db_instance.bar", "engine", "mysql"),
resource.TestCheckResourceAttr(
"aws_db_instance.bar", "engine_version", "5.6.17"),
resource.TestCheckResourceAttr(
"aws_db_instance.bar", "instance_class", "db.t1.micro"),
resource.TestCheckResourceAttr(
"aws_db_instance.bar", "name", "baz"),
resource.TestCheckResourceAttr(
"aws_db_instance.bar", "password", "barbarbarbar"),
resource.TestCheckResourceAttr(
"aws_db_instance.bar", "username", "foo"),
resource.TestCheckResourceAttr(
"aws_db_instance.bar", "skip_final_snapshot", "true"),
resource.TestCheckResourceAttr(
"aws_db_instance.bar", "security_group_names.0", "secfoobarbaz-test-terraform"),
),
},
},
})
}
func testAccCheckAWSDBInstanceDestroy(s *terraform.State) error {
conn := testAccProvider.rdsconn
for _, rs := range s.Resources {
if rs.Type != "aws_db_instance" {
continue
}
// Try to find the Group
resp, err := conn.DescribeDBInstances(
&rds.DescribeDBInstances{
DBInstanceIdentifier: rs.ID,
})
if err == nil {
if len(resp.DBInstances) != 0 &&
resp.DBInstances[0].DBInstanceIdentifier == rs.ID {
return fmt.Errorf("DB Instance still exists")
}
}
// Verify the error
newerr, ok := err.(*rds.Error)
if !ok {
return err
}
if newerr.Code != "InvalidDBInstance.NotFound" {
return err
}
}
return nil
}
func testAccCheckAWSDBInstanceAttributes(v *rds.DBInstance) resource.TestCheckFunc {
return func(s *terraform.State) error {
if len(v.DBSecurityGroupNames) == 0 {
return fmt.Errorf("no sec names: %#v", v.DBSecurityGroupNames)
}
if v.Engine != "mysql" {
return fmt.Errorf("bad engine: %#v", v.Engine)
}
if v.EngineVersion != "5.6.17" {
return fmt.Errorf("bad engine_version: %#v", v.EngineVersion)
}
return nil
}
}
func testAccCheckAWSDBInstanceExists(n string, v *rds.DBInstance) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.ID == "" {
return fmt.Errorf("No DB Instance ID is set")
}
conn := testAccProvider.rdsconn
opts := rds.DescribeDBInstances{
DBInstanceIdentifier: rs.ID,
}
resp, err := conn.DescribeDBInstances(&opts)
if err != nil {
return err
}
if len(resp.DBInstances) != 1 ||
resp.DBInstances[0].DBInstanceIdentifier != rs.ID {
return fmt.Errorf("DB Instance not found")
}
*v = resp.DBInstances[0]
return nil
}
}
const testAccAWSDBInstanceConfig = `
resource "aws_db_security_group" "bar" {
name = "secfoobarbaz-test-terraform"
description = "just cuz"
ingress {
cidr = "10.0.0.1/24"
}
}
resource "aws_db_instance" "bar" {
identifier = "foobarbaz-test-terraform"
allocated_storage = 10
engine = "mysql"
engine_version = "5.6.17"
instance_class = "db.t1.micro"
name = "baz"
password = "barbarbarbar"
username = "foo"
skip_final_snapshot = true
security_group_names = ["${aws_db_security_group.bar.name}"]
}
`


@ -0,0 +1,279 @@
package aws
import (
"fmt"
"log"
"time"
"github.com/hashicorp/terraform/flatmap"
"github.com/hashicorp/terraform/helper/config"
"github.com/hashicorp/terraform/helper/diff"
"github.com/hashicorp/terraform/helper/multierror"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/goamz/rds"
)
func resource_aws_db_security_group_create(
s *terraform.ResourceState,
d *terraform.ResourceDiff,
meta interface{}) (*terraform.ResourceState, error) {
p := meta.(*ResourceProvider)
conn := p.rdsconn
// Merge the diff into the state so that we have all the attributes
// properly.
rs := s.MergeDiff(d)
var err error
var errs []error
opts := rds.CreateDBSecurityGroup{
DBSecurityGroupName: rs.Attributes["name"],
DBSecurityGroupDescription: rs.Attributes["description"],
}
log.Printf("[DEBUG] DB Security Group create configuration: %#v", opts)
_, err = conn.CreateDBSecurityGroup(&opts)
if err != nil {
return nil, fmt.Errorf("Error creating DB Security Group: %s", err)
}
rs.ID = rs.Attributes["name"]
log.Printf("[INFO] DB Security Group ID: %s", rs.ID)
v, err := resource_aws_db_security_group_retrieve(rs.ID, conn)
if err != nil {
return rs, err
}
log.Printf("%#v", rs.Attributes)
if _, ok := rs.Attributes["ingress.#"]; ok {
ingresses := flatmap.Expand(
rs.Attributes, "ingress").([]interface{})
for _, ing := range ingresses {
err = authorize_ingress_rule(ing, v.Name, conn)
if err != nil {
errs = append(errs, err)
}
}
if len(errs) > 0 {
return rs, &multierror.Error{Errors: errs}
}
}
log.Println(
"[INFO] Waiting for Ingress Authorizations to be authorized")
stateConf := &resource.StateChangeConf{
Pending: []string{"authorizing"},
Target: "authorized",
Refresh: DBSecurityGroupStateRefreshFunc(rs.ID, conn),
Timeout: 10 * time.Minute,
}
// Wait, catching any errors
_, err = stateConf.WaitForState()
if err != nil {
return rs, err
}
return resource_aws_db_security_group_update_state(rs, v)
}
func resource_aws_db_security_group_update(
s *terraform.ResourceState,
d *terraform.ResourceDiff,
meta interface{}) (*terraform.ResourceState, error) {
panic("Cannot update DB")
return nil, nil
}
func resource_aws_db_security_group_destroy(
s *terraform.ResourceState,
meta interface{}) error {
p := meta.(*ResourceProvider)
conn := p.rdsconn
log.Printf("[DEBUG] DB Security Group destroy: %v", s.ID)
opts := rds.DeleteDBSecurityGroup{DBSecurityGroupName: s.ID}
log.Printf("[DEBUG] DB Security Group destroy configuration: %v", opts)
_, err := conn.DeleteDBSecurityGroup(&opts)
if err != nil {
newerr, ok := err.(*rds.Error)
if ok && newerr.Code == "InvalidDBSecurityGroup.NotFound" {
return nil
}
return err
}
return nil
}
func resource_aws_db_security_group_refresh(
s *terraform.ResourceState,
meta interface{}) (*terraform.ResourceState, error) {
p := meta.(*ResourceProvider)
conn := p.rdsconn
v, err := resource_aws_db_security_group_retrieve(s.ID, conn)
if err != nil {
return s, err
}
return resource_aws_db_security_group_update_state(s, v)
}
func resource_aws_db_security_group_diff(
s *terraform.ResourceState,
c *terraform.ResourceConfig,
meta interface{}) (*terraform.ResourceDiff, error) {
b := &diff.ResourceBuilder{
Attrs: map[string]diff.AttrType{
"name": diff.AttrTypeCreate,
"description": diff.AttrTypeCreate,
"ingress": diff.AttrTypeCreate,
},
ComputedAttrs: []string{
"ingress_cidr",
"ingress_security_groups",
},
}
return b.Diff(s, c)
}
func resource_aws_db_security_group_update_state(
s *terraform.ResourceState,
v *rds.DBSecurityGroup) (*terraform.ResourceState, error) {
s.Attributes["name"] = v.Name
s.Attributes["description"] = v.Description
// Flatten our group values
toFlatten := make(map[string]interface{})
if len(v.EC2SecurityGroupOwnerIds) > 0 && v.EC2SecurityGroupOwnerIds[0] != "" {
toFlatten["ingress_security_groups"] = v.EC2SecurityGroupOwnerIds
}
if len(v.CidrIps) > 0 && v.CidrIps[0] != "" {
toFlatten["ingress_cidr"] = v.CidrIps
}
for k, v := range flatmap.Flatten(toFlatten) {
s.Attributes[k] = v
}
return s, nil
}
func resource_aws_db_security_group_retrieve(id string, conn *rds.Rds) (*rds.DBSecurityGroup, error) {
opts := rds.DescribeDBSecurityGroups{
DBSecurityGroupName: id,
}
log.Printf("[DEBUG] DB Security Group describe configuration: %#v", opts)
resp, err := conn.DescribeDBSecurityGroups(&opts)
if err != nil {
return nil, fmt.Errorf("Error retrieving DB Security Groups: %s", err)
}
if len(resp.DBSecurityGroups) != 1 ||
resp.DBSecurityGroups[0].Name != id {
return nil, fmt.Errorf("Unable to find DB Security Group: %#v", resp.DBSecurityGroups)
}
v := resp.DBSecurityGroups[0]
return &v, nil
}
// Authorizes the ingress rule on the db security group
func authorize_ingress_rule(ingress interface{}, dbSecurityGroupName string, conn *rds.Rds) error {
ing := ingress.(map[string]interface{})
opts := rds.AuthorizeDBSecurityGroupIngress{
DBSecurityGroupName: dbSecurityGroupName,
}
if attr, ok := ing["cidr"].(string); ok && attr != "" {
opts.Cidr = attr
}
if attr, ok := ing["security_group_name"].(string); ok && attr != "" {
opts.EC2SecurityGroupName = attr
}
if attr, ok := ing["security_group_id"].(string); ok && attr != "" {
opts.EC2SecurityGroupId = attr
}
if attr, ok := ing["security_group_owner_id"].(string); ok && attr != "" {
opts.EC2SecurityGroupOwnerId = attr
}
log.Printf("[DEBUG] Authorize ingress rule configuration: %#v", opts)
_, err := conn.AuthorizeDBSecurityGroupIngress(&opts)
if err != nil {
return fmt.Errorf("Error authorizing security group ingress: %s", err)
}
return nil
}
func resource_aws_db_security_group_validation() *config.Validator {
return &config.Validator{
Required: []string{
"name",
"description",
},
Optional: []string{
"ingress.*",
"ingress.*.cidr",
"ingress.*.security_group_name",
"ingress.*.security_group_id",
"ingress.*.security_group_owner_id",
},
}
}
func DBSecurityGroupStateRefreshFunc(id string, conn *rds.Rds) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
v, err := resource_aws_db_security_group_retrieve(id, conn)
if err != nil {
log.Printf("Error on retrieving DB Security Group when waiting: %s", err)
return nil, "", err
}
statuses := append(v.EC2SecurityGroupStatuses, v.CidrStatuses...)
for _, stat := range statuses {
// Not done
if stat != "authorized" {
return nil, "authorizing", nil
}
}
return v, "authorized", nil
}
}


@ -0,0 +1,142 @@
package aws
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/goamz/rds"
)
func TestAccAWSDBSecurityGroup(t *testing.T) {
var v rds.DBSecurityGroup
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSDBSecurityGroupDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSDBSecurityGroupConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSDBSecurityGroupExists("aws_db_security_group.bar", &v),
testAccCheckAWSDBSecurityGroupAttributes(&v),
resource.TestCheckResourceAttr(
"aws_db_security_group.bar", "name", "secgroup-terraform"),
resource.TestCheckResourceAttr(
"aws_db_security_group.bar", "description", "just cuz"),
resource.TestCheckResourceAttr(
"aws_db_security_group.bar", "ingress.0.cidr", "10.0.0.1/24"),
resource.TestCheckResourceAttr(
"aws_db_security_group.bar", "ingress.#", "1"),
),
},
},
})
}
func testAccCheckAWSDBSecurityGroupDestroy(s *terraform.State) error {
conn := testAccProvider.rdsconn
for _, rs := range s.Resources {
if rs.Type != "aws_db_security_group" {
continue
}
// Try to find the Group
resp, err := conn.DescribeDBSecurityGroups(
&rds.DescribeDBSecurityGroups{
DBSecurityGroupName: rs.ID,
})
if err == nil {
if len(resp.DBSecurityGroups) != 0 &&
resp.DBSecurityGroups[0].Name == rs.ID {
return fmt.Errorf("DB Security Group still exists")
}
}
// Verify the error
newerr, ok := err.(*rds.Error)
if !ok {
return err
}
if newerr.Code != "InvalidDBSecurityGroup.NotFound" {
return err
}
}
return nil
}
func testAccCheckAWSDBSecurityGroupAttributes(group *rds.DBSecurityGroup) resource.TestCheckFunc {
return func(s *terraform.State) error {
if len(group.CidrIps) == 0 {
return fmt.Errorf("no cidr: %#v", group.CidrIps)
}
if group.CidrIps[0] != "10.0.0.1/24" {
return fmt.Errorf("bad cidr: %#v", group.CidrIps)
}
if group.CidrStatuses[0] != "authorized" {
return fmt.Errorf("bad status: %#v", group.CidrStatuses)
}
if group.Name != "secgroup-terraform" {
return fmt.Errorf("bad name: %#v", group.Name)
}
if group.Description != "just cuz" {
return fmt.Errorf("bad description: %#v", group.Description)
}
return nil
}
}
func testAccCheckAWSDBSecurityGroupExists(n string, v *rds.DBSecurityGroup) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.ID == "" {
return fmt.Errorf("No DB Security Group ID is set")
}
conn := testAccProvider.rdsconn
opts := rds.DescribeDBSecurityGroups{
DBSecurityGroupName: rs.ID,
}
resp, err := conn.DescribeDBSecurityGroups(&opts)
if err != nil {
return err
}
if len(resp.DBSecurityGroups) != 1 ||
resp.DBSecurityGroups[0].Name != rs.ID {
return fmt.Errorf("DB Security Group not found")
}
*v = resp.DBSecurityGroups[0]
return nil
}
}
const testAccAWSDBSecurityGroupConfig = `
resource "aws_db_security_group" "bar" {
name = "secgroup-terraform"
description = "just cuz"
ingress {
cidr = "10.0.0.1/24"
}
}
`


@ -0,0 +1,208 @@
package aws
import (
"fmt"
"log"
"strconv"
"strings"
"time"
"github.com/hashicorp/terraform/flatmap"
"github.com/hashicorp/terraform/helper/config"
"github.com/hashicorp/terraform/helper/diff"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/goamz/route53"
)
func resource_aws_r53_record_validation() *config.Validator {
return &config.Validator{
Required: []string{
"zone_id",
"name",
"type",
"ttl",
"records.*",
},
}
}
func resource_aws_r53_record_create(
s *terraform.ResourceState,
d *terraform.ResourceDiff,
meta interface{}) (*terraform.ResourceState, error) {
p := meta.(*ResourceProvider)
conn := p.route53
// Merge the diff into the state so that we have all the attributes
// properly.
rs := s.MergeDiff(d)
// Get the record
rec, err := resource_aws_r53_build_record_set(rs)
if err != nil {
return rs, err
}
// Create the new records
req := &route53.ChangeResourceRecordSetsRequest{
Comment: "Managed by Terraform",
Changes: []route53.Change{
route53.Change{
Action: "UPSERT",
Record: *rec,
},
},
}
zone := rs.Attributes["zone_id"]
log.Printf("[DEBUG] Creating resource records for zone: %s, name: %s",
zone, rs.Attributes["name"])
resp, err := conn.ChangeResourceRecordSets(zone, req)
if err != nil {
return rs, err
}
// Generate an ID
rs.ID = fmt.Sprintf("%s_%s_%s", zone, rs.Attributes["name"], rs.Attributes["type"])
rs.Dependencies = []terraform.ResourceDependency{
terraform.ResourceDependency{ID: zone},
}
// Wait until we are done
wait := resource.StateChangeConf{
Delay: 30 * time.Second,
Pending: []string{"PENDING"},
Target: "INSYNC",
Timeout: 10 * time.Minute,
MinTimeout: 5 * time.Second,
Refresh: func() (result interface{}, state string, err error) {
return resource_aws_r53_wait(conn, resp.ChangeInfo.ID)
},
}
_, err = wait.WaitForState()
if err != nil {
return rs, err
}
return rs, nil
}
func resource_aws_r53_build_record_set(s *terraform.ResourceState) (*route53.ResourceRecordSet, error) {
// Parse the TTL
ttl, err := strconv.ParseInt(s.Attributes["ttl"], 10, 32)
if err != nil {
return nil, err
}
// Expand the records
recRaw := flatmap.Expand(s.Attributes, "records")
var records []string
for _, raw := range recRaw.([]interface{}) {
records = append(records, raw.(string))
}
rec := &route53.ResourceRecordSet{
Name: s.Attributes["name"],
Type: s.Attributes["type"],
TTL: int(ttl),
Records: records,
}
return rec, nil
}
func resource_aws_r53_record_destroy(
s *terraform.ResourceState,
meta interface{}) error {
p := meta.(*ResourceProvider)
conn := p.route53
// Get the record
rec, err := resource_aws_r53_build_record_set(s)
if err != nil {
return err
}
// Create the new records
req := &route53.ChangeResourceRecordSetsRequest{
Comment: "Deleted by Terraform",
Changes: []route53.Change{
route53.Change{
Action: "DELETE",
Record: *rec,
},
},
}
zone := s.Attributes["zone_id"]
log.Printf("[DEBUG] Deleting resource records for zone: %s, name: %s",
zone, s.Attributes["name"])
_, err = conn.ChangeResourceRecordSets(zone, req)
if err != nil {
return err
}
return nil
}
func resource_aws_r53_record_refresh(
s *terraform.ResourceState,
meta interface{}) (*terraform.ResourceState, error) {
p := meta.(*ResourceProvider)
conn := p.route53
zone := s.Attributes["zone_id"]
lopts := &route53.ListOpts{
Name: s.Attributes["name"],
Type: s.Attributes["type"],
}
resp, err := conn.ListResourceRecordSets(zone, lopts)
if err != nil {
return s, err
}
// Scan for a matching record
found := false
for _, record := range resp.Records {
if route53.FQDN(record.Name) != route53.FQDN(lopts.Name) {
continue
}
if strings.ToUpper(record.Type) != strings.ToUpper(lopts.Type) {
continue
}
found = true
resource_aws_r53_record_update_state(s, &record)
break
}
if !found {
s.ID = ""
}
return s, nil
}
func resource_aws_r53_record_update_state(
s *terraform.ResourceState,
rec *route53.ResourceRecordSet) {
flatRec := flatmap.Flatten(map[string]interface{}{
"records": rec.Records,
})
for k, v := range flatRec {
s.Attributes[k] = v
}
s.Attributes["ttl"] = strconv.FormatInt(int64(rec.TTL), 10)
}
func resource_aws_r53_record_diff(
s *terraform.ResourceState,
c *terraform.ResourceConfig,
meta interface{}) (*terraform.ResourceDiff, error) {
b := &diff.ResourceBuilder{
Attrs: map[string]diff.AttrType{
"zone_id": diff.AttrTypeCreate,
"name": diff.AttrTypeCreate,
"type": diff.AttrTypeCreate,
"ttl": diff.AttrTypeUpdate,
"records": diff.AttrTypeUpdate,
},
}
return b.Diff(s, c)
}


@ -0,0 +1,102 @@
package aws
import (
"fmt"
"strings"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/goamz/route53"
)
func TestAccRoute53Record(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckRoute53RecordDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccRoute53RecordConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckRoute53RecordExists("aws_route53_record.default"),
),
},
},
})
}
func testAccCheckRoute53RecordDestroy(s *terraform.State) error {
conn := testAccProvider.route53
for _, rs := range s.Resources {
if rs.Type != "aws_route53_record" {
continue
}
parts := strings.Split(rs.ID, "_")
zone := parts[0]
name := parts[1]
rType := parts[2]
lopts := &route53.ListOpts{Name: name, Type: rType}
resp, err := conn.ListResourceRecordSets(zone, lopts)
if err != nil {
return err
}
if len(resp.Records) == 0 {
return nil
}
rec := resp.Records[0]
if route53.FQDN(rec.Name) == route53.FQDN(name) && rec.Type == rType {
return fmt.Errorf("Record still exists: %#v", rec)
}
}
return nil
}
func testAccCheckRoute53RecordExists(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
conn := testAccProvider.route53
rs, ok := s.Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.ID == "" {
return fmt.Errorf("No hosted zone ID is set")
}
parts := strings.Split(rs.ID, "_")
zone := parts[0]
name := parts[1]
rType := parts[2]
lopts := &route53.ListOpts{Name: name, Type: rType}
resp, err := conn.ListResourceRecordSets(zone, lopts)
if err != nil {
return err
}
if len(resp.Records) == 0 {
return fmt.Errorf("Record does not exist")
}
rec := resp.Records[0]
if route53.FQDN(rec.Name) == route53.FQDN(name) && rec.Type == rType {
return nil
}
return fmt.Errorf("Record does not exist: %#v", rec)
}
}
const testAccRoute53RecordConfig = `
resource "aws_route53_zone" "main" {
name = "hashicorp.com"
}
resource "aws_route53_record" "default" {
zone_id = "${aws_route53_zone.main.zone_id}"
name = "www.hashicorp.com"
type = "A"
ttl = "30"
records = ["127.0.0.1"]
}
`


@ -0,0 +1,118 @@
package aws
import (
"log"
"time"
"github.com/hashicorp/terraform/helper/config"
"github.com/hashicorp/terraform/helper/diff"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/goamz/route53"
)
func resource_aws_r53_zone_validation() *config.Validator {
return &config.Validator{
Required: []string{
"name",
},
}
}
func resource_aws_r53_zone_create(
s *terraform.ResourceState,
d *terraform.ResourceDiff,
meta interface{}) (*terraform.ResourceState, error) {
p := meta.(*ResourceProvider)
r53 := p.route53
// Merge the diff into the state so that we have all the attributes
// properly.
rs := s.MergeDiff(d)
req := &route53.CreateHostedZoneRequest{
Name: rs.Attributes["name"],
Comment: "Managed by Terraform",
}
log.Printf("[DEBUG] Creating Route53 hosted zone: %s", req.Name)
resp, err := r53.CreateHostedZone(req)
if err != nil {
return rs, err
}
// Store the zone_id
zone := route53.CleanZoneID(resp.HostedZone.ID)
rs.ID = zone
rs.Attributes["zone_id"] = zone
// Wait until we are done initializing
wait := resource.StateChangeConf{
Delay: 30 * time.Second,
Pending: []string{"PENDING"},
Target: "INSYNC",
Timeout: 10 * time.Minute,
MinTimeout: 5 * time.Second,
Refresh: func() (result interface{}, state string, err error) {
return resource_aws_r53_wait(r53, resp.ChangeInfo.ID)
},
}
_, err = wait.WaitForState()
if err != nil {
return rs, err
}
return rs, nil
}
// resource_aws_r53_wait checks the status of a change
func resource_aws_r53_wait(r53 *route53.Route53, ref string) (result interface{}, state string, err error) {
status, err := r53.GetChange(ref)
if err != nil {
return nil, "UNKNOWN", err
}
return true, status, nil
}
func resource_aws_r53_zone_destroy(
s *terraform.ResourceState,
meta interface{}) error {
p := meta.(*ResourceProvider)
r53 := p.route53
log.Printf("[DEBUG] Deleting Route53 hosted zone: %s (ID: %s)",
s.Attributes["name"], s.Attributes["zone_id"])
_, err := r53.DeleteHostedZone(s.Attributes["zone_id"])
if err != nil {
return err
}
return nil
}
func resource_aws_r53_zone_refresh(
s *terraform.ResourceState,
meta interface{}) (*terraform.ResourceState, error) {
p := meta.(*ResourceProvider)
r53 := p.route53
_, err := r53.GetHostedZone(s.Attributes["zone_id"])
if err != nil {
return s, err
}
return s, nil
}
func resource_aws_r53_zone_diff(
s *terraform.ResourceState,
c *terraform.ResourceConfig,
meta interface{}) (*terraform.ResourceDiff, error) {
b := &diff.ResourceBuilder{
Attrs: map[string]diff.AttrType{
"name": diff.AttrTypeCreate,
},
ComputedAttrs: []string{
"zone_id",
},
}
return b.Diff(s, c)
}


@ -0,0 +1,66 @@
package aws
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccRoute53Zone(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckRoute53ZoneDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccRoute53ZoneConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckRoute53ZoneExists("aws_route53_zone.main"),
),
},
},
})
}
func testAccCheckRoute53ZoneDestroy(s *terraform.State) error {
conn := testAccProvider.route53
for _, rs := range s.Resources {
if rs.Type != "aws_route53_zone" {
continue
}
_, err := conn.GetHostedZone(rs.ID)
if err == nil {
return fmt.Errorf("Hosted zone still exists")
}
}
return nil
}
func testAccCheckRoute53ZoneExists(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.ID == "" {
return fmt.Errorf("No hosted zone ID is set")
}
conn := testAccProvider.route53
_, err := conn.GetHostedZone(rs.ID)
if err != nil {
return fmt.Errorf("Hosted zone err: %v", err)
}
return nil
}
}
const testAccRoute53ZoneConfig = `
resource "aws_route53_zone" "main" {
name = "hashicorp.com"
}
`


@ -0,0 +1,93 @@
package aws
import (
"fmt"
"log"
"github.com/hashicorp/terraform/helper/config"
"github.com/hashicorp/terraform/helper/diff"
"github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/goamz/s3"
)
func resource_aws_s3_bucket_validation() *config.Validator {
return &config.Validator{
Required: []string{
"bucket",
},
Optional: []string{
"acl",
},
}
}
func resource_aws_s3_bucket_create(
s *terraform.ResourceState,
d *terraform.ResourceDiff,
meta interface{}) (*terraform.ResourceState, error) {
p := meta.(*ResourceProvider)
s3conn := p.s3conn
// Merge the diff into the state so that we have all the attributes
// properly.
rs := s.MergeDiff(d)
// Get the bucket and optional acl
bucket := rs.Attributes["bucket"]
acl := "private"
if other, ok := rs.Attributes["acl"]; ok {
acl = other
}
log.Printf("[DEBUG] S3 bucket create: %s, ACL: %s", bucket, acl)
s3Bucket := s3conn.Bucket(bucket)
err := s3Bucket.PutBucket(s3.ACL(acl))
if err != nil {
return nil, fmt.Errorf("Error creating S3 bucket: %s", err)
}
// Assign the bucket name as the resource ID
rs.ID = bucket
return rs, nil
}
func resource_aws_s3_bucket_destroy(
s *terraform.ResourceState,
meta interface{}) error {
p := meta.(*ResourceProvider)
s3conn := p.s3conn
name := s.Attributes["bucket"]
bucket := s3conn.Bucket(name)
log.Printf("[DEBUG] S3 Delete Bucket: %s", name)
return bucket.DelBucket()
}
func resource_aws_s3_bucket_refresh(
s *terraform.ResourceState,
meta interface{}) (*terraform.ResourceState, error) {
p := meta.(*ResourceProvider)
s3conn := p.s3conn
bucket := s3conn.Bucket(s.Attributes["bucket"])
resp, err := bucket.Head("/")
if err != nil {
return s, err
}
defer resp.Body.Close()
return s, nil
}
func resource_aws_s3_bucket_diff(
s *terraform.ResourceState,
c *terraform.ResourceConfig,
meta interface{}) (*terraform.ResourceDiff, error) {
b := &diff.ResourceBuilder{
Attrs: map[string]diff.AttrType{
"bucket": diff.AttrTypeCreate,
},
}
return b.Diff(s, c)
}


@ -0,0 +1,72 @@
package aws
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccAWSS3Bucket(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSS3BucketDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSS3BucketConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSS3BucketExists("aws_s3_bucket.bar"),
),
},
},
})
}
func testAccCheckAWSS3BucketDestroy(s *terraform.State) error {
conn := testAccProvider.s3conn
for _, rs := range s.Resources {
if rs.Type != "aws_s3_bucket" {
continue
}
bucket := conn.Bucket(rs.ID)
resp, err := bucket.Head("/")
if err == nil {
return fmt.Errorf("S3Bucket still exists")
}
defer resp.Body.Close()
}
return nil
}
func testAccCheckAWSS3BucketExists(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.ID == "" {
return fmt.Errorf("No S3 Bucket ID is set")
}
conn := testAccProvider.s3conn
bucket := conn.Bucket(rs.ID)
resp, err := bucket.Head("/")
if err != nil {
return fmt.Errorf("S3Bucket error: %v", err)
}
defer resp.Body.Close()
return nil
}
}
const testAccAWSS3BucketConfig = `
resource "aws_s3_bucket" "bar" {
bucket = "tf-test-bucket"
acl = "public-read"
}
`


@ -9,6 +9,9 @@ import (
"github.com/mitchellh/goamz/autoscaling" "github.com/mitchellh/goamz/autoscaling"
"github.com/mitchellh/goamz/ec2" "github.com/mitchellh/goamz/ec2"
"github.com/mitchellh/goamz/elb" "github.com/mitchellh/goamz/elb"
"github.com/mitchellh/goamz/rds"
"github.com/mitchellh/goamz/route53"
"github.com/mitchellh/goamz/s3"
) )
type ResourceProvider struct { type ResourceProvider struct {
@ -17,6 +20,9 @@ type ResourceProvider struct {
ec2conn *ec2.EC2
elbconn *elb.ELB
autoscalingconn *autoscaling.AutoScaling
+s3conn *s3.S3
+rdsconn *rds.Rds
+route53 *route53.Route53
}
func (p *ResourceProvider) Validate(c *terraform.ResourceConfig) ([]string, []error) {
@ -63,6 +69,12 @@ func (p *ResourceProvider) Configure(c *terraform.ResourceConfig) error {
p.elbconn = elb.New(auth, region)
log.Println("[INFO] Initializing AutoScaling connection")
p.autoscalingconn = autoscaling.New(auth, region)
+log.Println("[INFO] Initializing S3 connection")
+p.s3conn = s3.New(auth, region)
+log.Println("[INFO] Initializing RDS connection")
+p.rdsconn = rds.New(auth, region)
+log.Println("[INFO] Initializing Route53 connection")
+p.route53 = route53.New(auth, region)
}
if len(errs) > 0 {


@ -21,6 +21,24 @@ func init() {
Update: resource_aws_autoscaling_group_update,
},
+"aws_db_instance": resource.Resource{
+ConfigValidator: resource_aws_db_instance_validation(),
+Create: resource_aws_db_instance_create,
+Destroy: resource_aws_db_instance_destroy,
+Diff: resource_aws_db_instance_diff,
+Refresh: resource_aws_db_instance_refresh,
+Update: resource_aws_db_instance_update,
+},
+"aws_db_security_group": resource.Resource{
+ConfigValidator: resource_aws_db_security_group_validation(),
+Create: resource_aws_db_security_group_create,
+Destroy: resource_aws_db_security_group_destroy,
+Diff: resource_aws_db_security_group_diff,
+Refresh: resource_aws_db_security_group_refresh,
+Update: resource_aws_db_security_group_update,
+},
"aws_elb": resource.Resource{
ConfigValidator: resource_aws_elb_validation(),
Create: resource_aws_elb_create,
@ -94,6 +112,31 @@ func init() {
Update: resource_aws_route_table_association_update,
},
+"aws_route53_zone": resource.Resource{
+ConfigValidator: resource_aws_r53_zone_validation(),
+Create: resource_aws_r53_zone_create,
+Destroy: resource_aws_r53_zone_destroy,
+Diff: resource_aws_r53_zone_diff,
+Refresh: resource_aws_r53_zone_refresh,
+},
+"aws_route53_record": resource.Resource{
+ConfigValidator: resource_aws_r53_record_validation(),
+Create: resource_aws_r53_record_create,
+Destroy: resource_aws_r53_record_destroy,
+Diff: resource_aws_r53_record_diff,
+Refresh: resource_aws_r53_record_refresh,
+Update: resource_aws_r53_record_create,
+},
+"aws_s3_bucket": resource.Resource{
+ConfigValidator: resource_aws_s3_bucket_validation(),
+Create: resource_aws_s3_bucket_create,
+Destroy: resource_aws_s3_bucket_destroy,
+Diff: resource_aws_s3_bucket_diff,
+Refresh: resource_aws_s3_bucket_refresh,
+},
"aws_security_group": resource.Resource{
ConfigValidator: resource_aws_security_group_validation(),
Create: resource_aws_security_group_create,


@ -325,6 +325,14 @@ func new_droplet_state_refresh_func(id string, attribute string, client *digital
return nil, "", err return nil, "", err
} }
// If the droplet is locked, continue waiting. We can
// only perform actions on unlocked droplets, so it's
// pointless to look at that status
if droplet.IsLocked() == "true" {
log.Println("[DEBUG] Droplet is locked, skipping status check and retrying")
return nil, "", nil
}
// Use our mapping to get back a map of the // Use our mapping to get back a map of the
// droplet properties // droplet properties
resourceMap, err := resource_digitalocean_droplet_update_state( resourceMap, err := resource_digitalocean_droplet_update_state(


@ -59,7 +59,7 @@ func TestAccDigitalOceanDroplet_Update(t *testing.T) {
testAccCheckDigitalOceanDropletExists("digitalocean_droplet.foobar", &droplet),
testAccCheckDigitalOceanDropletRenamedAndResized(&droplet),
resource.TestCheckResourceAttr(
-"digitalocean_droplet.foobar", "name", "foo"),
+"digitalocean_droplet.foobar", "name", "baz"),
resource.TestCheckResourceAttr(
"digitalocean_droplet.foobar", "size", "1gb"),
),


@ -0,0 +1,33 @@
package heroku
import (
"log"
"os"
"github.com/bgentry/heroku-go"
)
type Config struct {
APIKey string `mapstructure:"api_key"`
Email string `mapstructure:"email"`
}
// Client() returns a new client for accessing heroku.
//
func (c *Config) Client() (*heroku.Client, error) {
// If we have env vars set (like in the acc) tests,
// we need to override the values passed in here.
if v := os.Getenv("HEROKU_EMAIL"); v != "" {
c.Email = v
}
if v := os.Getenv("HEROKU_API_KEY"); v != "" {
c.APIKey = v
}
client := heroku.Client{Username: c.Email, Password: c.APIKey}
log.Printf("[INFO] Heroku Client configured for user: %s", c.Email)
return &client, nil
}


@ -0,0 +1,153 @@
package heroku
import (
"fmt"
"log"
"github.com/hashicorp/terraform/helper/config"
"github.com/hashicorp/terraform/helper/diff"
"github.com/hashicorp/terraform/terraform"
"github.com/bgentry/heroku-go"
)
func resource_heroku_app_create(
s *terraform.ResourceState,
d *terraform.ResourceDiff,
meta interface{}) (*terraform.ResourceState, error) {
p := meta.(*ResourceProvider)
client := p.client
// Merge the diff into the state so that we have all the attributes
// properly.
rs := s.MergeDiff(d)
// Build up our creation options
opts := heroku.AppCreateOpts{}
if attr := rs.Attributes["name"]; attr != "" {
opts.Name = &attr
}
if attr := rs.Attributes["region"]; attr != "" {
opts.Region = &attr
}
if attr := rs.Attributes["stack"]; attr != "" {
opts.Stack = &attr
}
log.Printf("[DEBUG] App create configuration: %#v", opts)
app, err := client.AppCreate(&opts)
if err != nil {
return s, err
}
rs.ID = app.Name
log.Printf("[INFO] App ID: %s", rs.ID)
return resource_heroku_app_update_state(rs, app)
}
func resource_heroku_app_update(
s *terraform.ResourceState,
d *terraform.ResourceDiff,
meta interface{}) (*terraform.ResourceState, error) {
panic("does not update")
return nil, nil
}
func resource_heroku_app_destroy(
s *terraform.ResourceState,
meta interface{}) error {
p := meta.(*ResourceProvider)
client := p.client
log.Printf("[INFO] Deleting App: %s", s.ID)
// Destroy the app
err := client.AppDelete(s.ID)
if err != nil {
return fmt.Errorf("Error deleting App: %s", err)
}
return nil
}
func resource_heroku_app_refresh(
s *terraform.ResourceState,
meta interface{}) (*terraform.ResourceState, error) {
p := meta.(*ResourceProvider)
client := p.client
app, err := resource_heroku_app_retrieve(s.ID, client)
if err != nil {
return nil, err
}
return resource_heroku_app_update_state(s, app)
}
func resource_heroku_app_diff(
s *terraform.ResourceState,
c *terraform.ResourceConfig,
meta interface{}) (*terraform.ResourceDiff, error) {
b := &diff.ResourceBuilder{
Attrs: map[string]diff.AttrType{
"name": diff.AttrTypeCreate,
"region": diff.AttrTypeUpdate,
"stack": diff.AttrTypeCreate,
},
ComputedAttrs: []string{
"name",
"region",
"stack",
"git_url",
"web_url",
"id",
},
}
return b.Diff(s, c)
}
func resource_heroku_app_update_state(
s *terraform.ResourceState,
app *heroku.App) (*terraform.ResourceState, error) {
s.Attributes["name"] = app.Name
s.Attributes["stack"] = app.Stack.Name
s.Attributes["region"] = app.Region.Name
s.Attributes["git_url"] = app.GitURL
s.Attributes["web_url"] = app.WebURL
s.Attributes["id"] = app.Id
return s, nil
}
func resource_heroku_app_retrieve(id string, client *heroku.Client) (*heroku.App, error) {
app, err := client.AppInfo(id)
if err != nil {
return nil, fmt.Errorf("Error retrieving app: %s", err)
}
return app, nil
}
func resource_heroku_app_validation() *config.Validator {
return &config.Validator{
Required: []string{},
Optional: []string{
"name",
"region",
"stack",
},
}
}


@ -0,0 +1,93 @@
package heroku
import (
"fmt"
"testing"
"github.com/bgentry/heroku-go"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccHerokuApp_Basic(t *testing.T) {
var app heroku.App
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckHerokuAppDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccCheckHerokuAppConfig_basic,
Check: resource.ComposeTestCheckFunc(
testAccCheckHerokuAppExists("heroku_app.foobar", &app),
testAccCheckHerokuAppAttributes(&app),
resource.TestCheckResourceAttr(
"heroku_app.foobar", "name", "terraform-test-app"),
),
},
},
})
}
func testAccCheckHerokuAppDestroy(s *terraform.State) error {
client := testAccProvider.client
for _, rs := range s.Resources {
if rs.Type != "heroku_app" {
continue
}
_, err := client.AppInfo(rs.ID)
if err == nil {
return fmt.Errorf("App still exists")
}
}
return nil
}
func testAccCheckHerokuAppAttributes(app *heroku.App) resource.TestCheckFunc {
return func(s *terraform.State) error {
// check attrs
return nil
}
}
func testAccCheckHerokuAppExists(n string, app *heroku.App) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.Resources[n]
fmt.Printf("resources %#v", s.Resources)
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.ID == "" {
return fmt.Errorf("No App Name is set")
}
client := testAccProvider.client
foundApp, err := client.AppInfo(rs.ID)
if err != nil {
return err
}
if foundApp.Name != rs.ID {
return fmt.Errorf("App not found")
}
*app = *foundApp
return nil
}
}
const testAccCheckHerokuAppConfig_basic = `
resource "heroku_app" "foobar" {
name = "terraform-test-app"
}`


@ -0,0 +1,68 @@
package heroku
import (
"log"
"github.com/bgentry/heroku-go"
"github.com/hashicorp/terraform/helper/config"
"github.com/hashicorp/terraform/terraform"
)
type ResourceProvider struct {
Config Config
client *heroku.Client
}
func (p *ResourceProvider) Validate(c *terraform.ResourceConfig) ([]string, []error) {
v := &config.Validator{
Required: []string{
"email",
"api_key",
},
}
return v.Validate(c)
}
func (p *ResourceProvider) ValidateResource(
t string, c *terraform.ResourceConfig) ([]string, []error) {
return resourceMap.Validate(t, c)
}
func (p *ResourceProvider) Configure(c *terraform.ResourceConfig) error {
if _, err := config.Decode(&p.Config, c.Config); err != nil {
return err
}
log.Println("[INFO] Initializing Heroku client")
var err error
p.client, err = p.Config.Client()
if err != nil {
return err
}
return nil
}
func (p *ResourceProvider) Apply(
s *terraform.ResourceState,
d *terraform.ResourceDiff) (*terraform.ResourceState, error) {
return resourceMap.Apply(s, d, p)
}
func (p *ResourceProvider) Diff(
s *terraform.ResourceState,
c *terraform.ResourceConfig) (*terraform.ResourceDiff, error) {
return resourceMap.Diff(s, c, p)
}
func (p *ResourceProvider) Refresh(
s *terraform.ResourceState) (*terraform.ResourceState, error) {
return resourceMap.Refresh(s, p)
}
func (p *ResourceProvider) Resources() []terraform.ResourceType {
return resourceMap.Resources()
}


@ -0,0 +1,76 @@
package heroku
import (
"os"
"reflect"
"testing"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/terraform"
)
var testAccProviders map[string]terraform.ResourceProvider
var testAccProvider *ResourceProvider
func init() {
testAccProvider = new(ResourceProvider)
testAccProviders = map[string]terraform.ResourceProvider{
"heroku": testAccProvider,
}
}
func TestResourceProvider_impl(t *testing.T) {
var _ terraform.ResourceProvider = new(ResourceProvider)
}
func TestResourceProvider_Configure(t *testing.T) {
rp := new(ResourceProvider)
var expectedKey string
var expectedEmail string
if v := os.Getenv("HEROKU_EMAIL"); v != "" {
expectedEmail = v
} else {
expectedEmail = "foo"
}
if v := os.Getenv("HEROKU_API_KEY"); v != "" {
expectedKey = v
} else {
expectedKey = "foo"
}
raw := map[string]interface{}{
"api_key": expectedKey,
"email": expectedEmail,
}
rawConfig, err := config.NewRawConfig(raw)
if err != nil {
t.Fatalf("err: %s", err)
}
err = rp.Configure(terraform.NewResourceConfig(rawConfig))
if err != nil {
t.Fatalf("err: %s", err)
}
expected := Config{
APIKey: expectedKey,
Email: expectedEmail,
}
if !reflect.DeepEqual(rp.Config, expected) {
t.Fatalf("bad: %#v", rp.Config)
}
}
func testAccPreCheck(t *testing.T) {
if v := os.Getenv("HEROKU_EMAIL"); v == "" {
t.Fatal("HEROKU_EMAIL must be set for acceptance tests")
}
if v := os.Getenv("HEROKU_API_KEY"); v == "" {
t.Fatal("HEROKU_API_KEY must be set for acceptance tests")
}
}


@ -0,0 +1,24 @@
package heroku
import (
"github.com/hashicorp/terraform/helper/resource"
)
// resourceMap is the mapping of resources we support to their basic
// operations. This makes it easy to implement new resource types.
var resourceMap *resource.Map
func init() {
resourceMap = &resource.Map{
Mapping: map[string]resource.Resource{
"heroku_app": resource.Resource{
ConfigValidator: resource_heroku_app_validation(),
Create: resource_heroku_app_create,
Destroy: resource_heroku_app_destroy,
Diff: resource_heroku_app_diff,
Refresh: resource_heroku_app_refresh,
Update: resource_heroku_app_update,
},
},
}
}


@ -14,31 +14,31 @@ import (
type ResourceProvisioner struct{}
func (p *ResourceProvisioner) Apply(s *terraform.ResourceState,
-c *terraform.ResourceConfig) (*terraform.ResourceState, error) {
+c *terraform.ResourceConfig) error {
// Ensure the connection type is SSH
if err := helper.VerifySSH(s); err != nil {
-return s, err
+return err
}
// Get the SSH configuration
conf, err := helper.ParseSSHConfig(s)
if err != nil {
-return s, err
+return err
}
// Get the source and destination
sRaw := c.Config["source"]
src, ok := sRaw.(string)
if !ok {
-return s, fmt.Errorf("Unsupported 'source' type! Must be string.")
+return fmt.Errorf("Unsupported 'source' type! Must be string.")
}
dRaw := c.Config["destination"]
dst, ok := dRaw.(string)
if !ok {
-return s, fmt.Errorf("Unsupported 'destination' type! Must be string.")
+return fmt.Errorf("Unsupported 'destination' type! Must be string.")
}
-return s, p.copyFiles(conf, src, dst)
+return p.copyFiles(conf, src, dst)
}
func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) (ws []string, es []error) {


@ -21,16 +21,16 @@ type ResourceProvisioner struct{}
func (p *ResourceProvisioner) Apply(
s *terraform.ResourceState,
-c *terraform.ResourceConfig) (*terraform.ResourceState, error) {
+c *terraform.ResourceConfig) error {
// Get the command
commandRaw, ok := c.Config["command"]
if !ok {
-return s, fmt.Errorf("local-exec provisioner missing 'command'")
+return fmt.Errorf("local-exec provisioner missing 'command'")
}
command, ok := commandRaw.(string)
if !ok {
-return s, fmt.Errorf("local-exec provisioner command must be a string")
+return fmt.Errorf("local-exec provisioner command must be a string")
}
// Execute the command using a shell
@ -51,10 +51,10 @@ func (p *ResourceProvisioner) Apply(
// Run the command to completion
if err := cmd.Run(); err != nil {
-return s, fmt.Errorf("Error running command '%s': %v. Output: %s",
+return fmt.Errorf("Error running command '%s': %v. Output: %s",
command, err, output.Bytes())
}
-return s, nil
+return nil
}
func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) {


@ -21,8 +21,7 @@ func TestResourceProvider_Apply(t *testing.T) {
})
p := new(ResourceProvisioner)
-_, err := p.Apply(nil, c)
-if err != nil {
+if err := p.Apply(nil, c); err != nil {
t.Fatalf("err: %v", err)
}


@ -23,22 +23,22 @@ const (
type ResourceProvisioner struct{}
func (p *ResourceProvisioner) Apply(s *terraform.ResourceState,
-c *terraform.ResourceConfig) (*terraform.ResourceState, error) {
+c *terraform.ResourceConfig) error {
// Ensure the connection type is SSH
if err := helper.VerifySSH(s); err != nil {
-return s, err
+return err
}
// Get the SSH configuration
conf, err := helper.ParseSSHConfig(s)
if err != nil {
-return s, err
+return err
}
// Collect the scripts
scripts, err := p.collectScripts(c)
if err != nil {
-return s, err
+return err
}
for _, s := range scripts {
defer s.Close()
@ -46,9 +46,9 @@ func (p *ResourceProvisioner) Apply(s *terraform.ResourceState,
// Copy and execute each script // Copy and execute each script
if err := p.runScripts(conf, scripts); err != nil { if err := p.runScripts(conf, scripts); err != nil {
return s, err return err
} }
return s, nil return nil
} }
func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) (ws []string, es []error) { func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) (ws []string, es []error) {

View File

@@ -487,7 +487,7 @@ func TestApply_vars(t *testing.T) {
 			actual = v.(string)
 		}
-		return nil, nil
+		return &terraform.ResourceDiff{}, nil
 	}
 	args := []string{
@@ -530,7 +530,7 @@ func TestApply_varFile(t *testing.T) {
 			actual = v.(string)
 		}
-		return nil, nil
+		return &terraform.ResourceDiff{}, nil
 	}
 	args := []string{

View File

@@ -85,6 +85,7 @@ func testStateFile(t *testing.T, s *terraform.State) string {
 func testProvider() *terraform.MockResourceProvider {
 	p := new(terraform.MockResourceProvider)
+	p.DiffReturn = &terraform.ResourceDiff{}
 	p.RefreshFn = func(
 		s *terraform.ResourceState) (*terraform.ResourceState, error) {
 		return s, nil

View File

@@ -39,7 +39,14 @@ func FormatState(s *terraform.State, c *colorstring.Colorize) string {
 			id = "<not created>"
 		}
-		buf.WriteString(fmt.Sprintf("%s:\n", k))
+		taintStr := ""
+		if s.Tainted != nil {
+			if _, ok := s.Tainted[id]; ok {
+				taintStr = " (tainted)"
+			}
+		}
+
+		buf.WriteString(fmt.Sprintf("%s:%s\n", k, taintStr))
 		buf.WriteString(fmt.Sprintf("  id = %s\n", id))
 
 		// Sort the attributes

View File

@@ -35,6 +35,7 @@ func init() {
 	BuiltinConfig.Providers = map[string]string{
 		"aws":          "terraform-provider-aws",
 		"digitalocean": "terraform-provider-digitalocean",
+		"heroku":       "terraform-provider-heroku",
 	}
 	BuiltinConfig.Provisioners = map[string]string{
 		"local-exec":  "terraform-provisioner-local-exec",

58
config/append.go Normal file
View File

@ -0,0 +1,58 @@
package config
// Append appends one configuration to another.
//
// Append assumes that both configurations will not have
// conflicting variables, resources, etc. If they do, the
// problems will be caught in the validation phase.
//
// It is possible that c1, c2 on their own are not valid. For
// example, a resource in c2 may reference a variable in c1. But
// together, they would be valid.
func Append(c1, c2 *Config) (*Config, error) {
c := new(Config)
// Append unknown keys, but keep them unique since it is a set
unknowns := make(map[string]struct{})
for _, k := range c1.unknownKeys {
unknowns[k] = struct{}{}
}
for _, k := range c2.unknownKeys {
unknowns[k] = struct{}{}
}
for k, _ := range unknowns {
c.unknownKeys = append(c.unknownKeys, k)
}
if len(c1.Outputs) > 0 || len(c2.Outputs) > 0 {
c.Outputs = make(
[]*Output, 0, len(c1.Outputs)+len(c2.Outputs))
c.Outputs = append(c.Outputs, c1.Outputs...)
c.Outputs = append(c.Outputs, c2.Outputs...)
}
if len(c1.ProviderConfigs) > 0 || len(c2.ProviderConfigs) > 0 {
c.ProviderConfigs = make(
[]*ProviderConfig,
0, len(c1.ProviderConfigs)+len(c2.ProviderConfigs))
c.ProviderConfigs = append(c.ProviderConfigs, c1.ProviderConfigs...)
c.ProviderConfigs = append(c.ProviderConfigs, c2.ProviderConfigs...)
}
if len(c1.Resources) > 0 || len(c2.Resources) > 0 {
c.Resources = make(
[]*Resource,
0, len(c1.Resources)+len(c2.Resources))
c.Resources = append(c.Resources, c1.Resources...)
c.Resources = append(c.Resources, c2.Resources...)
}
if len(c1.Variables) > 0 || len(c2.Variables) > 0 {
c.Variables = make(
[]*Variable, 0, len(c1.Variables)+len(c2.Variables))
c.Variables = append(c.Variables, c1.Variables...)
c.Variables = append(c.Variables, c2.Variables...)
}
return c, nil
}
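As a rough usage sketch (hypothetical driver code, assuming only the Append, Config, and Variable definitions introduced in this commit), appending two partial configurations simply concatenates their sections:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/config"
)

func main() {
	// Two partial configurations; each may be incomplete on its own,
	// but Append only concatenates, so validation happens later.
	c1 := &config.Config{
		Variables: []*config.Variable{{Name: "region", Default: "us-east-1"}},
	}
	c2 := &config.Config{
		Variables: []*config.Variable{{Name: "ami"}},
	}

	c, err := config.Append(c1, c2)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(c.Variables)) // 2: "region" followed by "ami"
}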

83
config/append_test.go Normal file
View File

@ -0,0 +1,83 @@
package config
import (
"reflect"
"testing"
)
func TestAppend(t *testing.T) {
cases := []struct {
c1, c2, result *Config
err bool
}{
{
&Config{
Outputs: []*Output{
&Output{Name: "foo"},
},
ProviderConfigs: []*ProviderConfig{
&ProviderConfig{Name: "foo"},
},
Resources: []*Resource{
&Resource{Name: "foo"},
},
Variables: []*Variable{
&Variable{Name: "foo"},
},
unknownKeys: []string{"foo"},
},
&Config{
Outputs: []*Output{
&Output{Name: "bar"},
},
ProviderConfigs: []*ProviderConfig{
&ProviderConfig{Name: "bar"},
},
Resources: []*Resource{
&Resource{Name: "bar"},
},
Variables: []*Variable{
&Variable{Name: "bar"},
},
unknownKeys: []string{"bar"},
},
&Config{
Outputs: []*Output{
&Output{Name: "foo"},
&Output{Name: "bar"},
},
ProviderConfigs: []*ProviderConfig{
&ProviderConfig{Name: "foo"},
&ProviderConfig{Name: "bar"},
},
Resources: []*Resource{
&Resource{Name: "foo"},
&Resource{Name: "bar"},
},
Variables: []*Variable{
&Variable{Name: "foo"},
&Variable{Name: "bar"},
},
unknownKeys: []string{"foo", "bar"},
},
false,
},
}
for i, tc := range cases {
actual, err := Append(tc.c1, tc.c2)
if (err != nil) != tc.err {
t.Fatalf("%d: error fail", i)
}
if !reflect.DeepEqual(actual, tc.result) {
t.Fatalf("%d: bad:\n\n%#v", i, actual)
}
}
}

View File

@ -4,19 +4,20 @@ package config
import ( import (
"fmt" "fmt"
"strconv"
"strings" "strings"
"github.com/hashicorp/terraform/flatmap"
"github.com/hashicorp/terraform/helper/multierror" "github.com/hashicorp/terraform/helper/multierror"
"github.com/mitchellh/mapstructure"
) )
// Config is the configuration that comes from loading a collection // Config is the configuration that comes from loading a collection
// of Terraform templates. // of Terraform templates.
type Config struct { type Config struct {
ProviderConfigs map[string]*ProviderConfig ProviderConfigs []*ProviderConfig
Resources []*Resource Resources []*Resource
Variables map[string]*Variable Variables []*Variable
Outputs map[string]*Output Outputs []*Output
// The fields below can be filled in by loaders for validation // The fields below can be filled in by loaders for validation
// purposes. // purposes.
@ -28,6 +29,7 @@ type Config struct {
// For example, Terraform needs to set the AWS access keys for the AWS // For example, Terraform needs to set the AWS access keys for the AWS
// resource provider. // resource provider.
type ProviderConfig struct { type ProviderConfig struct {
Name string
RawConfig *RawConfig RawConfig *RawConfig
} }
@ -40,6 +42,7 @@ type Resource struct {
Count int Count int
RawConfig *RawConfig RawConfig *RawConfig
Provisioners []*Provisioner Provisioners []*Provisioner
DependsOn []string
} }
// Provisioner is a configured provisioner step on a resource. // Provisioner is a configured provisioner step on a resource.
@ -51,9 +54,9 @@ type Provisioner struct {
// Variable is a variable defined within the configuration. // Variable is a variable defined within the configuration.
type Variable struct { type Variable struct {
Default string Name string
Default interface{}
Description string Description string
defaultSet bool
} }
// Output is an output defined within the configuration. An output is // Output is an output defined within the configuration. An output is
@ -63,44 +66,23 @@ type Output struct {
RawConfig *RawConfig RawConfig *RawConfig
} }
// An InterpolatedVariable is a variable that is embedded within a string // VariableType is the type of value a variable is holding, and returned
// in the configuration, such as "hello ${world}" (world in this case is // by the Type() function on variables.
// an interpolated variable). type VariableType byte
//
// These variables can come from a variety of sources, represented by
// implementations of this interface.
type InterpolatedVariable interface {
FullKey() string
}
// A ResourceVariable is a variable that is referencing the field const (
// of a resource, such as "${aws_instance.foo.ami}" VariableTypeUnknown VariableType = iota
type ResourceVariable struct { VariableTypeString
Type string // Resource type, i.e. "aws_instance" VariableTypeMap
Name string // Resource name )
Field string // Resource field
Multi bool // True if multi-variable: aws_instance.foo.*.id
Index int // Index for multi-variable: aws_instance.foo.1.id == 1
key string
}
// A UserVariable is a variable that is referencing a user variable
// that is inputted from outside the configuration. This looks like
// "${var.foo}"
type UserVariable struct {
Name string
key string
}
// ProviderConfigName returns the name of the provider configuration in // ProviderConfigName returns the name of the provider configuration in
// the given mapping that maps to the proper provider configuration // the given mapping that maps to the proper provider configuration
// for this resource. // for this resource.
func ProviderConfigName(t string, pcs map[string]*ProviderConfig) string { func ProviderConfigName(t string, pcs []*ProviderConfig) string {
lk := "" lk := ""
for k, _ := range pcs { for _, v := range pcs {
k := v.Name
if strings.HasPrefix(t, k) && len(k) > len(lk) { if strings.HasPrefix(t, k) && len(k) > len(lk) {
lk = k lk = k
} }
@ -124,6 +106,18 @@ func (c *Config) Validate() error {
} }
vars := c.allVariables() vars := c.allVariables()
varMap := make(map[string]*Variable)
for _, v := range c.Variables {
varMap[v.Name] = v
}
for _, v := range c.Variables {
if v.Type() == VariableTypeUnknown {
errs = append(errs, fmt.Errorf(
"Variable '%s': must be string or mapping",
v.Name))
}
}
// Check for references to user variables that do not actually // Check for references to user variables that do not actually
// exist and record those errors. // exist and record those errors.
@ -134,7 +128,7 @@ func (c *Config) Validate() error {
continue continue
} }
if _, ok := c.Variables[uv.Name]; !ok { if _, ok := varMap[uv.Name]; !ok {
errs = append(errs, fmt.Errorf( errs = append(errs, fmt.Errorf(
"%s: unknown variable referenced: %s", "%s: unknown variable referenced: %s",
source, source,
@ -161,6 +155,17 @@ func (c *Config) Validate() error {
} }
dupped = nil dupped = nil
// Make sure all dependsOn are valid in resources
for n, r := range resources {
for _, d := range r.DependsOn {
if _, ok := resources[d]; !ok {
errs = append(errs, fmt.Errorf(
"%s: resource depends on non-existent resource '%s'",
n, d))
}
}
}
for source, vs := range vars { for source, vs := range vars {
for _, v := range vs { for _, v := range vs {
rv, ok := v.(*ResourceVariable) rv, ok := v.(*ResourceVariable)
@ -220,8 +225,8 @@ func (c *Config) Validate() error {
// are valid in the Validate step. // are valid in the Validate step.
func (c *Config) allVariables() map[string][]InterpolatedVariable { func (c *Config) allVariables() map[string][]InterpolatedVariable {
result := make(map[string][]InterpolatedVariable) result := make(map[string][]InterpolatedVariable)
for n, pc := range c.ProviderConfigs { for _, pc := range c.ProviderConfigs {
source := fmt.Sprintf("provider config '%s'", n) source := fmt.Sprintf("provider config '%s'", pc.Name)
for _, v := range pc.RawConfig.Variables { for _, v := range pc.RawConfig.Variables {
result[source] = append(result[source], v) result[source] = append(result[source], v)
} }
@ -244,61 +249,127 @@ func (c *Config) allVariables() map[string][]InterpolatedVariable {
return result return result
} }
// Required tests whether a variable is required or not. func (o *Output) mergerName() string {
func (v *Variable) Required() bool { return o.Name
return !v.defaultSet
} }
func NewResourceVariable(key string) (*ResourceVariable, error) { func (o *Output) mergerMerge(m merger) merger {
parts := strings.SplitN(key, ".", 3) o2 := m.(*Output)
field := parts[2]
multi := false
var index int
if idx := strings.Index(field, "."); idx != -1 { result := *o
indexStr := field[:idx] result.Name = o2.Name
multi = indexStr == "*" result.RawConfig = result.RawConfig.merge(o2.RawConfig)
index = -1
if !multi { return &result
indexInt, err := strconv.ParseInt(indexStr, 0, 0) }
if err == nil {
multi = true
index = int(indexInt)
}
}
if multi { func (c *ProviderConfig) mergerName() string {
field = field[idx+1:] return c.Name
} }
func (c *ProviderConfig) mergerMerge(m merger) merger {
c2 := m.(*ProviderConfig)
result := *c
result.Name = c2.Name
result.RawConfig = result.RawConfig.merge(c2.RawConfig)
return &result
}
func (r *Resource) mergerName() string {
return fmt.Sprintf("%s.%s", r.Type, r.Name)
}
func (r *Resource) mergerMerge(m merger) merger {
r2 := m.(*Resource)
result := *r
result.Name = r2.Name
result.Type = r2.Type
result.RawConfig = result.RawConfig.merge(r2.RawConfig)
if r2.Count > 0 {
result.Count = r2.Count
} }
return &ResourceVariable{ if len(r2.Provisioners) > 0 {
Type: parts[0], result.Provisioners = r2.Provisioners
Name: parts[1], }
Field: field,
Multi: multi, return &result
Index: index,
key: key,
}, nil
} }
func (v *ResourceVariable) ResourceId() string { // DefaultsMap returns a map of default values for this variable.
return fmt.Sprintf("%s.%s", v.Type, v.Name) func (v *Variable) DefaultsMap() map[string]string {
if v.Default == nil {
return nil
}
n := fmt.Sprintf("var.%s", v.Name)
switch v.Type() {
case VariableTypeString:
return map[string]string{n: v.Default.(string)}
case VariableTypeMap:
result := flatmap.Flatten(map[string]interface{}{
n: v.Default.(map[string]string),
})
result[n] = v.Name
return result
default:
return nil
}
} }
func (v *ResourceVariable) FullKey() string { // Merge merges two variables to create a new third variable.
return v.key func (v *Variable) Merge(v2 *Variable) *Variable {
// Shallow copy the variable
result := *v
// The names should be the same, but the second name always wins.
result.Name = v2.Name
if v2.Default != nil {
result.Default = v2.Default
}
if v2.Description != "" {
result.Description = v2.Description
}
return &result
} }
func NewUserVariable(key string) (*UserVariable, error) { // Type returns the type of variable this is.
name := key[len("var."):] func (v *Variable) Type() VariableType {
return &UserVariable{ if v.Default == nil {
key: key, return VariableTypeString
Name: name, }
}, nil
var strVal string
if err := mapstructure.WeakDecode(v.Default, &strVal); err == nil {
v.Default = strVal
return VariableTypeString
}
var m map[string]string
if err := mapstructure.WeakDecode(v.Default, &m); err == nil {
v.Default = m
return VariableTypeMap
}
return VariableTypeUnknown
} }
func (v *UserVariable) FullKey() string { func (v *Variable) mergerName() string {
return v.key return v.Name
}
func (v *Variable) mergerMerge(m merger) merger {
return v.Merge(m.(*Variable))
}
// Required tests whether a variable is required or not.
func (v *Variable) Required() bool {
return v.Default == nil
} }
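A minimal sketch of how these Variable helpers behave (hypothetical driver code, using only the exported API shown above):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/config"
)

func main() {
	// A variable with no default is required and is treated as a string.
	ami := &config.Variable{Name: "ami"}
	fmt.Println(ami.Required())                          // true
	fmt.Println(ami.Type() == config.VariableTypeString) // true

	// A map default makes it a map variable; DefaultsMap flattens it and
	// also maps "var.amis" to the variable's own name, which is what lets
	// lookup(var.amis, ...) resolve later.
	amis := &config.Variable{
		Name:    "amis",
		Default: map[string]string{"us-east-1": "ami-abc123"},
	}
	fmt.Println(amis.Type() == config.VariableTypeMap) // true
	fmt.Println(amis.DefaultsMap())
	// contains "var.amis" -> "amis" and "var.amis.us-east-1" -> "ami-abc123"
}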

View File

@ -2,6 +2,7 @@ package config
import ( import (
"path/filepath" "path/filepath"
"reflect"
"testing" "testing"
) )
@ -15,6 +16,13 @@ func TestConfigValidate(t *testing.T) {
} }
} }
func TestConfigValidate_badDependsOn(t *testing.T) {
c := testConfig(t, "validate-bad-depends-on")
if err := c.Validate(); err == nil {
t.Fatal("should not be valid")
}
}
func TestConfigValidate_badMultiResource(t *testing.T) { func TestConfigValidate_badMultiResource(t *testing.T) {
c := testConfig(t, "validate-bad-multi-resource") c := testConfig(t, "validate-bad-multi-resource")
if err := c.Validate(); err == nil { if err := c.Validate(); err == nil {
@ -64,98 +72,26 @@ func TestConfigValidate_unknownVar(t *testing.T) {
} }
} }
func TestNewResourceVariable(t *testing.T) { func TestConfigValidate_varDefault(t *testing.T) {
v, err := NewResourceVariable("foo.bar.baz") c := testConfig(t, "validate-var-default")
if err != nil { if err := c.Validate(); err != nil {
t.Fatalf("err: %s", err) t.Fatalf("should be valid: %s", err)
}
if v.Type != "foo" {
t.Fatalf("bad: %#v", v)
}
if v.Name != "bar" {
t.Fatalf("bad: %#v", v)
}
if v.Field != "baz" {
t.Fatalf("bad: %#v", v)
}
if v.Multi {
t.Fatal("should not be multi")
}
if v.FullKey() != "foo.bar.baz" {
t.Fatalf("bad: %#v", v)
} }
} }
func TestResourceVariable_Multi(t *testing.T) { func TestConfigValidate_varDefaultBadType(t *testing.T) {
v, err := NewResourceVariable("foo.bar.*.baz") c := testConfig(t, "validate-var-default-bad-type")
if err != nil { if err := c.Validate(); err == nil {
t.Fatalf("err: %s", err) t.Fatal("should not be valid")
}
if v.Type != "foo" {
t.Fatalf("bad: %#v", v)
}
if v.Name != "bar" {
t.Fatalf("bad: %#v", v)
}
if v.Field != "baz" {
t.Fatalf("bad: %#v", v)
}
if !v.Multi {
t.Fatal("should be multi")
}
}
func TestResourceVariable_MultiIndex(t *testing.T) {
cases := []struct {
Input string
Index int
Field string
}{
{"foo.bar.*.baz", -1, "baz"},
{"foo.bar.0.baz", 0, "baz"},
{"foo.bar.5.baz", 5, "baz"},
}
for _, tc := range cases {
v, err := NewResourceVariable(tc.Input)
if err != nil {
t.Fatalf("err: %s", err)
}
if !v.Multi {
t.Fatalf("should be multi: %s", tc.Input)
}
if v.Index != tc.Index {
t.Fatalf("bad: %d\n\n%s", v.Index, tc.Input)
}
if v.Field != tc.Field {
t.Fatalf("bad: %s\n\n%s", v.Field, tc.Input)
}
}
}
func TestNewUserVariable(t *testing.T) {
v, err := NewUserVariable("var.bar")
if err != nil {
t.Fatalf("err: %s", err)
}
if v.Name != "bar" {
t.Fatalf("bad: %#v", v.Name)
}
if v.FullKey() != "var.bar" {
t.Fatalf("bad: %#v", v)
} }
} }
func TestProviderConfigName(t *testing.T) { func TestProviderConfigName(t *testing.T) {
pcs := map[string]*ProviderConfig{ pcs := []*ProviderConfig{
"aw": new(ProviderConfig), &ProviderConfig{Name: "aw"},
"aws": new(ProviderConfig), &ProviderConfig{Name: "aws"},
"a": new(ProviderConfig), &ProviderConfig{Name: "a"},
"gce_": new(ProviderConfig), &ProviderConfig{Name: "gce_"},
} }
n := ProviderConfigName("aws_instance", pcs) n := ProviderConfigName("aws_instance", pcs)
@ -164,6 +100,43 @@ func TestProviderConfigName(t *testing.T) {
} }
} }
func TestVariableDefaultsMap(t *testing.T) {
cases := []struct {
Default interface{}
Output map[string]string
}{
{
nil,
nil,
},
{
"foo",
map[string]string{"var.foo": "foo"},
},
{
map[interface{}]interface{}{
"foo": "bar",
"bar": "baz",
},
map[string]string{
"var.foo": "foo",
"var.foo.foo": "bar",
"var.foo.bar": "baz",
},
},
}
for i, tc := range cases {
v := &Variable{Name: "foo", Default: tc.Default}
actual := v.DefaultsMap()
if !reflect.DeepEqual(actual, tc.Output) {
t.Fatalf("%d: bad: %#v", i, actual)
}
}
}
func testConfig(t *testing.T, name string) *Config { func testConfig(t *testing.T, name string) *Config {
c, err := Load(filepath.Join(fixtureDir, name, "main.tf")) c, err := Load(filepath.Join(fixtureDir, name, "main.tf"))
if err != nil { if err != nil {

91
config/expr.y Normal file
View File

@ -0,0 +1,91 @@
// This is the yacc input for creating the parser for interpolation
// expressions in Go.
// To build it:
//
// go tool yacc -p "expr" expr.y (produces y.go)
//
%{
package config
import (
"fmt"
)
%}
%union {
expr Interpolation
str string
variable InterpolatedVariable
args []Interpolation
}
%type <args> args
%type <expr> expr
%type <str> string
%type <variable> variable
%token <str> STRING IDENTIFIER
%token <str> COMMA LEFTPAREN RIGHTPAREN
%%
top:
expr
{
exprResult = $1
}
expr:
string
{
$$ = &LiteralInterpolation{Literal: $1}
}
| variable
{
$$ = &VariableInterpolation{Variable: $1}
}
| IDENTIFIER LEFTPAREN args RIGHTPAREN
{
f, ok := Funcs[$1]
if !ok {
exprErrors = append(exprErrors, fmt.Errorf(
"Unknown function: %s", $1))
}
$$ = &FunctionInterpolation{Func: f, Args: $3}
}
args:
{
$$ = nil
}
| expr COMMA expr
{
$$ = append($$, $1, $3)
}
| expr
{
$$ = append($$, $1)
}
string:
STRING
{
$$ = $1
}
variable:
IDENTIFIER
{
var err error
$$, err = NewInterpolatedVariable($1)
if err != nil {
exprErrors = append(exprErrors, fmt.Errorf(
"Error parsing variable '%s': %s", $1, err))
}
}
%%

131
config/expr_lex.go Normal file
View File

@ -0,0 +1,131 @@
package config
import (
"bytes"
"log"
"unicode"
"unicode/utf8"
)
// The parser expects the lexer to return 0 on EOF.
const lexEOF = 0
// The parser uses the type <prefix>Lex as a lexer. It must provide
// the methods Lex(*<prefix>SymType) int and Error(string).
type exprLex struct {
input string
pos int
width int
}
// The parser calls this method to get each new token.
func (x *exprLex) Lex(yylval *exprSymType) int {
for {
c := x.next()
if c == lexEOF {
return lexEOF
}
// Ignore all whitespace
if unicode.IsSpace(c) {
continue
}
switch c {
case '"':
return x.lexString(yylval)
case ',':
return COMMA
case '(':
return LEFTPAREN
case ')':
return RIGHTPAREN
default:
x.backup()
return x.lexId(yylval)
}
}
}
func (x *exprLex) lexId(yylval *exprSymType) int {
var b bytes.Buffer
for {
c := x.next()
if c == lexEOF {
break
}
// If this isn't a character we want in an ID, stop here.
// One day we should make this a regexp.
if c != '_' &&
c != '-' &&
c != '.' &&
c != '*' &&
!unicode.IsLetter(c) &&
!unicode.IsNumber(c) {
x.backup()
break
}
if _, err := b.WriteRune(c); err != nil {
log.Printf("ERR: %s", err)
return lexEOF
}
}
yylval.str = b.String()
return IDENTIFIER
}
func (x *exprLex) lexString(yylval *exprSymType) int {
var b bytes.Buffer
for {
c := x.next()
if c == lexEOF {
break
}
// String end
if c == '"' {
break
}
if _, err := b.WriteRune(c); err != nil {
log.Printf("ERR: %s", err)
return lexEOF
}
}
yylval.str = b.String()
return STRING
}
// Return the next rune for the lexer.
func (x *exprLex) next() rune {
if int(x.pos) >= len(x.input) {
x.width = 0
return lexEOF
}
r, w := utf8.DecodeRuneInString(x.input[x.pos:])
x.width = w
x.pos += x.width
return r
}
// peek returns but does not consume the next rune in the input
func (x *exprLex) peek() rune {
r := x.next()
x.backup()
return r
}
// backup steps back one rune. Can only be called once per next.
func (x *exprLex) backup() {
x.pos -= x.width
}
// The parser calls this method on a parse error.
func (x *exprLex) Error(s string) {
log.Printf("parse error: %s", s)
}

34
config/expr_parse.go Normal file
View File

@ -0,0 +1,34 @@
package config
import (
"sync"
"github.com/hashicorp/terraform/helper/multierror"
)
// exprErrors are the errors built up from parsing. These should not
// be accessed directly.
var exprErrors []error
var exprLock sync.Mutex
var exprResult Interpolation
// ExprParse parses the given expression and returns an executable
// Interpolation.
func ExprParse(v string) (Interpolation, error) {
exprLock.Lock()
defer exprLock.Unlock()
exprErrors = nil
exprResult = nil
// Parse
exprParse(&exprLex{input: v})
// Build up the errors
var err error
if len(exprErrors) > 0 {
err = &multierror.Error{Errors: exprErrors}
exprResult = nil
}
return exprResult, err
}
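For illustration, a hedged sketch of driving ExprParse directly (hypothetical code; each expression string is the text that would appear inside "${...}" in a configuration value):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/config"
)

func main() {
	exprs := []string{
		`"a literal"`,                  // -> *LiteralInterpolation
		"var.foo",                      // -> *VariableInterpolation (user variable)
		"aws_instance.web.id",          // -> *VariableInterpolation (resource variable)
		"lookup(var.amis, var.region)", // -> *FunctionInterpolation
	}

	for _, e := range exprs {
		i, err := config.ExprParse(e)
		if err != nil {
			fmt.Printf("%s -> error: %s\n", e, err)
			continue
		}
		fmt.Printf("%s -> %T\n", e, i)
	}
}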

123
config/expr_parse_test.go Normal file
View File

@ -0,0 +1,123 @@
package config
import (
"reflect"
"testing"
)
func TestExprParse(t *testing.T) {
cases := []struct {
Input string
Result Interpolation
Error bool
}{
{
"foo",
nil,
true,
},
{
`"foo"`,
&LiteralInterpolation{Literal: "foo"},
false,
},
{
"var.foo",
&VariableInterpolation{
Variable: &UserVariable{
Name: "foo",
key: "var.foo",
},
},
false,
},
{
"lookup(var.foo, var.bar)",
&FunctionInterpolation{
Func: nil, // Funcs["lookup"]
Args: []Interpolation{
&VariableInterpolation{
Variable: &UserVariable{
Name: "foo",
key: "var.foo",
},
},
&VariableInterpolation{
Variable: &UserVariable{
Name: "bar",
key: "var.bar",
},
},
},
},
false,
},
{
"lookup(var.foo, lookup(var.baz, var.bar))",
&FunctionInterpolation{
Func: nil, // Funcs["lookup"]
Args: []Interpolation{
&VariableInterpolation{
Variable: &UserVariable{
Name: "foo",
key: "var.foo",
},
},
&FunctionInterpolation{
Func: nil, // Funcs["lookup"]
Args: []Interpolation{
&VariableInterpolation{
Variable: &UserVariable{
Name: "baz",
key: "var.baz",
},
},
&VariableInterpolation{
Variable: &UserVariable{
Name: "bar",
key: "var.bar",
},
},
},
},
},
},
false,
},
}
for i, tc := range cases {
actual, err := ExprParse(tc.Input)
if (err != nil) != tc.Error {
t.Fatalf("%d. Error: %s", i, err)
}
// This is jank, but reflect.DeepEqual never treats two non-nil
// function values as equal.
f, ok := actual.(*FunctionInterpolation)
if ok {
fs := make([]*FunctionInterpolation, 1)
fs[0] = f
for len(fs) > 0 {
f := fs[0]
fs = fs[1:]
f.Func = nil
for _, a := range f.Args {
f, ok := a.(*FunctionInterpolation)
if ok {
fs = append(fs, f)
}
}
}
}
if !reflect.DeepEqual(actual, tc.Result) {
t.Fatalf("%d bad: %#v", i, actual)
}
}
}

View File

@@ -3,7 +3,6 @@ package config
 import (
 	"fmt"
 	"io"
-	"strings"
 )
 
 // configurable is an interface that must be implemented by any configuration
@@ -33,11 +32,15 @@ type fileLoaderFunc func(path string) (configurable, []string, error)
 // executes the proper fileLoaderFunc.
 func loadTree(root string) (*importTree, error) {
 	var f fileLoaderFunc
-	if strings.HasSuffix(root, ".tf") {
-		f = loadFileLibucl
-	} else if strings.HasSuffix(root, ".tf.json") {
-		f = loadFileLibucl
-	} else {
+	switch ext(root) {
+	case ".tf":
+		fallthrough
+	case ".tf.json":
+		f = loadFileLibucl
+	default:
+	}
+
+	if f == nil {
 		return nil, fmt.Errorf(
 			"%s: unknown configuration format. Use '.tf' or '.tf.json' extension",
 			root)

215
config/interpolate.go Normal file
View File

@ -0,0 +1,215 @@
package config
import (
"fmt"
"regexp"
"strconv"
"strings"
)
// We really need to replace this with a real parser.
var funcRegexp *regexp.Regexp = regexp.MustCompile(
`(?i)([a-z0-9_]+)\(\s*(?:([.a-z0-9_]+)\s*,\s*)*([.a-z0-9_]+)\s*\)`)
// Interpolation is something that can be contained in a "${}" in a
// configuration value.
//
// Interpolations might be simple variable references, or it might be
// function calls, or even nested function calls.
type Interpolation interface {
Interpolate(map[string]string) (string, error)
Variables() map[string]InterpolatedVariable
}
// InterpolationFunc is the function signature for implementing
// callable functions in Terraform configurations.
type InterpolationFunc func(map[string]string, ...string) (string, error)
// An InterpolatedVariable is a variable reference within an interpolation.
//
// Implementations of this interface represent various sources where
// variables can come from: user variables, resources, etc.
type InterpolatedVariable interface {
FullKey() string
}
// FunctionInterpolation is an Interpolation that executes a function
// with some variable number of arguments to generate a value.
type FunctionInterpolation struct {
Func InterpolationFunc
Args []Interpolation
}
// LiteralInterpolation implements Interpolation for literals. Ex:
// ${"foo"} will equal "foo".
type LiteralInterpolation struct {
Literal string
}
// VariableInterpolation implements Interpolation for simple variable
// interpolation. Ex: "${var.foo}" or "${aws_instance.foo.bar}"
type VariableInterpolation struct {
Variable InterpolatedVariable
}
// A ResourceVariable is a variable that is referencing the field
// of a resource, such as "${aws_instance.foo.ami}"
type ResourceVariable struct {
Type string // Resource type, i.e. "aws_instance"
Name string // Resource name
Field string // Resource field
Multi bool // True if multi-variable: aws_instance.foo.*.id
Index int // Index for multi-variable: aws_instance.foo.1.id == 1
key string
}
// A UserVariable is a variable that references a user variable
// provided from outside the configuration. This looks like
// "${var.foo}"
type UserVariable struct {
Name string
Elem string
key string
}
func NewInterpolatedVariable(v string) (InterpolatedVariable, error) {
if !strings.HasPrefix(v, "var.") {
return NewResourceVariable(v)
}
return NewUserVariable(v)
}
func (i *FunctionInterpolation) Interpolate(
vs map[string]string) (string, error) {
args := make([]string, len(i.Args))
for idx, a := range i.Args {
v, err := a.Interpolate(vs)
if err != nil {
return "", err
}
args[idx] = v
}
return i.Func(vs, args...)
}
func (i *FunctionInterpolation) GoString() string {
return fmt.Sprintf("*%#v", *i)
}
func (i *FunctionInterpolation) Variables() map[string]InterpolatedVariable {
result := make(map[string]InterpolatedVariable)
for _, a := range i.Args {
for k, v := range a.Variables() {
result[k] = v
}
}
return result
}
func (i *LiteralInterpolation) Interpolate(
map[string]string) (string, error) {
return i.Literal, nil
}
func (i *LiteralInterpolation) Variables() map[string]InterpolatedVariable {
return nil
}
func (i *VariableInterpolation) Interpolate(
vs map[string]string) (string, error) {
v, ok := vs[i.Variable.FullKey()]
if !ok {
return "", fmt.Errorf(
"%s: value for variable not found",
i.Variable.FullKey())
}
return v, nil
}
func (i *VariableInterpolation) GoString() string {
return fmt.Sprintf("*%#v", *i)
}
func (i *VariableInterpolation) Variables() map[string]InterpolatedVariable {
return map[string]InterpolatedVariable{i.Variable.FullKey(): i.Variable}
}
func NewResourceVariable(key string) (*ResourceVariable, error) {
parts := strings.SplitN(key, ".", 3)
if len(parts) < 3 {
return nil, fmt.Errorf(
"%s: resource variables must be three parts: type.name.attr",
key)
}
field := parts[2]
multi := false
var index int
if idx := strings.Index(field, "."); idx != -1 {
indexStr := field[:idx]
multi = indexStr == "*"
index = -1
if !multi {
indexInt, err := strconv.ParseInt(indexStr, 0, 0)
if err == nil {
multi = true
index = int(indexInt)
}
}
if multi {
field = field[idx+1:]
}
}
return &ResourceVariable{
Type: parts[0],
Name: parts[1],
Field: field,
Multi: multi,
Index: index,
key: key,
}, nil
}
func (v *ResourceVariable) ResourceId() string {
return fmt.Sprintf("%s.%s", v.Type, v.Name)
}
func (v *ResourceVariable) FullKey() string {
return v.key
}
func NewUserVariable(key string) (*UserVariable, error) {
name := key[len("var."):]
elem := ""
if idx := strings.Index(name, "."); idx > -1 {
elem = name[idx+1:]
name = name[:idx]
}
return &UserVariable{
key: key,
Name: name,
Elem: elem,
}, nil
}
func (v *UserVariable) FullKey() string {
return v.key
}
func (v *UserVariable) GoString() string {
return fmt.Sprintf("*%#v", *v)
}

View File

@ -0,0 +1,54 @@
package config
import (
"fmt"
"io/ioutil"
"strings"
)
// Funcs is the mapping of built-in functions for configuration.
var Funcs map[string]InterpolationFunc
func init() {
Funcs = map[string]InterpolationFunc{
"file": interpolationFuncFile,
"lookup": interpolationFuncLookup,
}
}
// interpolationFuncFile implements the "file" function that allows
// loading contents from a file.
func interpolationFuncFile(
vs map[string]string, args ...string) (string, error) {
if len(args) != 1 {
return "", fmt.Errorf(
"file expects 1 arguments, got %d", len(args))
}
data, err := ioutil.ReadFile(args[0])
if err != nil {
return "", err
}
return string(data), nil
}
// interpolationFuncLookup implements the "lookup" function that allows
// dynamic lookups of map types within a Terraform configuration.
func interpolationFuncLookup(
vs map[string]string, args ...string) (string, error) {
if len(args) != 2 {
return "", fmt.Errorf(
"lookup expects 2 arguments, got %d", len(args))
}
k := fmt.Sprintf("var.%s", strings.Join(args, "."))
v, ok := vs[k]
if !ok {
return "", fmt.Errorf(
"lookup in '%s' failed to find '%s'",
args[0], args[1])
}
return v, nil
}
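To show how the pieces are meant to fit together, a hedged end-to-end sketch (hypothetical driver code): a parsed lookup() call interpolated against the kind of flattened variable map that Variable.DefaultsMap produces:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/config"
)

func main() {
	// Flattened variables: the map variable resolves to its own name,
	// and its entries are flattened under "var.amis.<key>".
	vars := map[string]string{
		"var.region":         "us-east-1",
		"var.amis":           "amis",
		"var.amis.us-east-1": "ami-abc123",
	}

	i, err := config.ExprParse("lookup(var.amis, var.region)")
	if err != nil {
		panic(err)
	}

	// lookup receives "amis" and "us-east-1", joins them into
	// "var.amis.us-east-1", and returns the matching value.
	out, err := i.Interpolate(vars)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // ami-abc123
}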

View File

@ -0,0 +1,104 @@
package config
import (
"io/ioutil"
"os"
"testing"
)
func TestInterpolateFuncFile(t *testing.T) {
tf, err := ioutil.TempFile("", "tf")
if err != nil {
t.Fatalf("err: %s", err)
}
path := tf.Name()
tf.Write([]byte("foo"))
tf.Close()
defer os.Remove(path)
cases := []struct {
Args []string
Result string
Error bool
}{
{
[]string{path},
"foo",
false,
},
// Invalid path
{
[]string{"/i/dont/exist"},
"",
true,
},
// Too many args
{
[]string{"foo", "bar"},
"",
true,
},
}
for i, tc := range cases {
actual, err := interpolationFuncFile(nil, tc.Args...)
if (err != nil) != tc.Error {
t.Fatalf("%d: err: %s", i, err)
}
if actual != tc.Result {
t.Fatalf("%d: bad: %#v", i, actual)
}
}
}
func TestInterpolateFuncLookup(t *testing.T) {
cases := []struct {
M map[string]string
Args []string
Result string
Error bool
}{
{
map[string]string{
"var.foo.bar": "baz",
},
[]string{"foo", "bar"},
"baz",
false,
},
// Invalid key
{
map[string]string{
"var.foo.bar": "baz",
},
[]string{"foo", "baz"},
"",
true,
},
// Too many args
{
map[string]string{
"var.foo.bar": "baz",
},
[]string{"foo", "bar", "baz"},
"",
true,
},
}
for i, tc := range cases {
actual, err := interpolationFuncLookup(tc.M, tc.Args...)
if (err != nil) != tc.Error {
t.Fatalf("%d: err: %s", i, err)
}
if actual != tc.Result {
t.Fatalf("%d: bad: %#v", i, actual)
}
}
}

260
config/interpolate_test.go Normal file
View File

@ -0,0 +1,260 @@
package config
import (
"reflect"
"strings"
"testing"
)
func TestNewInterpolatedVariable(t *testing.T) {
cases := []struct {
Input string
Result InterpolatedVariable
Error bool
}{
{
"var.foo",
&UserVariable{
Name: "foo",
key: "var.foo",
},
false,
},
}
for i, tc := range cases {
actual, err := NewInterpolatedVariable(tc.Input)
if (err != nil) != tc.Error {
t.Fatalf("%d. Error: %s", i, err)
}
if !reflect.DeepEqual(actual, tc.Result) {
t.Fatalf("%d bad: %#v", i, actual)
}
}
}
func TestNewResourceVariable(t *testing.T) {
v, err := NewResourceVariable("foo.bar.baz")
if err != nil {
t.Fatalf("err: %s", err)
}
if v.Type != "foo" {
t.Fatalf("bad: %#v", v)
}
if v.Name != "bar" {
t.Fatalf("bad: %#v", v)
}
if v.Field != "baz" {
t.Fatalf("bad: %#v", v)
}
if v.Multi {
t.Fatal("should not be multi")
}
if v.FullKey() != "foo.bar.baz" {
t.Fatalf("bad: %#v", v)
}
}
func TestNewUserVariable(t *testing.T) {
v, err := NewUserVariable("var.bar")
if err != nil {
t.Fatalf("err: %s", err)
}
if v.Name != "bar" {
t.Fatalf("bad: %#v", v.Name)
}
if v.FullKey() != "var.bar" {
t.Fatalf("bad: %#v", v)
}
}
func TestNewUserVariable_map(t *testing.T) {
v, err := NewUserVariable("var.bar.baz")
if err != nil {
t.Fatalf("err: %s", err)
}
if v.Name != "bar" {
t.Fatalf("bad: %#v", v.Name)
}
if v.Elem != "baz" {
t.Fatalf("bad: %#v", v.Elem)
}
if v.FullKey() != "var.bar.baz" {
t.Fatalf("bad: %#v", v)
}
}
func TestFunctionInterpolation_impl(t *testing.T) {
var _ Interpolation = new(FunctionInterpolation)
}
func TestFunctionInterpolation(t *testing.T) {
v1, err := NewInterpolatedVariable("var.foo")
if err != nil {
t.Fatalf("err: %s", err)
}
v2, err := NewInterpolatedVariable("var.bar")
if err != nil {
t.Fatalf("err: %s", err)
}
fn := func(vs map[string]string, args ...string) (string, error) {
return strings.Join(args, " "), nil
}
i := &FunctionInterpolation{
Func: fn,
Args: []Interpolation{
&VariableInterpolation{Variable: v1},
&VariableInterpolation{Variable: v2},
},
}
expected := map[string]InterpolatedVariable{
"var.foo": v1,
"var.bar": v2,
}
if !reflect.DeepEqual(i.Variables(), expected) {
t.Fatalf("bad: %#v", i.Variables())
}
actual, err := i.Interpolate(map[string]string{
"var.foo": "bar",
"var.bar": "baz",
})
if err != nil {
t.Fatalf("err: %s", err)
}
if actual != "bar baz" {
t.Fatalf("bad: %#v", actual)
}
}
func TestLiteralInterpolation_impl(t *testing.T) {
var _ Interpolation = new(LiteralInterpolation)
}
func TestLiteralInterpolation(t *testing.T) {
i := &LiteralInterpolation{
Literal: "bar",
}
if i.Variables() != nil {
t.Fatalf("bad: %#v", i.Variables())
}
actual, err := i.Interpolate(nil)
if err != nil {
t.Fatalf("err: %s", err)
}
if actual != "bar" {
t.Fatalf("bad: %#v", actual)
}
}
func TestResourceVariable_impl(t *testing.T) {
var _ InterpolatedVariable = new(ResourceVariable)
}
func TestResourceVariable_Multi(t *testing.T) {
v, err := NewResourceVariable("foo.bar.*.baz")
if err != nil {
t.Fatalf("err: %s", err)
}
if v.Type != "foo" {
t.Fatalf("bad: %#v", v)
}
if v.Name != "bar" {
t.Fatalf("bad: %#v", v)
}
if v.Field != "baz" {
t.Fatalf("bad: %#v", v)
}
if !v.Multi {
t.Fatal("should be multi")
}
}
func TestResourceVariable_MultiIndex(t *testing.T) {
cases := []struct {
Input string
Index int
Field string
}{
{"foo.bar.*.baz", -1, "baz"},
{"foo.bar.0.baz", 0, "baz"},
{"foo.bar.5.baz", 5, "baz"},
}
for _, tc := range cases {
v, err := NewResourceVariable(tc.Input)
if err != nil {
t.Fatalf("err: %s", err)
}
if !v.Multi {
t.Fatalf("should be multi: %s", tc.Input)
}
if v.Index != tc.Index {
t.Fatalf("bad: %d\n\n%s", v.Index, tc.Input)
}
if v.Field != tc.Field {
t.Fatalf("bad: %s\n\n%s", v.Field, tc.Input)
}
}
}
func TestUserVariable_impl(t *testing.T) {
var _ InterpolatedVariable = new(UserVariable)
}
func TestVariableInterpolation_impl(t *testing.T) {
var _ Interpolation = new(VariableInterpolation)
}
func TestVariableInterpolation(t *testing.T) {
uv, err := NewUserVariable("var.foo")
if err != nil {
t.Fatalf("err: %s", err)
}
i := &VariableInterpolation{Variable: uv}
expected := map[string]InterpolatedVariable{"var.foo": uv}
if !reflect.DeepEqual(i.Variables(), expected) {
t.Fatalf("bad: %#v", i.Variables())
}
actual, err := i.Interpolate(map[string]string{
"var.foo": "bar",
})
if err != nil {
t.Fatalf("err: %s", err)
}
if actual != "bar" {
t.Fatalf("bad: %#v", actual)
}
}
func TestVariableInterpolation_missing(t *testing.T) {
uv, err := NewUserVariable("var.foo")
if err != nil {
t.Fatalf("err: %s", err)
}
i := &VariableInterpolation{Variable: uv}
_, err = i.Interpolate(map[string]string{
"var.bar": "bar",
})
if err == nil {
t.Fatal("should error")
}
}

155
config/interpolate_walk.go Normal file
View File

@ -0,0 +1,155 @@
package config
import (
"fmt"
"reflect"
"regexp"
"strings"
"github.com/mitchellh/reflectwalk"
)
// interpRegexp is a regexp that matches interpolations such as ${foo.bar}
var interpRegexp *regexp.Regexp = regexp.MustCompile(
`(?i)(\$+)\{([\s*-.,\(\)a-z0-9_]+)\}`)
// interpolationWalker implements interfaces for the reflectwalk package
// (github.com/mitchellh/reflectwalk) that can be used to automatically
// execute a callback for an interpolation.
type interpolationWalker struct {
F interpolationWalkerFunc
Replace bool
key []string
loc reflectwalk.Location
cs []reflect.Value
csData interface{}
unknownKeys []string
}
// interpolationWalkerFunc is the callback called by interpolationWalk.
// It is called with any interpolation found. It should return a value
// to replace the interpolation with, along with any errors.
//
// If Replace is set to false in interpolationWalker, then the replace
// value can be anything as it will have no effect.
type interpolationWalkerFunc func(Interpolation) (string, error)
func (w *interpolationWalker) Enter(loc reflectwalk.Location) error {
w.loc = loc
return nil
}
func (w *interpolationWalker) Exit(loc reflectwalk.Location) error {
w.loc = reflectwalk.None
switch loc {
case reflectwalk.Map:
w.cs = w.cs[:len(w.cs)-1]
case reflectwalk.MapValue:
w.key = w.key[:len(w.key)-1]
}
return nil
}
func (w *interpolationWalker) Map(m reflect.Value) error {
w.cs = append(w.cs, m)
return nil
}
func (w *interpolationWalker) MapElem(m, k, v reflect.Value) error {
w.csData = k
w.key = append(w.key, k.String())
return nil
}
func (w *interpolationWalker) Primitive(v reflect.Value) error {
setV := v
// We only care about strings
if v.Kind() == reflect.Interface {
setV = v
v = v.Elem()
}
if v.Kind() != reflect.String {
return nil
}
// XXX: This can be a lot more efficient if we used a real
// parser. A regexp is a hammer though that will get this working.
matches := interpRegexp.FindAllStringSubmatch(v.String(), -1)
if len(matches) == 0 {
return nil
}
result := v.String()
for _, match := range matches {
dollars := len(match[1])
// If there is an even number of dollar signs, then it is escaped
if dollars%2 == 0 {
continue
}
// Interpolation found, instantiate it
key := match[2]
i, err := ExprParse(key)
if err != nil {
return err
}
replaceVal, err := w.F(i)
if err != nil {
return fmt.Errorf(
"%s: %s",
key,
err)
}
if w.Replace {
// If this is an unknown variable, then we remove it from
// the configuration.
if replaceVal == UnknownVariableValue {
w.removeCurrent()
return nil
}
result = strings.Replace(result, match[0], replaceVal, -1)
}
}
if w.Replace {
resultVal := reflect.ValueOf(result)
if w.loc == reflectwalk.MapValue {
// If we're in a map, then the only way to set a map value is
// to set it directly.
m := w.cs[len(w.cs)-1]
mk := w.csData.(reflect.Value)
m.SetMapIndex(mk, resultVal)
} else {
// Otherwise, we should be addressable
setV.Set(resultVal)
}
}
return nil
}
func (w *interpolationWalker) removeCurrent() {
c := w.cs[len(w.cs)-1]
switch c.Kind() {
case reflect.Map:
// Zero value so that we delete the map key
var val reflect.Value
// Get the key and delete it
k := w.csData.(reflect.Value)
c.SetMapIndex(k, val)
}
// Append the key to the unknown keys
w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, "."))
}
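A rough sketch of how this walker might be driven (hypothetical code, written as if it lived inside the config package since interpolationWalker is unexported):

package config

import (
	"fmt"

	"github.com/mitchellh/reflectwalk"
)

func exampleInterpolationWalk() error {
	raw := map[string]interface{}{
		"ami":   "${var.ami}",
		"count": "3",
	}

	// Replace each interpolation in place using a flat variable map.
	vars := map[string]string{"var.ami": "ami-abc123"}
	w := &interpolationWalker{
		Replace: true,
		F: func(i Interpolation) (string, error) {
			return i.Interpolate(vars)
		},
	}
	if err := reflectwalk.Walk(raw, w); err != nil {
		return err
	}

	fmt.Println(raw["ami"]) // ami-abc123
	return nil
}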

View File

@ -0,0 +1,121 @@
package config
import (
"reflect"
"testing"
"github.com/mitchellh/reflectwalk"
)
func TestInterpolationWalker_detect(t *testing.T) {
cases := []struct {
Input interface{}
Result []Interpolation
}{
{
Input: map[string]interface{}{
"foo": "$${var.foo}",
},
Result: nil,
},
{
Input: map[string]interface{}{
"foo": "${var.foo}",
},
Result: []Interpolation{
&VariableInterpolation{
Variable: &UserVariable{
Name: "foo",
key: "var.foo",
},
},
},
},
{
Input: map[string]interface{}{
"foo": "${lookup(var.foo)}",
},
Result: []Interpolation{
&FunctionInterpolation{
Func: nil,
Args: []Interpolation{
&VariableInterpolation{
Variable: &UserVariable{
Name: "foo",
key: "var.foo",
},
},
},
},
},
},
}
for i, tc := range cases {
var actual []Interpolation
detectFn := func(i Interpolation) (string, error) {
actual = append(actual, i)
return "", nil
}
w := &interpolationWalker{F: detectFn}
if err := reflectwalk.Walk(tc.Input, w); err != nil {
t.Fatalf("err: %s", err)
}
for _, a := range actual {
// This is jank, but reflect.DeepEqual never treats two non-nil
// function values as equal.
if f, ok := a.(*FunctionInterpolation); ok {
f.Func = nil
}
}
if !reflect.DeepEqual(actual, tc.Result) {
t.Fatalf("%d: bad:\n\n%#v", i, actual)
}
}
}
func TestInterpolationWalker_replace(t *testing.T) {
cases := []struct {
Input interface{}
Output interface{}
}{
{
Input: map[string]interface{}{
"foo": "$${var.foo}",
},
Output: map[string]interface{}{
"foo": "$${var.foo}",
},
},
{
Input: map[string]interface{}{
"foo": "hello, ${var.foo}",
},
Output: map[string]interface{}{
"foo": "hello, bar",
},
},
}
for i, tc := range cases {
fn := func(i Interpolation) (string, error) {
return "bar", nil
}
w := &interpolationWalker{F: fn, Replace: true}
if err := reflectwalk.Walk(tc.Input, w); err != nil {
t.Fatalf("err: %s", err)
}
if !reflect.DeepEqual(tc.Input, tc.Output) {
t.Fatalf("%d: bad:\n\n%#v", i, tc.Input)
}
}
}

View File

@ -2,7 +2,11 @@ package config
import ( import (
"fmt" "fmt"
"io"
"os"
"path/filepath" "path/filepath"
"sort"
"strings"
) )
// Load loads the Terraform configuration from a given file. // Load loads the Terraform configuration from a given file.
@ -29,28 +33,82 @@ func Load(path string) (*Config, error) {
} }
// LoadDir loads all the Terraform configuration files in a single // LoadDir loads all the Terraform configuration files in a single
// directory and merges them together. // directory and appends them together.
func LoadDir(path string) (*Config, error) { //
matches, err := filepath.Glob(filepath.Join(path, "*.tf")) // Special files known as "override files" can also be present, which
// are merged into the loaded configuration. That is, the non-override
// files are loaded first to create the configuration. Then, the overrides
// are merged into the configuration to create the final configuration.
//
// Files are loaded in lexical order.
func LoadDir(root string) (*Config, error) {
var files, overrides []string
f, err := os.Open(root)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if len(matches) == 0 { err = nil
for err != io.EOF {
var fis []os.FileInfo
fis, err = f.Readdir(128)
if err != nil && err != io.EOF {
f.Close()
return nil, err
}
for _, fi := range fis {
// Ignore directories
if fi.IsDir() {
continue
}
// Only care about files that are valid to load
name := fi.Name()
extValue := ext(name)
if extValue == "" {
continue
}
// Determine if we're dealing with an override
nameNoExt := name[:len(name)-len(extValue)]
override := nameNoExt == "override" ||
strings.HasSuffix(nameNoExt, "_override")
path := filepath.Join(root, name)
if override {
overrides = append(overrides, path)
} else {
files = append(files, path)
}
}
}
// Close the directory, we're done with it
f.Close()
if len(files) == 0 {
return nil, fmt.Errorf( return nil, fmt.Errorf(
"No Terraform configuration files found in directory: %s", "No Terraform configuration files found in directory: %s",
path) root)
} }
var result *Config var result *Config
for _, f := range matches {
// Sort the files and overrides so we have a deterministic order
sort.Strings(files)
sort.Strings(overrides)
// Load all the regular files, append them to each other.
for _, f := range files {
c, err := Load(f) c, err := Load(f)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if result != nil { if result != nil {
result, err = Merge(result, c) result, err = Append(result, c)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -59,5 +117,30 @@ func LoadDir(path string) (*Config, error) {
} }
} }
// Load all the overrides, and merge them into the config
for _, f := range overrides {
c, err := Load(f)
if err != nil {
return nil, err
}
result, err = Merge(result, c)
if err != nil {
return nil, err
}
}
return result, nil return result, nil
} }
// ext returns the Terraform configuration extension of the given
// path, or a blank string if the path has no recognized extension.
func ext(path string) string {
if strings.HasSuffix(path, ".tf") {
return ".tf"
} else if strings.HasSuffix(path, ".tf.json") {
return ".tf.json"
} else {
return ""
}
}
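A brief sketch of what this loading order means in practice (the directory layout and file names are hypothetical):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/config"
)

func main() {
	// Given a directory such as:
	//
	//   infra/
	//     main.tf           appended first (lexical order)
	//     variables.tf      appended second
	//     main_override.tf  merged over the appended result
	//     override.tf       merged last
	//
	// the non-override files are combined with Append, then each
	// override file is merged on top with Merge.
	c, err := config.LoadDir("./infra")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(c.Resources))
}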

View File

@ -30,7 +30,7 @@ func (t *libuclConfigurable) Config() (*Config, error) {
} }
type LibuclVariable struct { type LibuclVariable struct {
Default string Default interface{}
Description string Description string
Fields []string `libucl:",decodedFields"` Fields []string `libucl:",decodedFields"`
} }
@ -45,21 +45,33 @@ func (t *libuclConfigurable) Config() (*Config, error) {
// Start building up the actual configuration. We start with // Start building up the actual configuration. We start with
// variables. // variables.
// TODO(mitchellh): Make function like loadVariablesLibucl so that
// duplicates aren't overriden
config := new(Config) config := new(Config)
config.Variables = make(map[string]*Variable) if len(rawConfig.Variable) > 0 {
for k, v := range rawConfig.Variable { config.Variables = make([]*Variable, 0, len(rawConfig.Variable))
defaultSet := false for k, v := range rawConfig.Variable {
for _, f := range v.Fields { // Defaults turn into a slice of map[string]interface{} and
if f == "Default" { // we need to make sure to convert that down into the
defaultSet = true // proper type for Config.
break if ms, ok := v.Default.([]map[string]interface{}); ok {
} def := make(map[string]interface{})
} for _, m := range ms {
for k, v := range m {
def[k] = v
}
}
config.Variables[k] = &Variable{ v.Default = def
Default: v.Default, }
Description: v.Description,
defaultSet: defaultSet, newVar := &Variable{
Name: k,
Default: v.Default,
Description: v.Description,
}
config.Variables = append(config.Variables, newVar)
} }
} }
@ -178,7 +190,7 @@ func loadFileLibucl(root string) (configurable, []string, error) {
// LoadOutputsLibucl recurses into the given libucl object and turns // LoadOutputsLibucl recurses into the given libucl object and turns
// it into a mapping of outputs. // it into a mapping of outputs.
func loadOutputsLibucl(o *libucl.Object) (map[string]*Output, error) { func loadOutputsLibucl(o *libucl.Object) ([]*Output, error) {
objects := make(map[string]*libucl.Object) objects := make(map[string]*libucl.Object)
// Iterate over all the "output" blocks and get the keys along with // Iterate over all the "output" blocks and get the keys along with
@ -196,8 +208,13 @@ func loadOutputsLibucl(o *libucl.Object) (map[string]*Output, error) {
} }
iter.Close() iter.Close()
// If we have none, just return nil
if len(objects) == 0 {
return nil, nil
}
// Go through each object and turn it into an actual result. // Go through each object and turn it into an actual result.
result := make(map[string]*Output) result := make([]*Output, 0, len(objects))
for n, o := range objects { for n, o := range objects {
var config map[string]interface{} var config map[string]interface{}
@ -213,10 +230,10 @@ func loadOutputsLibucl(o *libucl.Object) (map[string]*Output, error) {
err) err)
} }
result[n] = &Output{ result = append(result, &Output{
Name: n, Name: n,
RawConfig: rawConfig, RawConfig: rawConfig,
} })
} }
return result, nil return result, nil
@ -224,7 +241,7 @@ func loadOutputsLibucl(o *libucl.Object) (map[string]*Output, error) {
// LoadProvidersLibucl recurses into the given libucl object and turns // LoadProvidersLibucl recurses into the given libucl object and turns
// it into a mapping of provider configs. // it into a mapping of provider configs.
func loadProvidersLibucl(o *libucl.Object) (map[string]*ProviderConfig, error) { func loadProvidersLibucl(o *libucl.Object) ([]*ProviderConfig, error) {
objects := make(map[string]*libucl.Object) objects := make(map[string]*libucl.Object)
// Iterate over all the "provider" blocks and get the keys along with // Iterate over all the "provider" blocks and get the keys along with
@ -242,8 +259,12 @@ func loadProvidersLibucl(o *libucl.Object) (map[string]*ProviderConfig, error) {
} }
iter.Close() iter.Close()
if len(objects) == 0 {
return nil, nil
}
// Go through each object and turn it into an actual result. // Go through each object and turn it into an actual result.
result := make(map[string]*ProviderConfig) result := make([]*ProviderConfig, 0, len(objects))
for n, o := range objects { for n, o := range objects {
var config map[string]interface{} var config map[string]interface{}
@ -259,9 +280,10 @@ func loadProvidersLibucl(o *libucl.Object) (map[string]*ProviderConfig, error) {
err) err)
} }
result[n] = &ProviderConfig{ result = append(result, &ProviderConfig{
Name: n,
RawConfig: rawConfig, RawConfig: rawConfig,
} })
} }
return result, nil return result, nil
@ -330,16 +352,11 @@ func loadResourcesLibucl(o *libucl.Object) ([]*Resource, error) {
err) err)
} }
// Remove the "count" from the config, since we treat that special // Remove the fields we handle specially
delete(config, "count")
// Delete the "provisioner" section from the config since
// that is treated specially.
delete(config, "provisioner")
// Delete the "connection" section since we handle that
// seperately
delete(config, "connection") delete(config, "connection")
delete(config, "count")
delete(config, "depends_on")
delete(config, "provisioner")
rawConfig, err := NewRawConfig(config) rawConfig, err := NewRawConfig(config)
if err != nil { if err != nil {
@ -379,6 +396,20 @@ func loadResourcesLibucl(o *libucl.Object) ([]*Resource, error) {
} }
} }
// If we have depends fields, then add those in
var dependsOn []string
if deps := r.Get("depends_on"); deps != nil {
err := deps.Decode(&dependsOn)
deps.Close()
if err != nil {
return nil, fmt.Errorf(
"Error reading depends_on for %s[%s]: %s",
t.Key(),
r.Key(),
err)
}
}
// If we have provisioners, then parse those out // If we have provisioners, then parse those out
var provisioners []*Provisioner var provisioners []*Provisioner
if po := r.Get("provisioner"); po != nil { if po := r.Get("provisioner"); po != nil {
@ -400,6 +431,7 @@ func loadResourcesLibucl(o *libucl.Object) ([]*Resource, error) {
Count: count, Count: count,
RawConfig: rawConfig, RawConfig: rawConfig,
Provisioners: provisioners, Provisioners: provisioners,
DependsOn: dependsOn,
}) })
} }
} }

View File

@ -116,16 +116,6 @@ func TestLoad_variables(t *testing.T) {
if actual != strings.TrimSpace(variablesVariablesStr) { if actual != strings.TrimSpace(variablesVariablesStr) {
t.Fatalf("bad:\n%s", actual) t.Fatalf("bad:\n%s", actual)
} }
if !c.Variables["foo"].Required() {
t.Fatal("foo should be required")
}
if c.Variables["bar"].Required() {
t.Fatal("bar should not be required")
}
if c.Variables["baz"].Required() {
t.Fatal("baz should not be required")
}
} }
func TestLoadDir_basic(t *testing.T) { func TestLoadDir_basic(t *testing.T) {
@ -166,16 +156,64 @@ func TestLoadDir_noConfigs(t *testing.T) {
} }
} }
func outputsStr(os map[string]*Output) string { func TestLoadDir_noMerge(t *testing.T) {
c, err := LoadDir(filepath.Join(fixtureDir, "dir-merge"))
if err != nil {
t.Fatalf("err: %s", err)
}
if c == nil {
t.Fatal("config should not be nil")
}
if err := c.Validate(); err == nil {
t.Fatal("should not be valid")
}
}
func TestLoadDir_override(t *testing.T) {
c, err := LoadDir(filepath.Join(fixtureDir, "dir-override"))
if err != nil {
t.Fatalf("err: %s", err)
}
if c == nil {
t.Fatal("config should not be nil")
}
actual := variablesStr(c.Variables)
if actual != strings.TrimSpace(dirOverrideVariablesStr) {
t.Fatalf("bad:\n%s", actual)
}
actual = providerConfigsStr(c.ProviderConfigs)
if actual != strings.TrimSpace(dirOverrideProvidersStr) {
t.Fatalf("bad:\n%s", actual)
}
actual = resourcesStr(c.Resources)
if actual != strings.TrimSpace(dirOverrideResourcesStr) {
t.Fatalf("bad:\n%s", actual)
}
actual = outputsStr(c.Outputs)
if actual != strings.TrimSpace(dirOverrideOutputsStr) {
t.Fatalf("bad:\n%s", actual)
}
}
func outputsStr(os []*Output) string {
ns := make([]string, 0, len(os)) ns := make([]string, 0, len(os))
for n, _ := range os { m := make(map[string]*Output)
ns = append(ns, n) for _, o := range os {
ns = append(ns, o.Name)
m[o.Name] = o
} }
sort.Strings(ns) sort.Strings(ns)
result := "" result := ""
for _, n := range ns { for _, n := range ns {
o := os[n] o := m[n]
result += fmt.Sprintf("%s\n", n) result += fmt.Sprintf("%s\n", n)
@ -256,17 +294,19 @@ func TestLoad_connections(t *testing.T) {
// This helper turns a provider configs field into a deterministic // This helper turns a provider configs field into a deterministic
// string value for comparison in tests. // string value for comparison in tests.
func providerConfigsStr(pcs map[string]*ProviderConfig) string { func providerConfigsStr(pcs []*ProviderConfig) string {
result := "" result := ""
ns := make([]string, 0, len(pcs)) ns := make([]string, 0, len(pcs))
for n, _ := range pcs { m := make(map[string]*ProviderConfig)
ns = append(ns, n) for _, n := range pcs {
ns = append(ns, n.Name)
m[n.Name] = n
} }
sort.Strings(ns) sort.Strings(ns)
for _, n := range ns { for _, n := range ns {
pc := pcs[n] pc := m[n]
result += fmt.Sprintf("%s\n", n) result += fmt.Sprintf("%s\n", n)
@ -353,6 +393,13 @@ func resourcesStr(rs []*Resource) string {
} }
} }
if len(r.DependsOn) > 0 {
result += fmt.Sprintf(" dependsOn\n")
for _, d := range r.DependsOn {
result += fmt.Sprintf(" %s\n", d)
}
}
if len(r.RawConfig.Variables) > 0 { if len(r.RawConfig.Variables) > 0 {
result += fmt.Sprintf(" vars\n") result += fmt.Sprintf(" vars\n")
@ -384,18 +431,25 @@ func resourcesStr(rs []*Resource) string {
// This helper turns a variables field into a deterministic // This helper turns a variables field into a deterministic
// string value for comparison in tests. // string value for comparison in tests.
func variablesStr(vs map[string]*Variable) string { func variablesStr(vs []*Variable) string {
result := "" result := ""
ks := make([]string, 0, len(vs)) ks := make([]string, 0, len(vs))
for k, _ := range vs { m := make(map[string]*Variable)
ks = append(ks, k) for _, v := range vs {
ks = append(ks, v.Name)
m[v.Name] = v
} }
sort.Strings(ks) sort.Strings(ks)
for _, k := range ks { for _, k := range ks {
v := vs[k] v := m[k]
if v.Default == "" { required := ""
if v.Required() {
required = " (required)"
}
if v.Default == nil || v.Default == "" {
v.Default = "<>" v.Default = "<>"
} }
if v.Description == "" { if v.Description == "" {
@ -403,8 +457,9 @@ func variablesStr(vs map[string]*Variable) string {
} }
result += fmt.Sprintf( result += fmt.Sprintf(
"%s\n %s\n %s\n", "%s%s\n %v\n %s\n",
k, k,
required,
v.Default, v.Default,
v.Description) v.Description)
} }
@ -431,6 +486,8 @@ do
const basicResourcesStr = ` const basicResourcesStr = `
aws_instance[db] (x1) aws_instance[db] (x1)
security_groups security_groups
dependsOn
aws_instance.web
vars vars
resource: aws_security_group.firewall.*.id resource: aws_security_group.firewall.*.id
aws_instance[web] (x1) aws_instance[web] (x1)
@ -486,8 +543,46 @@ foo
bar bar
` `
const dirOverrideOutputsStr = `
web_ip
vars
resource: aws_instance.web.private_ip
`
const dirOverrideProvidersStr = `
aws
access_key
secret_key
do
api_key
vars
user: var.foo
`
const dirOverrideResourcesStr = `
aws_instance[db] (x1)
ami
security_groups
aws_instance[web] (x1)
ami
foo
network_interface
security_groups
vars
resource: aws_security_group.firewall.foo
user: var.foo
aws_security_group[firewall] (x5)
`
const dirOverrideVariablesStr = `
foo
bar
bar
`
const importProvidersStr = ` const importProvidersStr = `
aws aws
bar
foo foo
` `
@ -497,7 +592,7 @@ aws_security_group[web] (x1)
` `
const importVariablesStr = ` const importVariablesStr = `
bar bar (required)
<> <>
<> <>
foo foo
@ -538,7 +633,7 @@ bar
baz baz
foo foo
<> <>
foo foo (required)
<> <>
<> <>
` `

View File

@ -1,9 +1,5 @@
package config package config
import (
"fmt"
)
// Merge merges two configurations into a single configuration. // Merge merges two configurations into a single configuration.
// //
// Merge allows for the two configurations to have duplicate resources, // Merge allows for the two configurations to have duplicate resources,
@ -24,59 +20,135 @@ func Merge(c1, c2 *Config) (*Config, error) {
c.unknownKeys = append(c.unknownKeys, k) c.unknownKeys = append(c.unknownKeys, k)
} }
// Merge variables: Variable merging is quite simple. Set fields in // NOTE: Everything below is pretty gross. Due to the lack of generics
// later set variables override those earlier. // in Go, there is some hoop-jumping involved to make this merging a
c.Variables = c1.Variables // little more test-friendly and less repetitive. Ironically, making it
for k, v2 := range c2.Variables { // less repetitive involves being a little repetitive, but I prefer to
v1, ok := c.Variables[k] // be repetitive with things that are less error prone than things that
if ok { // are more error prone (more logic). Type conversions to an interface
if v2.Default == "" { // are pretty low-error.
v2.Default = v1.Default
} var m1, m2, mresult []merger
if v2.Description == "" {
v2.Description = v1.Description // Outputs
} m1 = make([]merger, 0, len(c1.Outputs))
m2 = make([]merger, 0, len(c2.Outputs))
for _, v := range c1.Outputs {
m1 = append(m1, v)
}
for _, v := range c2.Outputs {
m2 = append(m2, v)
}
mresult = mergeSlice(m1, m2)
if len(mresult) > 0 {
c.Outputs = make([]*Output, len(mresult))
for i, v := range mresult {
c.Outputs[i] = v.(*Output)
} }
c.Variables[k] = v2
} }
// Merge outputs: If they collide, just take the latest one for now. In // Provider Configs
// the future, we might provide smarter merge functionality. m1 = make([]merger, 0, len(c1.ProviderConfigs))
c.Outputs = make(map[string]*Output) m2 = make([]merger, 0, len(c2.ProviderConfigs))
for k, v := range c1.Outputs { for _, v := range c1.ProviderConfigs {
c.Outputs[k] = v m1 = append(m1, v)
} }
for k, v := range c2.Outputs { for _, v := range c2.ProviderConfigs {
c.Outputs[k] = v m2 = append(m2, v)
}
mresult = mergeSlice(m1, m2)
if len(mresult) > 0 {
c.ProviderConfigs = make([]*ProviderConfig, len(mresult))
for i, v := range mresult {
c.ProviderConfigs[i] = v.(*ProviderConfig)
}
} }
// Merge provider configs: If they collide, we just take the latest one // Resources
// for now. In the future, we might provide smarter merge functionality. m1 = make([]merger, 0, len(c1.Resources))
c.ProviderConfigs = make(map[string]*ProviderConfig) m2 = make([]merger, 0, len(c2.Resources))
for k, v := range c1.ProviderConfigs { for _, v := range c1.Resources {
c.ProviderConfigs[k] = v m1 = append(m1, v)
} }
for k, v := range c2.ProviderConfigs { for _, v := range c2.Resources {
c.ProviderConfigs[k] = v m2 = append(m2, v)
}
mresult = mergeSlice(m1, m2)
if len(mresult) > 0 {
c.Resources = make([]*Resource, len(mresult))
for i, v := range mresult {
c.Resources[i] = v.(*Resource)
}
} }
// Merge resources: If they collide, we just take the latest one // Variables
// for now. In the future, we might provide smarter merge functionality. m1 = make([]merger, 0, len(c1.Variables))
resources := make(map[string]*Resource) m2 = make([]merger, 0, len(c2.Variables))
for _, r := range c1.Resources { for _, v := range c1.Variables {
id := fmt.Sprintf("%s[%s]", r.Type, r.Name) m1 = append(m1, v)
resources[id] = r
} }
for _, r := range c2.Resources { for _, v := range c2.Variables {
id := fmt.Sprintf("%s[%s]", r.Type, r.Name) m2 = append(m2, v)
resources[id] = r
} }
mresult = mergeSlice(m1, m2)
c.Resources = make([]*Resource, 0, len(resources)) if len(mresult) > 0 {
for _, r := range resources { c.Variables = make([]*Variable, len(mresult))
c.Resources = append(c.Resources, r) for i, v := range mresult {
c.Variables[i] = v.(*Variable)
}
} }
return c, nil return c, nil
} }
// merger is an interface that must be implemented by types that are
// merge-able. This simplifies the implementation of Merge for the various
// components of a Config.
type merger interface {
mergerName() string
mergerMerge(merger) merger
}
// mergeSlice merges a slice of mergers.
func mergeSlice(m1, m2 []merger) []merger {
r := make([]merger, len(m1), len(m1)+len(m2))
copy(r, m1)
m := map[string]struct{}{}
for _, v2 := range m2 {
// If we already saw it, just append it because it's a
// duplicate and invalid...
name := v2.mergerName()
if _, ok := m[name]; ok {
r = append(r, v2)
continue
}
m[name] = struct{}{}
// Find an original to override
var original merger
originalIndex := -1
for i, v := range m1 {
if v.mergerName() == name {
originalIndex = i
original = v
break
}
}
var v merger
if original == nil {
v = v2
} else {
v = original.mergerMerge(v2)
}
if originalIndex == -1 {
r = append(r, v)
} else {
r[originalIndex] = v
}
}
return r
}
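// A minimal sketch of a merger implementation and of how mergeSlice treats it.
// The itemSketch type is hypothetical and purely illustrative; in this package
// the real implementors are Output, ProviderConfig, Resource, and Variable.
//
//    type itemSketch struct {
//        Name  string
//        Value string
//    }
//
//    func (i *itemSketch) mergerName() string { return i.Name }
//
//    func (i *itemSketch) mergerMerge(other merger) merger {
//        o := other.(*itemSketch)
//        result := *i
//        if o.Value != "" {
//            result.Value = o.Value
//        }
//        return &result
//    }
//
// Given m1 = [{foo, "a"}] and m2 = [{foo, "b"}, {bar, ""}], mergeSlice(m1, m2)
// keeps "foo" in its original position with the merged value "b" and appends
// "bar". A second "foo" in m2 would be appended unmerged, so a later Validate
// pass can still report the duplicate.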

149
config/merge_test.go Normal file
View File

@ -0,0 +1,149 @@
package config
import (
"reflect"
"testing"
)
func TestMerge(t *testing.T) {
cases := []struct {
c1, c2, result *Config
err bool
}{
// Normal good case.
{
&Config{
Outputs: []*Output{
&Output{Name: "foo"},
},
ProviderConfigs: []*ProviderConfig{
&ProviderConfig{Name: "foo"},
},
Resources: []*Resource{
&Resource{Name: "foo"},
},
Variables: []*Variable{
&Variable{Name: "foo"},
},
unknownKeys: []string{"foo"},
},
&Config{
Outputs: []*Output{
&Output{Name: "bar"},
},
ProviderConfigs: []*ProviderConfig{
&ProviderConfig{Name: "bar"},
},
Resources: []*Resource{
&Resource{Name: "bar"},
},
Variables: []*Variable{
&Variable{Name: "bar"},
},
unknownKeys: []string{"bar"},
},
&Config{
Outputs: []*Output{
&Output{Name: "foo"},
&Output{Name: "bar"},
},
ProviderConfigs: []*ProviderConfig{
&ProviderConfig{Name: "foo"},
&ProviderConfig{Name: "bar"},
},
Resources: []*Resource{
&Resource{Name: "foo"},
&Resource{Name: "bar"},
},
Variables: []*Variable{
&Variable{Name: "foo"},
&Variable{Name: "bar"},
},
unknownKeys: []string{"foo", "bar"},
},
false,
},
// Test that when merging duplicates, it merges into the
// first, but keeps the duplicates so that errors still
// happen.
{
&Config{
Outputs: []*Output{
&Output{Name: "foo"},
},
ProviderConfigs: []*ProviderConfig{
&ProviderConfig{Name: "foo"},
},
Resources: []*Resource{
&Resource{Name: "foo"},
},
Variables: []*Variable{
&Variable{Name: "foo", Default: "foo"},
&Variable{Name: "foo"},
},
unknownKeys: []string{"foo"},
},
&Config{
Outputs: []*Output{
&Output{Name: "bar"},
},
ProviderConfigs: []*ProviderConfig{
&ProviderConfig{Name: "bar"},
},
Resources: []*Resource{
&Resource{Name: "bar"},
},
Variables: []*Variable{
&Variable{Name: "foo", Default: "bar"},
&Variable{Name: "bar"},
},
unknownKeys: []string{"bar"},
},
&Config{
Outputs: []*Output{
&Output{Name: "foo"},
&Output{Name: "bar"},
},
ProviderConfigs: []*ProviderConfig{
&ProviderConfig{Name: "foo"},
&ProviderConfig{Name: "bar"},
},
Resources: []*Resource{
&Resource{Name: "foo"},
&Resource{Name: "bar"},
},
Variables: []*Variable{
&Variable{Name: "foo", Default: "bar"},
&Variable{Name: "foo"},
&Variable{Name: "bar"},
},
unknownKeys: []string{"foo", "bar"},
},
false,
},
}
for i, tc := range cases {
actual, err := Merge(tc.c1, tc.c2)
if (err != nil) != tc.err {
t.Fatalf("%d: error fail", i)
}
if !reflect.DeepEqual(actual, tc.result) {
t.Fatalf("%d: bad:\n\n%#v", i, actual)
}
}
}

View File

@ -69,29 +69,65 @@ func (r *RawConfig) Interpolate(vs map[string]string) error {
if err != nil { if err != nil {
return err return err
} }
w := &variableReplaceWalker{Values: vs}
r.config = config.(map[string]interface{}) r.config = config.(map[string]interface{})
fn := func(i Interpolation) (string, error) {
return i.Interpolate(vs)
}
w := &interpolationWalker{F: fn, Replace: true}
err = reflectwalk.Walk(r.config, w) err = reflectwalk.Walk(r.config, w)
if err != nil { if err != nil {
return err return err
} }
r.unknownKeys = w.UnknownKeys r.unknownKeys = w.unknownKeys
return nil return nil
} }
func (r *RawConfig) init() error { func (r *RawConfig) init() error {
walker := new(variableDetectWalker) r.config = r.Raw
r.Variables = nil
fn := func(i Interpolation) (string, error) {
for k, v := range i.Variables() {
if r.Variables == nil {
r.Variables = make(map[string]InterpolatedVariable)
}
r.Variables[k] = v
}
return "", nil
}
walker := &interpolationWalker{F: fn}
if err := reflectwalk.Walk(r.Raw, walker); err != nil { if err := reflectwalk.Walk(r.Raw, walker); err != nil {
return err return err
} }
r.Variables = walker.Variables
r.config = r.Raw
return nil return nil
} }
func (r *RawConfig) merge(r2 *RawConfig) *RawConfig {
rawRaw, err := copystructure.Copy(r.Raw)
if err != nil {
panic(err)
}
raw := rawRaw.(map[string]interface{})
for k, v := range r2.Raw {
raw[k] = v
}
result, err := NewRawConfig(raw)
if err != nil {
panic(err)
}
return result
}
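// A rough end-to-end sketch of the RawConfig flow above, using only the
// identifiers shown here; the raw map contents are invented:
//
//    raw := map[string]interface{}{"ami": "${var.foo}"}
//    rc, err := NewRawConfig(raw) // init() records "var.foo" in rc.Variables
//    if err != nil {
//        // handle err
//    }
//    err = rc.Interpolate(map[string]string{"var.foo": "ami-abc123"})
//
// After Interpolate, the internal config map holds "ami-abc123". If a value
// had resolved to UnknownVariableValue instead, the interpolation walker would
// drop that key and record it, and UnknownKeys() would return it.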
// UnknownKeys returns the keys of the configuration that are unknown // UnknownKeys returns the keys of the configuration that are unknown
// because they had interpolated variables that must be computed. // because they had interpolated variables that must be computed.
func (r *RawConfig) UnknownKeys() []string { func (r *RawConfig) UnknownKeys() []string {

View File

@ -31,6 +31,8 @@ resource aws_instance "web" {
resource "aws_instance" "db" { resource "aws_instance" "db" {
security_groups = "${aws_security_group.firewall.*.id}" security_groups = "${aws_security_group.firewall.*.id}"
depends_on = ["aws_instance.web"]
} }
output "web_ip" { output "web_ip" {

View File

@ -20,7 +20,8 @@
"resource": { "resource": {
"aws_instance": { "aws_instance": {
"db": { "db": {
"security_groups": ["${aws_security_group.firewall.*.id}"] "security_groups": ["${aws_security_group.firewall.*.id}"],
"depends_on": ["aws_instance.web"]
}, },
"web": { "web": {

View File

@ -0,0 +1,8 @@
variable "foo" {
default = "bar";
description = "bar";
}
resource "aws_instance" "db" {
security_groups = "${aws_security_group.firewall.*.id}"
}

View File

@ -0,0 +1,2 @@
resource "aws_instance" "db" {
}

View File

@ -0,0 +1,9 @@
{
"resource": {
"aws_instance": {
"web": {
"foo": "bar",
}
}
}
}

View File

@ -0,0 +1,17 @@
variable "foo" {
default = "bar";
description = "bar";
}
provider "aws" {
access_key = "foo";
secret_key = "bar";
}
resource "aws_instance" "db" {
security_groups = "${aws_security_group.firewall.*.id}"
}
output "web_ip" {
value = "${aws_instance.web.private_ip}"
}

View File

@ -0,0 +1,10 @@
{
"resource": {
"aws_instance": {
"db": {
"ami": "foo",
"security_groups": ""
}
}
}
}

View File

@ -0,0 +1,20 @@
provider "do" {
api_key = "${var.foo}";
}
resource "aws_security_group" "firewall" {
count = 5
}
resource aws_instance "web" {
ami = "${var.foo}"
security_groups = [
"foo",
"${aws_security_group.firewall.foo}"
]
network_interface {
device_index = 0
description = "Main network interface"
}
}

View File

@ -0,0 +1,3 @@
resource "aws_instance" "web" {
depends_on = ["aws_instance.db"]
}

View File

@ -3,6 +3,12 @@ variable "foo" {
description = "bar"; description = "bar";
} }
variable "amis" {
default = {
"east": "foo",
}
}
provider "aws" { provider "aws" {
access_key = "foo"; access_key = "foo";
secret_key = "bar"; secret_key = "bar";
@ -16,7 +22,7 @@ resource "aws_security_group" "firewall" {
} }
resource aws_instance "web" { resource aws_instance "web" {
ami = "${var.foo}" ami = "${var.amis.east}"
security_groups = [ security_groups = [
"foo", "foo",
"${aws_security_group.firewall.foo}" "${aws_security_group.firewall.foo}"
@ -26,4 +32,6 @@ resource aws_instance "web" {
device_index = 0 device_index = 0
description = "Main network interface" description = "Main network interface"
} }
depends_on = ["aws_security_group.firewall"]
} }

View File

@ -0,0 +1,3 @@
variable "foo" {
default = ["foo", "bar"]
}

View File

@ -0,0 +1,9 @@
variable "foo" {
default = "bar"
}
variable "foo" {
default = {
"foo" = "bar"
}
}

View File

@ -1,216 +0,0 @@
package config
import (
"fmt"
"reflect"
"regexp"
"strings"
"github.com/mitchellh/reflectwalk"
)
// varRegexp is a regexp that matches variables such as ${foo.bar}
var varRegexp *regexp.Regexp
func init() {
varRegexp = regexp.MustCompile(`(?i)(\$+)\{([*-.a-z0-9_]+)\}`)
}
// ReplaceVariables takes a configuration and a mapping of variables
// and performs the structure walking necessary to properly replace
// all the variables.
func ReplaceVariables(
c interface{},
vs map[string]string) ([]string, error) {
w := &variableReplaceWalker{Values: vs}
if err := reflectwalk.Walk(c, w); err != nil {
return nil, err
}
return w.UnknownKeys, nil
}
// variableDetectWalker implements interfaces for the reflectwalk package
// (github.com/mitchellh/reflectwalk) that can be used to automatically
// pull out the variables that need replacing.
type variableDetectWalker struct {
Variables map[string]InterpolatedVariable
}
func (w *variableDetectWalker) Primitive(v reflect.Value) error {
// We only care about strings
if v.Kind() == reflect.Interface {
v = v.Elem()
}
if v.Kind() != reflect.String {
return nil
}
// XXX: This can be a lot more efficient if we used a real
// parser. A regexp is a hammer though that will get this working.
matches := varRegexp.FindAllStringSubmatch(v.String(), -1)
if len(matches) == 0 {
return nil
}
for _, match := range matches {
dollars := len(match[1])
// If there are even amounts of dollar signs, then it is escaped
if dollars%2 == 0 {
continue
}
// Otherwise, record it
key := match[2]
if w.Variables == nil {
w.Variables = make(map[string]InterpolatedVariable)
}
if _, ok := w.Variables[key]; ok {
continue
}
var err error
var iv InterpolatedVariable
if strings.Index(key, ".") == -1 {
return fmt.Errorf(
"Interpolated variable '%s' has bad format. "+
"Did you mean 'var.%s'?",
key, key)
}
if strings.HasPrefix(key, "var.") {
iv, err = NewUserVariable(key)
} else {
iv, err = NewResourceVariable(key)
}
if err != nil {
return err
}
w.Variables[key] = iv
}
return nil
}
// variableReplaceWalker implements interfaces for reflectwalk that
// is used to replace variables with their values.
//
// If Values does not have every available value, then the program
// will _panic_. The variableDetectWalker will tell you all variables
// you need.
type variableReplaceWalker struct {
Values map[string]string
UnknownKeys []string
key []string
loc reflectwalk.Location
cs []reflect.Value
csData interface{}
}
func (w *variableReplaceWalker) Enter(loc reflectwalk.Location) error {
w.loc = loc
return nil
}
func (w *variableReplaceWalker) Exit(loc reflectwalk.Location) error {
w.loc = reflectwalk.None
switch loc {
case reflectwalk.Map:
w.cs = w.cs[:len(w.cs)-1]
case reflectwalk.MapValue:
w.key = w.key[:len(w.key)-1]
}
return nil
}
func (w *variableReplaceWalker) Map(m reflect.Value) error {
w.cs = append(w.cs, m)
return nil
}
func (w *variableReplaceWalker) MapElem(m, k, v reflect.Value) error {
w.csData = k
w.key = append(w.key, k.String())
return nil
}
func (w *variableReplaceWalker) Primitive(v reflect.Value) error {
// We only care about strings
setV := v
if v.Kind() == reflect.Interface {
setV = v
v = v.Elem()
}
if v.Kind() != reflect.String {
return nil
}
matches := varRegexp.FindAllStringSubmatch(v.String(), -1)
if len(matches) == 0 {
return nil
}
result := v.String()
for _, match := range matches {
dollars := len(match[1])
// If there are even amounts of dollar signs, then it is escaped
if dollars%2 == 0 {
continue
}
// Get the key
key := match[2]
value, ok := w.Values[key]
if !ok {
panic("no value for variable key: " + key)
}
// If this is an unknown variable, then we remove it from
// the configuration.
if value == UnknownVariableValue {
w.removeCurrent()
return nil
}
// Replace
result = strings.Replace(result, match[0], value, -1)
}
resultVal := reflect.ValueOf(result)
if w.loc == reflectwalk.MapValue {
// If we're in a map, then the only way to set a map value is
// to set it directly.
m := w.cs[len(w.cs)-1]
mk := w.csData.(reflect.Value)
m.SetMapIndex(mk, resultVal)
} else {
// Otherwise, we should be addressable
setV.Set(resultVal)
}
return nil
}
func (w *variableReplaceWalker) removeCurrent() {
c := w.cs[len(w.cs)-1]
switch c.Kind() {
case reflect.Map:
// Zero value so that we delete the map key
var val reflect.Value
// Get the key and delete it
k := w.csData.(reflect.Value)
c.SetMapIndex(k, val)
}
// Append the key to the unknown keys
w.UnknownKeys = append(w.UnknownKeys, strings.Join(w.key, "."))
}

View File

@ -1,282 +0,0 @@
package config
import (
"reflect"
"testing"
"github.com/mitchellh/reflectwalk"
)
func BenchmarkVariableDetectWalker(b *testing.B) {
w := new(variableDetectWalker)
str := reflect.ValueOf(`foo ${var.bar} bar ${bar.baz.bing} $${escaped}`)
b.ResetTimer()
for i := 0; i < b.N; i++ {
w.Variables = nil
w.Primitive(str)
}
}
func BenchmarkVariableReplaceWalker(b *testing.B) {
w := &variableReplaceWalker{
Values: map[string]string{
"var.bar": "bar",
"bar.baz.bing": "baz",
},
}
str := `foo ${var.bar} bar ${bar.baz.bing} $${escaped}`
b.ResetTimer()
for i := 0; i < b.N; i++ {
if err := reflectwalk.Walk(&str, w); err != nil {
panic(err)
}
}
}
func TestReplaceVariables(t *testing.T) {
input := "foo-${var.bar}"
expected := "foo-bar"
unk, err := ReplaceVariables(&input, map[string]string{
"var.bar": "bar",
})
if err != nil {
t.Fatalf("err: %s", err)
}
if len(unk) > 0 {
t.Fatal("bad: %#v", unk)
}
if input != expected {
t.Fatalf("bad: %#v", input)
}
}
func TestVariableDetectWalker(t *testing.T) {
w := new(variableDetectWalker)
str := `foo ${var.bar}`
if err := w.Primitive(reflect.ValueOf(str)); err != nil {
t.Fatalf("err: %s", err)
}
if len(w.Variables) != 1 {
t.Fatalf("bad: %#v", w.Variables)
}
if w.Variables["var.bar"].(*UserVariable).FullKey() != "var.bar" {
t.Fatalf("bad: %#v", w.Variables)
}
}
func TestVariableDetectWalker_resource(t *testing.T) {
w := new(variableDetectWalker)
str := `foo ${ec2.foo.bar}`
if err := w.Primitive(reflect.ValueOf(str)); err != nil {
t.Fatalf("err: %s", err)
}
if len(w.Variables) != 1 {
t.Fatalf("bad: %#v", w.Variables)
}
if w.Variables["ec2.foo.bar"].(*ResourceVariable).FullKey() != "ec2.foo.bar" {
t.Fatalf("bad: %#v", w.Variables)
}
}
func TestVariableDetectWalker_resourceMulti(t *testing.T) {
w := new(variableDetectWalker)
str := `foo ${ec2.foo.*.bar}`
if err := w.Primitive(reflect.ValueOf(str)); err != nil {
t.Fatalf("err: %s", err)
}
if len(w.Variables) != 1 {
t.Fatalf("bad: %#v", w.Variables)
}
if w.Variables["ec2.foo.*.bar"].(*ResourceVariable).FullKey() != "ec2.foo.*.bar" {
t.Fatalf("bad: %#v", w.Variables)
}
}
func TestVariableDetectWalker_bad(t *testing.T) {
w := new(variableDetectWalker)
str := `foo ${bar}`
if err := w.Primitive(reflect.ValueOf(str)); err == nil {
t.Fatal("should error")
}
}
func TestVariableDetectWalker_escaped(t *testing.T) {
w := new(variableDetectWalker)
str := `foo $${var.bar}`
if err := w.Primitive(reflect.ValueOf(str)); err != nil {
t.Fatalf("err: %s", err)
}
if len(w.Variables) > 0 {
t.Fatalf("bad: %#v", w.Variables)
}
}
func TestVariableDetectWalker_empty(t *testing.T) {
w := new(variableDetectWalker)
str := `foo`
if err := w.Primitive(reflect.ValueOf(str)); err != nil {
t.Fatalf("err: %s", err)
}
if len(w.Variables) > 0 {
t.Fatalf("bad: %#v", w.Variables)
}
}
func TestVariableReplaceWalker(t *testing.T) {
w := &variableReplaceWalker{
Values: map[string]string{
"var.bar": "bar",
"var.unknown": UnknownVariableValue,
},
}
cases := []struct {
Input interface{}
Output interface{}
}{
{
`foo ${var.bar}`,
"foo bar",
},
{
[]string{"foo", "${var.bar}"},
[]string{"foo", "bar"},
},
{
map[string]interface{}{
"ami": "${var.bar}",
"security_groups": []interface{}{
"foo",
"${var.bar}",
},
},
map[string]interface{}{
"ami": "bar",
"security_groups": []interface{}{
"foo",
"bar",
},
},
},
{
map[string]interface{}{
"foo": map[string]interface{}{
"foo": []string{"${var.bar}"},
},
},
map[string]interface{}{
"foo": map[string]interface{}{
"foo": []string{"bar"},
},
},
},
{
map[string]interface{}{
"foo": "bar",
"bar": "hello${var.unknown}world",
},
map[string]interface{}{
"foo": "bar",
},
},
{
map[string]interface{}{
"foo": []string{"foo", "${var.unknown}", "bar"},
},
map[string]interface{}{},
},
}
for i, tc := range cases {
var input interface{} = tc.Input
if reflect.ValueOf(tc.Input).Kind() == reflect.String {
input = &tc.Input
}
if err := reflectwalk.Walk(input, w); err != nil {
t.Fatalf("err: %s", err)
}
if !reflect.DeepEqual(tc.Input, tc.Output) {
t.Fatalf("bad %d: %#v", i, tc.Input)
}
}
}
func TestVariableReplaceWalker_unknown(t *testing.T) {
cases := []struct {
Input interface{}
Output interface{}
Keys []string
}{
{
map[string]interface{}{
"foo": "bar",
"bar": "hello${var.unknown}world",
},
map[string]interface{}{
"foo": "bar",
},
[]string{"bar"},
},
{
map[string]interface{}{
"foo": []string{"foo", "${var.unknown}", "bar"},
},
map[string]interface{}{},
[]string{"foo"},
},
{
map[string]interface{}{
"foo": map[string]interface{}{
"bar": "${var.unknown}",
},
},
map[string]interface{}{
"foo": map[string]interface{}{},
},
[]string{"foo.bar"},
},
}
for i, tc := range cases {
var input interface{} = tc.Input
w := &variableReplaceWalker{
Values: map[string]string{
"var.unknown": UnknownVariableValue,
},
}
if reflect.ValueOf(tc.Input).Kind() == reflect.String {
input = &tc.Input
}
if err := reflectwalk.Walk(input, w); err != nil {
t.Fatalf("err: %s", err)
}
if !reflect.DeepEqual(tc.Input, tc.Output) {
t.Fatalf("bad %d: %#v", i, tc.Input)
}
if !reflect.DeepEqual(tc.Keys, w.UnknownKeys) {
t.Fatalf("bad: %#v", w.UnknownKeys)
}
}
}

View File

@ -23,6 +23,10 @@ func (e *Error) Error() string {
len(e.Errors), strings.Join(points, "\n")) len(e.Errors), strings.Join(points, "\n"))
} }
func (e *Error) GoString() string {
return fmt.Sprintf("*%#v", *e)
}
// ErrorAppend is a helper function that will append more errors // ErrorAppend is a helper function that will append more errors
// onto a Error in order to create a larger multi-error. If the // onto a Error in order to create a larger multi-error. If the
// original error is not a Error, it will be turned into one. // original error is not a Error, it will be turned into one.

View File

@ -37,7 +37,7 @@ func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) ([]string, [
func (p *ResourceProvisioner) Apply( func (p *ResourceProvisioner) Apply(
s *terraform.ResourceState, s *terraform.ResourceState,
c *terraform.ResourceConfig) (*terraform.ResourceState, error) { c *terraform.ResourceConfig) error {
var resp ResourceProvisionerApplyResponse var resp ResourceProvisionerApplyResponse
args := &ResourceProvisionerApplyArgs{ args := &ResourceProvisionerApplyArgs{
State: s, State: s,
@ -46,13 +46,13 @@ func (p *ResourceProvisioner) Apply(
err := p.Client.Call(p.Name+".Apply", args, &resp) err := p.Client.Call(p.Name+".Apply", args, &resp)
if err != nil { if err != nil {
return nil, err return err
} }
if resp.Error != nil { if resp.Error != nil {
err = resp.Error err = resp.Error
} }
return resp.State, err return err
} }
type ResourceProvisionerValidateArgs struct { type ResourceProvisionerValidateArgs struct {
@ -70,7 +70,6 @@ type ResourceProvisionerApplyArgs struct {
} }
type ResourceProvisionerApplyResponse struct { type ResourceProvisionerApplyResponse struct {
State *terraform.ResourceState
Error *BasicError Error *BasicError
} }
@ -83,9 +82,8 @@ type ResourceProvisionerServer struct {
func (s *ResourceProvisionerServer) Apply( func (s *ResourceProvisionerServer) Apply(
args *ResourceProvisionerApplyArgs, args *ResourceProvisionerApplyArgs,
result *ResourceProvisionerApplyResponse) error { result *ResourceProvisionerApplyResponse) error {
state, err := s.Provisioner.Apply(args.State, args.Config) err := s.Provisioner.Apply(args.State, args.Config)
*result = ResourceProvisionerApplyResponse{ *result = ResourceProvisionerApplyResponse{
State: state,
Error: NewBasicError(err), Error: NewBasicError(err),
} }
return nil return nil
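// With this change the RPC response no longer round-trips a ResourceState:
// a provisioner only reports success or failure, and the state it was given
// is used as-is by the caller. Only the (possibly nil) error crosses the
// plugin boundary, wrapped in a *BasicError so it survives serialization.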

View File

@ -21,14 +21,10 @@ func TestResourceProvisioner_apply(t *testing.T) {
} }
provisioner := &ResourceProvisioner{Client: client, Name: name} provisioner := &ResourceProvisioner{Client: client, Name: name}
p.ApplyReturn = &terraform.ResourceState{
ID: "bob",
}
// Apply // Apply
state := &terraform.ResourceState{} state := &terraform.ResourceState{}
conf := &terraform.ResourceConfig{} conf := &terraform.ResourceConfig{}
newState, err := provisioner.Apply(state, conf) err = provisioner.Apply(state, conf)
if !p.ApplyCalled { if !p.ApplyCalled {
t.Fatal("apply should be called") t.Fatal("apply should be called")
} }
@ -38,9 +34,6 @@ func TestResourceProvisioner_apply(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("bad: %#v", err) t.Fatalf("bad: %#v", err)
} }
if !reflect.DeepEqual(p.ApplyReturn, newState) {
t.Fatalf("bad: %#v", newState)
}
} }
func TestResourceProvisioner_validate(t *testing.T) { func TestResourceProvisioner_validate(t *testing.T) {

View File

@ -30,6 +30,7 @@ type Context struct {
providers map[string]ResourceProviderFactory providers map[string]ResourceProviderFactory
provisioners map[string]ResourceProvisionerFactory provisioners map[string]ResourceProvisionerFactory
variables map[string]string variables map[string]string
defaultVars map[string]string
l sync.Mutex // Lock acquired during any task l sync.Mutex // Lock acquired during any task
parCh chan struct{} // Semaphore used to limit parallelism parCh chan struct{} // Semaphore used to limit parallelism
@ -72,6 +73,14 @@ func NewContext(opts *ContextOpts) *Context {
} }
parCh := make(chan struct{}, par) parCh := make(chan struct{}, par)
// Calculate all the default variables
defaultVars := make(map[string]string)
for _, v := range opts.Config.Variables {
for k, val := range v.DefaultsMap() {
defaultVars[k] = val
}
}
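// DefaultsMap is assumed here to flatten a variable's default into
// fully-qualified keys: a string default becomes a single "var.NAME" entry,
// and a map default such as the "amis" fixture becomes one
// "var.amis.us-east-1"-style entry per element. These seed the interpolation
// set and can be overridden by user-supplied variables in computeVars below.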
return &Context{ return &Context{
config: opts.Config, config: opts.Config,
diff: opts.Diff, diff: opts.Diff,
@ -80,6 +89,7 @@ func NewContext(opts *ContextOpts) *Context {
providers: opts.Providers, providers: opts.Providers,
provisioners: opts.Provisioners, provisioners: opts.Provisioners,
variables: opts.Variables, variables: opts.Variables,
defaultVars: defaultVars,
parCh: parCh, parCh: parCh,
sh: sh, sh: sh,
@ -296,8 +306,13 @@ func (c *Context) computeVars(raw *config.RawConfig) error {
return nil return nil
} }
// Go through each variable and find it // Start building up the variables. First, defaults
vs := make(map[string]string) vs := make(map[string]string)
for k, v := range c.defaultVars {
vs[k] = v
}
// Next, the actual computed variables
for n, rawV := range raw.Variables { for n, rawV := range raw.Variables {
switch v := rawV.(type) { switch v := rawV.(type) {
case *config.ResourceVariable: case *config.ResourceVariable:
@ -314,7 +329,19 @@ func (c *Context) computeVars(raw *config.RawConfig) error {
vs[n] = attr vs[n] = attr
case *config.UserVariable: case *config.UserVariable:
vs[n] = c.variables[v.Name] val, ok := c.variables[v.Name]
if ok {
vs[n] = val
continue
}
// Look up if we have any variables with this prefix because
// those are map overrides. Include those.
for k, val := range c.variables {
if strings.HasPrefix(k, v.Name+".") {
vs["var."+k] = val
}
}
} }
} }
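// Example of the prefix lookup above with the values used in
// TestContextApply_vars: c.variables is {"foo": "us-west-2",
// "amis.us-east-1": "override"}; for the UserVariable "amis" there is no
// exact "amis" entry, so the loop copies "var.amis.us-east-1" = "override"
// into vs, making the per-element override visible to interpolation.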
@ -458,15 +485,32 @@ func (c *Context) applyWalkFn() depgraph.WalkFunc {
if err != nil { if err != nil {
return err return err
} }
}
// TODO(mitchellh): we need to verify the diff doesn't change // This should never happen because we check if Diff.Empty above.
// anything and that the diff has no computed values (pre-computed) // If this happened, then the diff above returned a bad diff.
if diff == nil {
return fmt.Errorf(
"%s: diff became nil during Apply. This is a bug with "+
"the resource provider. Please report a bug.",
r.Id)
}
// If we don't have a diff, just make an empty one // Delete id from the diff because it is dependent on
if diff == nil { // our internal plan function.
diff = new(ResourceDiff) delete(r.Diff.Attributes, "id")
diff.init() delete(diff.Attributes, "id")
// Verify the diffs are the same
if !r.Diff.Same(diff) {
log.Printf(
"[ERROR] Diffs don't match.\n\nDiff 1: %#v"+
"\n\nDiff 2: %#v",
r.Diff, diff)
return fmt.Errorf(
"%s: diffs didn't match during apply. This is a "+
"bug with the resource provider, please report a bug.",
r.Id)
}
} }
// If we do not have any connection info, initialize // If we do not have any connection info, initialize
@ -527,10 +571,11 @@ func (c *Context) applyWalkFn() depgraph.WalkFunc {
// //
// Additionally, we need to be careful to not run this if there // Additionally, we need to be careful to not run this if there
// was an error during the provider apply. // was an error during the provider apply.
tainted := false
if applyerr == nil && r.State.ID == "" && len(r.Provisioners) > 0 { if applyerr == nil && r.State.ID == "" && len(r.Provisioners) > 0 {
rs, err = c.applyProvisioners(r, rs) if err := c.applyProvisioners(r, rs); err != nil {
if err != nil {
errs = append(errs, err) errs = append(errs, err)
tainted = true
} }
} }
@ -540,6 +585,10 @@ func (c *Context) applyWalkFn() depgraph.WalkFunc {
delete(c.state.Resources, r.Id) delete(c.state.Resources, r.Id)
} else { } else {
c.state.Resources[r.Id] = rs c.state.Resources[r.Id] = rs
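// If a provisioner failed, the resource created above is still recorded in
// state, but it is additionally marked tainted so later runs know it must be
// replaced rather than treated as healthy.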
if tainted {
c.state.Tainted[r.Id] = struct{}{}
}
} }
c.sl.Unlock() c.sl.Unlock()
@ -564,9 +613,7 @@ func (c *Context) applyWalkFn() depgraph.WalkFunc {
// applyProvisioners is used to run any provisioners a resource has // applyProvisioners is used to run any provisioners a resource has
// defined after the resource creation has already completed. // defined after the resource creation has already completed.
func (c *Context) applyProvisioners(r *Resource, rs *ResourceState) (*ResourceState, error) { func (c *Context) applyProvisioners(r *Resource, rs *ResourceState) error {
var err error
// Store the original connection info, restore later // Store the original connection info, restore later
origConnInfo := rs.ConnInfo origConnInfo := rs.ConnInfo
defer func() { defer func() {
@ -577,13 +624,13 @@ func (c *Context) applyProvisioners(r *Resource, rs *ResourceState) (*ResourceSt
// Interpolate since we may have variables that depend on the // Interpolate since we may have variables that depend on the
// local resource. // local resource.
if err := prov.Config.interpolate(c); err != nil { if err := prov.Config.interpolate(c); err != nil {
return rs, err return err
} }
// Interpolate the conn info, since it may contain variables // Interpolate the conn info, since it may contain variables
connInfo := NewResourceConfig(prov.ConnInfo) connInfo := NewResourceConfig(prov.ConnInfo)
if err := connInfo.interpolate(c); err != nil { if err := connInfo.interpolate(c); err != nil {
return rs, err return err
} }
// Merge the connection information // Merge the connection information
@ -616,12 +663,12 @@ func (c *Context) applyProvisioners(r *Resource, rs *ResourceState) (*ResourceSt
rs.ConnInfo = overlay rs.ConnInfo = overlay
// Invoke the Provisioner // Invoke the Provisioner
rs, err = prov.Provisioner.Apply(rs, prov.Config) if err := prov.Provisioner.Apply(rs, prov.Config); err != nil {
if err != nil { return err
return rs, err
} }
} }
return rs, nil
return nil
} }
func (c *Context) planWalkFn(result *Plan) depgraph.WalkFunc { func (c *Context) planWalkFn(result *Plan) depgraph.WalkFunc {
@ -651,7 +698,13 @@ func (c *Context) planWalkFn(result *Plan) depgraph.WalkFunc {
// Get a diff from the newest state // Get a diff from the newest state
log.Printf("[DEBUG] %s: Executing diff", r.Id) log.Printf("[DEBUG] %s: Executing diff", r.Id)
var err error var err error
diff, err = r.Provider.Diff(r.State, r.Config) state := r.State
if r.Tainted {
// If we're tainted, we pretend to create a new thing.
state = new(ResourceState)
state.Type = r.State.Type
}
diff, err = r.Provider.Diff(state, r.Config)
if err != nil { if err != nil {
return err return err
} }
@ -661,6 +714,11 @@ func (c *Context) planWalkFn(result *Plan) depgraph.WalkFunc {
diff = new(ResourceDiff) diff = new(ResourceDiff)
} }
if r.Tainted {
// Tainted resources must also be destroyed
diff.Destroy = true
}
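// Net effect for a tainted resource: it is diffed as if it did not exist yet
// (a fresh create) and additionally marked Destroy, which yields the
// "DESTROY: aws_instance.bar" entry shown in testTerraformPlanTaintStr.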
if diff.RequiresNew() && r.State.ID != "" { if diff.RequiresNew() && r.State.ID != "" {
// This will also require a destroy // This will also require a destroy
diff.Destroy = true diff.Destroy = true

View File

@ -309,6 +309,35 @@ func TestContextApply_Minimal(t *testing.T) {
} }
} }
func TestContextApply_badDiff(t *testing.T) {
c := testConfig(t, "apply-good")
p := testProvider("aws")
p.ApplyFn = testApplyFn
p.DiffFn = testDiffFn
ctx := testContext(t, &ContextOpts{
Config: c,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
})
if _, err := ctx.Plan(nil); err != nil {
t.Fatalf("err: %s", err)
}
p.DiffFn = func(*ResourceState, *ResourceConfig) (*ResourceDiff, error) {
return &ResourceDiff{
Attributes: map[string]*ResourceAttrDiff{
"newp": nil,
},
}, nil
}
if _, err := ctx.Apply(); err == nil {
t.Fatal("should error")
}
}
func TestContextApply_cancel(t *testing.T) { func TestContextApply_cancel(t *testing.T) {
stopped := false stopped := false
@ -408,18 +437,44 @@ func TestContextApply_compute(t *testing.T) {
} }
} }
func TestContextApply_nilDiff(t *testing.T) {
c := testConfig(t, "apply-good")
p := testProvider("aws")
p.ApplyFn = testApplyFn
p.DiffFn = testDiffFn
ctx := testContext(t, &ContextOpts{
Config: c,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
})
if _, err := ctx.Plan(nil); err != nil {
t.Fatalf("err: %s", err)
}
p.DiffFn = func(*ResourceState, *ResourceConfig) (*ResourceDiff, error) {
return nil, nil
}
if _, err := ctx.Apply(); err == nil {
t.Fatal("should error")
}
}
func TestContextApply_Provisioner_compute(t *testing.T) { func TestContextApply_Provisioner_compute(t *testing.T) {
c := testConfig(t, "apply-provisioner-compute") c := testConfig(t, "apply-provisioner-compute")
p := testProvider("aws") p := testProvider("aws")
pr := testProvisioner() pr := testProvisioner()
p.ApplyFn = testApplyFn p.ApplyFn = testApplyFn
p.DiffFn = testDiffFn p.DiffFn = testDiffFn
pr.ApplyFn = func(rs *ResourceState, c *ResourceConfig) (*ResourceState, error) { pr.ApplyFn = func(rs *ResourceState, c *ResourceConfig) error {
val, ok := c.Config["foo"] val, ok := c.Config["foo"]
if !ok || val != "computed_dynamical" { if !ok || val != "computed_dynamical" {
t.Fatalf("bad value for foo: %v %#v", val, c) t.Fatalf("bad value for foo: %v %#v", val, c)
} }
return rs, nil
return nil
} }
ctx := testContext(t, &ContextOpts{ ctx := testContext(t, &ContextOpts{
Config: c, Config: c,
@ -455,6 +510,46 @@ func TestContextApply_Provisioner_compute(t *testing.T) {
} }
} }
func TestContextApply_provisionerFail(t *testing.T) {
c := testConfig(t, "apply-provisioner-fail")
p := testProvider("aws")
pr := testProvisioner()
p.ApplyFn = testApplyFn
p.DiffFn = testDiffFn
pr.ApplyFn = func(*ResourceState, *ResourceConfig) error {
return fmt.Errorf("EXPLOSION")
}
ctx := testContext(t, &ContextOpts{
Config: c,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
Provisioners: map[string]ResourceProvisionerFactory{
"shell": testProvisionerFuncFixed(pr),
},
Variables: map[string]string{
"value": "1",
},
})
if _, err := ctx.Plan(nil); err != nil {
t.Fatalf("err: %s", err)
}
state, err := ctx.Apply()
if err == nil {
t.Fatal("should error")
}
actual := strings.TrimSpace(state.String())
expected := strings.TrimSpace(testTerraformApplyProvisionerFailStr)
if actual != expected {
t.Fatalf("bad: \n%s", actual)
}
}
func TestContextApply_outputDiffVars(t *testing.T) { func TestContextApply_outputDiffVars(t *testing.T) {
c := testConfig(t, "apply-good") c := testConfig(t, "apply-good")
p := testProvider("aws") p := testProvider("aws")
@ -527,7 +622,7 @@ func TestContextApply_Provisioner_ConnInfo(t *testing.T) {
} }
p.DiffFn = testDiffFn p.DiffFn = testDiffFn
pr.ApplyFn = func(rs *ResourceState, c *ResourceConfig) (*ResourceState, error) { pr.ApplyFn = func(rs *ResourceState, c *ResourceConfig) error {
conn := rs.ConnInfo conn := rs.ConnInfo
if conn["type"] != "telnet" { if conn["type"] != "telnet" {
t.Fatalf("Bad: %#v", conn) t.Fatalf("Bad: %#v", conn)
@ -544,7 +639,8 @@ func TestContextApply_Provisioner_ConnInfo(t *testing.T) {
if conn["pass"] != "test" { if conn["pass"] != "test" {
t.Fatalf("Bad: %#v", conn) t.Fatalf("Bad: %#v", conn)
} }
return rs, nil
return nil
} }
ctx := testContext(t, &ContextOpts{ ctx := testContext(t, &ContextOpts{
@ -1045,10 +1141,19 @@ func TestContextApply_vars(t *testing.T) {
"aws": testProviderFuncFixed(p), "aws": testProviderFuncFixed(p),
}, },
Variables: map[string]string{ Variables: map[string]string{
"foo": "bar", "foo": "us-west-2",
"amis.us-east-1": "override",
}, },
}) })
w, e := ctx.Validate()
if len(w) > 0 {
t.Fatalf("bad: %#v", w)
}
if len(e) > 0 {
t.Fatalf("bad: %s", e)
}
if _, err := ctx.Plan(nil); err != nil { if _, err := ctx.Plan(nil); err != nil {
t.Fatalf("err: %s", err) t.Fatalf("err: %s", err)
} }
@ -1159,10 +1264,6 @@ func TestContextPlan_computed(t *testing.T) {
t.Fatalf("err: %s", err) t.Fatalf("err: %s", err)
} }
if len(plan.Diff.Resources) < 2 {
t.Fatalf("bad: %#v", plan.Diff.Resources)
}
actual := strings.TrimSpace(plan.String()) actual := strings.TrimSpace(plan.String())
expected := strings.TrimSpace(testTerraformPlanComputedStr) expected := strings.TrimSpace(testTerraformPlanComputedStr)
if actual != expected { if actual != expected {
@ -1410,6 +1511,44 @@ func TestContextPlan_state(t *testing.T) {
} }
} }
func TestContextPlan_taint(t *testing.T) {
c := testConfig(t, "plan-taint")
p := testProvider("aws")
p.DiffFn = testDiffFn
s := &State{
Resources: map[string]*ResourceState{
"aws_instance.foo": &ResourceState{
ID: "bar",
Type: "aws_instance",
Attributes: map[string]string{"num": "2"},
},
"aws_instance.bar": &ResourceState{
ID: "baz",
Type: "aws_instance",
},
},
Tainted: map[string]struct{}{"aws_instance.bar": struct{}{}},
}
ctx := testContext(t, &ContextOpts{
Config: c,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
State: s,
})
plan, err := ctx.Plan(nil)
if err != nil {
t.Fatalf("err: %s", err)
}
actual := strings.TrimSpace(plan.String())
expected := strings.TrimSpace(testTerraformPlanTaintStr)
if actual != expected {
t.Fatalf("bad:\n%s", actual)
}
}
func TestContextRefresh(t *testing.T) { func TestContextRefresh(t *testing.T) {
p := testProvider("aws") p := testProvider("aws")
c := testConfig(t, "refresh-basic") c := testConfig(t, "refresh-basic")

View File

@ -209,10 +209,46 @@ func (d *ResourceDiff) RequiresNew() bool {
} }
for _, rd := range d.Attributes { for _, rd := range d.Attributes {
if rd.RequiresNew { if rd != nil && rd.RequiresNew {
return true return true
} }
} }
return false return false
} }
// Same checks whether or not two ResourceDiffs are the "same." When
// we say "same", it is not necessarily exactly equal. Instead, it is
// just checking that the same attributes are changing, a destroy
// isn't suddenly happening, etc.
func (d *ResourceDiff) Same(d2 *ResourceDiff) bool {
if d == nil && d2 == nil {
return true
} else if d == nil || d2 == nil {
return false
}
if d.Destroy != d2.Destroy {
return false
}
if d.RequiresNew() != d2.RequiresNew() {
return false
}
if len(d.Attributes) != len(d2.Attributes) {
return false
}
ks := make(map[string]struct{})
for k, _ := range d.Attributes {
ks[k] = struct{}{}
}
for k, _ := range d2.Attributes {
delete(ks, k)
}
if len(ks) > 0 {
return false
}
return true
}
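// Illustrative cases for Same, mirroring TestResourceDiffSame:
//
//    d1 := &ResourceDiff{Attributes: map[string]*ResourceAttrDiff{"foo": &ResourceAttrDiff{}}}
//    d2 := &ResourceDiff{Attributes: map[string]*ResourceAttrDiff{"foo": &ResourceAttrDiff{}}}
//    d1.Same(d2) // true: same attribute keys, same Destroy/RequiresNew
//
//    d3 := &ResourceDiff{Attributes: map[string]*ResourceAttrDiff{"bar": &ResourceAttrDiff{}}}
//    d1.Same(d3) // false: different attribute set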

View File

@ -128,6 +128,86 @@ func TestResourceDiff_RequiresNew_nil(t *testing.T) {
} }
} }
func TestResourceDiffSame(t *testing.T) {
cases := []struct {
One, Two *ResourceDiff
Same bool
}{
{
&ResourceDiff{},
&ResourceDiff{},
true,
},
{
nil,
nil,
true,
},
{
&ResourceDiff{Destroy: false},
&ResourceDiff{Destroy: true},
false,
},
{
&ResourceDiff{Destroy: true},
&ResourceDiff{Destroy: true},
true,
},
{
&ResourceDiff{
Attributes: map[string]*ResourceAttrDiff{
"foo": &ResourceAttrDiff{},
},
},
&ResourceDiff{
Attributes: map[string]*ResourceAttrDiff{
"foo": &ResourceAttrDiff{},
},
},
true,
},
{
&ResourceDiff{
Attributes: map[string]*ResourceAttrDiff{
"bar": &ResourceAttrDiff{},
},
},
&ResourceDiff{
Attributes: map[string]*ResourceAttrDiff{
"foo": &ResourceAttrDiff{},
},
},
false,
},
{
&ResourceDiff{
Attributes: map[string]*ResourceAttrDiff{
"foo": &ResourceAttrDiff{RequiresNew: true},
},
},
&ResourceDiff{
Attributes: map[string]*ResourceAttrDiff{
"foo": &ResourceAttrDiff{RequiresNew: false},
},
},
false,
},
}
for i, tc := range cases {
actual := tc.One.Same(tc.Two)
if actual != tc.Same {
t.Fatalf("Fail %d", i)
}
}
}
func TestReadWriteDiff(t *testing.T) { func TestReadWriteDiff(t *testing.T) {
diff := &Diff{ diff := &Diff{
Resources: map[string]*ResourceDiff{ Resources: map[string]*ResourceDiff{

View File

@ -106,6 +106,9 @@ func Graph(opts *GraphOpts) (*depgraph.Graph, error) {
// and no dependencies. // and no dependencies.
graphAddConfigResources(g, opts.Config, opts.State) graphAddConfigResources(g, opts.Config, opts.State)
// Add explicit dependsOn dependencies to the graph
graphAddExplicitDeps(g)
// Next, add the state orphans if we have any // Next, add the state orphans if we have any
if opts.State != nil { if opts.State != nil {
graphAddOrphans(g, opts.Config, opts.State) graphAddOrphans(g, opts.Config, opts.State)
@ -178,6 +181,12 @@ func graphAddConfigResources(
index = i index = i
} }
// Determine if this resource is tainted
tainted := false
if s != nil && s.Tainted != nil {
_, tainted = s.Tainted[r.Id()]
}
var state *ResourceState var state *ResourceState
if s != nil { if s != nil {
state = s.Resources[name] state = s.Resources[name]
@ -209,9 +218,10 @@ func graphAddConfigResources(
Type: r.Type, Type: r.Type,
Config: r, Config: r,
Resource: &Resource{ Resource: &Resource{
Id: name, Id: name,
State: state, State: state,
Config: NewResourceConfig(r.RawConfig), Config: NewResourceConfig(r.RawConfig),
Tainted: tainted,
}, },
}, },
} }
@ -370,6 +380,48 @@ func graphAddDiff(g *depgraph.Graph, d *Diff) error {
return nil return nil
} }
// graphAddExplicitDeps adds the dependencies to the graph for the explicit
// dependsOn configurations.
func graphAddExplicitDeps(g *depgraph.Graph) {
depends := false
rs := make(map[string]*depgraph.Noun)
for _, n := range g.Nouns {
rn, ok := n.Meta.(*GraphNodeResource)
if !ok {
continue
}
rs[rn.Config.Id()] = n
if len(rn.Config.DependsOn) > 0 {
depends = true
}
}
// If we didn't have any dependsOn, just return
if !depends {
return
}
for _, n1 := range rs {
rn1 := n1.Meta.(*GraphNodeResource)
for _, d := range rn1.Config.DependsOn {
for _, n2 := range rs {
rn2 := n2.Meta.(*GraphNodeResource)
if rn2.Config.Id() != d {
continue
}
n1.Deps = append(n1.Deps, &depgraph.Dependency{
Name: d,
Source: n1,
Target: n2,
})
}
}
}
}
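// Hand-built sketch of what graphAddExplicitDeps produces for the
// graph-depends-on fixture (normally the nouns come from
// graphAddConfigResources; struct fields as used above):
//
//    web := &depgraph.Noun{Name: "aws_instance.web", Meta: &GraphNodeResource{
//        Config: &config.Resource{Type: "aws_instance", Name: "web"},
//    }}
//    db := &depgraph.Noun{Name: "aws_instance.db", Meta: &GraphNodeResource{
//        Config: &config.Resource{
//            Type:      "aws_instance",
//            Name:      "db",
//            DependsOn: []string{"aws_instance.web"},
//        },
//    }}
//    g := &depgraph.Graph{Nouns: []*depgraph.Noun{web, db}}
//    graphAddExplicitDeps(g)
//
// db now carries a Dependency with Source db and Target web, matching the
// "aws_instance.db -> aws_instance.web" edge in testTerraformGraphDependsStr.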
// graphAddMissingResourceProviders adds GraphNodeResourceProvider nodes for // graphAddMissingResourceProviders adds GraphNodeResourceProvider nodes for
// the resources that do not have an explicit resource provider specified // the resources that do not have an explicit resource provider specified
// because no provider configuration was given. // because no provider configuration was given.
@ -462,7 +514,8 @@ func graphAddProviderConfigs(g *depgraph.Graph, c *config.Config) {
} }
// Look up the provider config for this resource // Look up the provider config for this resource
pcName := config.ProviderConfigName(resourceNode.Type, c.ProviderConfigs) pcName := config.ProviderConfigName(
resourceNode.Type, c.ProviderConfigs)
if pcName == "" { if pcName == "" {
continue continue
} }
@ -470,11 +523,22 @@ func graphAddProviderConfigs(g *depgraph.Graph, c *config.Config) {
// We have one, so build the noun if it hasn't already been made // We have one, so build the noun if it hasn't already been made
pcNoun, ok := pcNouns[pcName] pcNoun, ok := pcNouns[pcName]
if !ok { if !ok {
var pc *config.ProviderConfig
for _, v := range c.ProviderConfigs {
if v.Name == pcName {
pc = v
break
}
}
if pc == nil {
panic("pc not found")
}
pcNoun = &depgraph.Noun{ pcNoun = &depgraph.Noun{
Name: fmt.Sprintf("provider.%s", pcName), Name: fmt.Sprintf("provider.%s", pcName),
Meta: &GraphNodeResourceProvider{ Meta: &GraphNodeResourceProvider{
ID: pcName, ID: pcName,
Config: c.ProviderConfigs[pcName], Config: pc,
}, },
} }
pcNouns[pcName] = pcNoun pcNouns[pcName] = pcNoun

View File

@ -51,6 +51,21 @@ func TestGraph_cycle(t *testing.T) {
} }
} }
func TestGraph_dependsOn(t *testing.T) {
config := testConfig(t, "graph-depends-on")
g, err := Graph(&GraphOpts{Config: config})
if err != nil {
t.Fatalf("err: %s", err)
}
actual := strings.TrimSpace(g.String())
expected := strings.TrimSpace(testTerraformGraphDependsStr)
if actual != expected {
t.Fatalf("bad:\n\n%s", actual)
}
}
func TestGraph_state(t *testing.T) { func TestGraph_state(t *testing.T) {
config := testConfig(t, "graph-basic") config := testConfig(t, "graph-basic")
state := &State{ state := &State{
@ -347,6 +362,16 @@ root
root -> aws_load_balancer.weblb root -> aws_load_balancer.weblb
` `
const testTerraformGraphDependsStr = `
root: root
aws_instance.db
aws_instance.db -> aws_instance.web
aws_instance.web
root
root -> aws_instance.db
root -> aws_instance.web
`
const testTerraformGraphDiffStr = ` const testTerraformGraphDiffStr = `
root: root root: root
aws_instance.foo aws_instance.foo

View File

@ -53,6 +53,8 @@ func TestReadWritePlan(t *testing.T) {
t.Fatalf("err: %s", err) t.Fatalf("err: %s", err)
} }
println(reflect.DeepEqual(actual.Config.Variables, plan.Config.Variables))
if !reflect.DeepEqual(actual, plan) { if !reflect.DeepEqual(actual, plan) {
t.Fatalf("bad: %#v", actual) t.Fatalf("bad: %#v", actual)
} }

View File

@ -31,6 +31,7 @@ type Resource struct {
Provider ResourceProvider Provider ResourceProvider
State *ResourceState State *ResourceState
Provisioners []*ResourceProvisionerConfig Provisioners []*ResourceProvisionerConfig
Tainted bool
} }
// Vars returns the mapping of variables that should be replaced in // Vars returns the mapping of variables that should be replaced in

View File

@ -20,7 +20,7 @@ type ResourceProvisioner interface {
// resource state along with an error. Instead of a diff, the ResourceConfig // resource state along with an error. Instead of a diff, the ResourceConfig
// is provided since provisioners only run after a resource has been // is provided since provisioners only run after a resource has been
// newly created. // newly created.
Apply(*ResourceState, *ResourceConfig) (*ResourceState, error) Apply(*ResourceState, *ResourceConfig) error
} }
// ResourceProvisionerFactory is a function type that creates a new instance // ResourceProvisionerFactory is a function type that creates a new instance

View File

@ -9,8 +9,7 @@ type MockResourceProvisioner struct {
ApplyCalled bool ApplyCalled bool
ApplyState *ResourceState ApplyState *ResourceState
ApplyConfig *ResourceConfig ApplyConfig *ResourceConfig
ApplyFn func(*ResourceState, *ResourceConfig) (*ResourceState, error) ApplyFn func(*ResourceState, *ResourceConfig) error
ApplyReturn *ResourceState
ApplyReturnError error ApplyReturnError error
ValidateCalled bool ValidateCalled bool
@ -29,12 +28,12 @@ func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error
return p.ValidateReturnWarns, p.ValidateReturnErrors return p.ValidateReturnWarns, p.ValidateReturnErrors
} }
func (p *MockResourceProvisioner) Apply(state *ResourceState, c *ResourceConfig) (*ResourceState, error) { func (p *MockResourceProvisioner) Apply(state *ResourceState, c *ResourceConfig) error {
p.ApplyCalled = true p.ApplyCalled = true
p.ApplyState = state p.ApplyState = state
p.ApplyConfig = c p.ApplyConfig = c
if p.ApplyFn != nil { if p.ApplyFn != nil {
return p.ApplyFn(state, c) return p.ApplyFn(state, c)
} }
return p.ApplyReturn, p.ApplyReturnError return p.ApplyReturnError
} }

View File

@ -11,11 +11,16 @@ import (
func smcUserVariables(c *config.Config, vs map[string]string) []error { func smcUserVariables(c *config.Config, vs map[string]string) []error {
var errs []error var errs []error
cvs := make(map[string]*config.Variable)
for _, v := range c.Variables {
cvs[v.Name] = v
}
// Check that all required variables are present // Check that all required variables are present
required := make(map[string]struct{}) required := make(map[string]struct{})
for k, v := range c.Variables { for _, v := range c.Variables {
if v.Required() { if v.Required() {
required[k] = struct{}{} required[v.Name] = struct{}{}
} }
} }
for k, _ := range vs { for k, _ := range vs {
@ -28,6 +33,20 @@ func smcUserVariables(c *config.Config, vs map[string]string) []error {
} }
} }
// Check that types match up
for k, _ := range vs {
v, ok := cvs[k]
if !ok {
continue
}
if v.Type() != config.VariableTypeString {
errs = append(errs, fmt.Errorf(
"%s: cannot assign string value to map type",
k))
}
}
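// The Type() check above is what rejects assigning a plain string to the
// map-typed "map" variable, while a per-element override such as "map.foo"
// is not an exact variable name and falls through untouched;
// TestSMCUserVariables exercises both cases.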
// TODO(mitchellh): variables that are unknown // TODO(mitchellh): variables that are unknown
return errs return errs

View File

@ -18,4 +18,23 @@ func TestSMCUserVariables(t *testing.T) {
if len(errs) != 0 { if len(errs) != 0 {
t.Fatalf("err: %#v", errs) t.Fatalf("err: %#v", errs)
} }
// Mapping element override
errs = smcUserVariables(c, map[string]string{
"foo": "bar",
"map.foo": "baz",
})
if len(errs) != 0 {
t.Fatalf("err: %#v", errs)
}
// Mapping complete override
errs = smcUserVariables(c, map[string]string{
"foo": "bar",
"map": "baz",
})
if len(errs) == 0 {
t.Fatal("should have errors")
}
} }

View File

@ -18,13 +18,20 @@ import (
type State struct { type State struct {
Outputs map[string]string Outputs map[string]string
Resources map[string]*ResourceState Resources map[string]*ResourceState
Tainted map[string]struct{}
once sync.Once once sync.Once
} }
func (s *State) init() { func (s *State) init() {
s.once.Do(func() { s.once.Do(func() {
s.Resources = make(map[string]*ResourceState) if s.Resources == nil {
s.Resources = make(map[string]*ResourceState)
}
if s.Tainted == nil {
s.Tainted = make(map[string]struct{})
}
}) })
} }
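// init() now only fills in whichever of the maps are nil, presumably so that
// a State built elsewhere (for example read from disk or constructed in tests
// with Resources/Tainted already populated) is not wiped by the sync.Once
// initialization.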
@ -97,7 +104,12 @@ func (s *State) String() string {
id = "<not created>" id = "<not created>"
} }
buf.WriteString(fmt.Sprintf("%s:\n", k)) taintStr := ""
if _, ok := s.Tainted[k]; ok {
taintStr = " (tainted)"
}
buf.WriteString(fmt.Sprintf("%s:%s\n", k, taintStr))
buf.WriteString(fmt.Sprintf(" ID = %s\n", id)) buf.WriteString(fmt.Sprintf(" ID = %s\n", id))
attrKeys := make([]string, 0, len(rs.Attributes)) attrKeys := make([]string, 0, len(rs.Attributes))

View File

@ -130,6 +130,15 @@ aws_instance.foo:
type = aws_instance type = aws_instance
` `
const testTerraformApplyProvisionerFailStr = `
aws_instance.bar: (tainted)
ID = foo
aws_instance.foo:
ID = foo
num = 2
type = aws_instance
`
const testTerraformApplyDestroyStr = ` const testTerraformApplyDestroyStr = `
<no state> <no state>
` `
@ -221,10 +230,13 @@ aws_instance.foo:
const testTerraformApplyVarsStr = ` const testTerraformApplyVarsStr = `
aws_instance.bar: aws_instance.bar:
ID = foo ID = foo
foo = bar bar = foo
baz = override
foo = us-west-2
type = aws_instance type = aws_instance
aws_instance.foo: aws_instance.foo:
ID = foo ID = foo
bar = baz
num = 2 num = 2
type = aws_instance type = aws_instance
` `
@ -399,3 +411,19 @@ STATE:
aws_instance.foo: aws_instance.foo:
ID = bar ID = bar
` `
const testTerraformPlanTaintStr = `
DIFF:
DESTROY: aws_instance.bar
foo: "" => "2"
type: "" => "aws_instance"
STATE:
aws_instance.bar: (tainted)
ID = baz
aws_instance.foo:
ID = bar
num = 2
`

View File

@ -1,3 +1,7 @@
variable "value" {
default = ""
}
resource "aws_instance" "foo" { resource "aws_instance" "foo" {
num = "2" num = "2"
compute = "dynamical" compute = "dynamical"

View File

@ -0,0 +1,7 @@
resource "aws_instance" "foo" {
num = "2"
}
resource "aws_instance" "bar" {
provisioner "shell" {}
}

View File

@ -1,7 +1,23 @@
variable "amis" {
default = {
"us-east-1": "foo",
"us-west-2": "foo",
}
}
variable "bar" {
default = "baz"
}
variable "foo" {}
resource "aws_instance" "foo" { resource "aws_instance" "foo" {
num = "2" num = "2"
bar = "${var.bar}"
} }
resource "aws_instance" "bar" { resource "aws_instance" "bar" {
foo = "${var.foo}" foo = "${var.foo}"
bar = "${lookup(var.amis, var.foo)}"
baz = "${var.amis.us-east-1}"
} }

View File

@ -0,0 +1,5 @@
resource "aws_instance" "web" {}
resource "aws_instance" "db" {
depends_on = ["aws_instance.web"]
}

View File

@ -0,0 +1,7 @@
resource "aws_instance" "foo" {
num = "2"
}
resource "aws_instance" "bar" {
foo = "${aws_instance.foo.num}"
}

View File

@ -6,3 +6,10 @@ variable "foo" {
variable "bar" { variable "bar" {
default = "baz" default = "baz"
} }
# Mapping
variable "map" {
default = {
"foo" = "bar";
}
}