Merge branch 'master' of github.com:hashicorp/terraform

JT 2014-07-24 11:58:27 -07:00
commit f4092c2bfe
55 changed files with 2060 additions and 227 deletions

View File

@ -39,6 +39,14 @@ func init() {
Update: resource_aws_db_security_group_update,
},
"aws_eip": resource.Resource{
ConfigValidator: resource_aws_eip_validation(),
Create: resource_aws_eip_create,
Destroy: resource_aws_eip_destroy,
Diff: resource_aws_eip_diff,
Refresh: resource_aws_eip_refresh,
},
"aws_elb": resource.Resource{
ConfigValidator: resource_aws_elb_validation(),
Create: resource_aws_elb_create,
@ -48,14 +56,6 @@ func init() {
Refresh: resource_aws_elb_refresh,
},
"aws_eip": resource.Resource{
ConfigValidator: resource_aws_eip_validation(),
Create: resource_aws_eip_create,
Destroy: resource_aws_eip_destroy,
Diff: resource_aws_eip_diff,
Refresh: resource_aws_eip_refresh,
},
"aws_instance": resource.Resource{
Create: resource_aws_instance_create,
Destroy: resource_aws_instance_destroy,
@ -112,14 +112,6 @@ func init() {
Update: resource_aws_route_table_association_update,
},
"aws_route53_zone": resource.Resource{
ConfigValidator: resource_aws_r53_zone_validation(),
Create: resource_aws_r53_zone_create,
Destroy: resource_aws_r53_zone_destroy,
Diff: resource_aws_r53_zone_diff,
Refresh: resource_aws_r53_zone_refresh,
},
"aws_route53_record": resource.Resource{
ConfigValidator: resource_aws_r53_record_validation(),
Create: resource_aws_r53_record_create,
@ -129,6 +121,14 @@ func init() {
Update: resource_aws_r53_record_create,
},
"aws_route53_zone": resource.Resource{
ConfigValidator: resource_aws_r53_zone_validation(),
Create: resource_aws_r53_zone_create,
Destroy: resource_aws_r53_zone_destroy,
Diff: resource_aws_r53_zone_diff,
Refresh: resource_aws_r53_zone_refresh,
},
"aws_s3_bucket": resource.Resource{
ConfigValidator: resource_aws_s3_bucket_validation(),
Create: resource_aws_s3_bucket_create,

View File

@ -0,0 +1,101 @@
package digitalocean
import (
"fmt"
"log"
"github.com/hashicorp/terraform/helper/config"
"github.com/hashicorp/terraform/helper/diff"
"github.com/hashicorp/terraform/terraform"
"github.com/pearkes/digitalocean"
)
func resource_digitalocean_domain_create(
s *terraform.ResourceState,
d *terraform.ResourceDiff,
meta interface{}) (*terraform.ResourceState, error) {
p := meta.(*ResourceProvider)
client := p.client
// Merge the diff into the state so that we have all the attributes
// properly.
rs := s.MergeDiff(d)
// Build up our creation options
opts := digitalocean.CreateDomain{
Name: rs.Attributes["name"],
IPAddress: rs.Attributes["ip_address"],
}
log.Printf("[DEBUG] Domain create configuration: %#v", opts)
name, err := client.CreateDomain(&opts)
if err != nil {
return nil, fmt.Errorf("Error creating Domain: %s", err)
}
rs.ID = name
log.Printf("[INFO] Domain Name: %s", name)
return rs, nil
}
func resource_digitalocean_domain_destroy(
s *terraform.ResourceState,
meta interface{}) error {
p := meta.(*ResourceProvider)
client := p.client
log.Printf("[INFO] Deleting Domain: %s", s.ID)
err := client.DestroyDomain(s.ID)
if err != nil {
return fmt.Errorf("Error deleting Domain: %s", err)
}
return nil
}
func resource_digitalocean_domain_refresh(
s *terraform.ResourceState,
meta interface{}) (*terraform.ResourceState, error) {
p := meta.(*ResourceProvider)
client := p.client
domain, err := client.RetrieveDomain(s.ID)
if err != nil {
return s, fmt.Errorf("Error retrieving domain: %s", err)
}
s.Attributes["name"] = domain.Name
return s, nil
}
func resource_digitalocean_domain_diff(
s *terraform.ResourceState,
c *terraform.ResourceConfig,
meta interface{}) (*terraform.ResourceDiff, error) {
b := &diff.ResourceBuilder{
Attrs: map[string]diff.AttrType{
"name": diff.AttrTypeCreate,
"ip_address": diff.AttrTypeCreate,
},
ComputedAttrs: []string{},
}
return b.Diff(s, c)
}
func resource_digitalocean_domain_validation() *config.Validator {
return &config.Validator{
Required: []string{
"name",
"ip_address",
},
Optional: []string{},
}
}

View File

@ -0,0 +1,99 @@
package digitalocean
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"github.com/pearkes/digitalocean"
)
func TestAccDigitalOceanDomain_Basic(t *testing.T) {
var domain digitalocean.Domain
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDigitalOceanDomainDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccCheckDigitalOceanDomainConfig_basic,
Check: resource.ComposeTestCheckFunc(
testAccCheckDigitalOceanDomainExists("digitalocean_domain.foobar", &domain),
testAccCheckDigitalOceanDomainAttributes(&domain),
resource.TestCheckResourceAttr(
"digitalocean_domain.foobar", "name", "foobar-test-terraform.com"),
resource.TestCheckResourceAttr(
"digitalocean_domain.foobar", "ip_address", "192.168.0.10"),
),
},
},
})
}
func testAccCheckDigitalOceanDomainDestroy(s *terraform.State) error {
client := testAccProvider.client
for _, rs := range s.Resources {
if rs.Type != "digitalocean_domain" {
continue
}
// Try to find the domain
_, err := client.RetrieveDomain(rs.ID)
if err == nil {
return fmt.Errorf("Domain still exists")
}
}
return nil
}
func testAccCheckDigitalOceanDomainAttributes(domain *digitalocean.Domain) resource.TestCheckFunc {
return func(s *terraform.State) error {
if domain.Name != "foobar-test-terraform.com" {
return fmt.Errorf("Bad name: %s", domain.Name)
}
return nil
}
}
func testAccCheckDigitalOceanDomainExists(n string, domain *digitalocean.Domain) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.ID == "" {
return fmt.Errorf("No Record ID is set")
}
client := testAccProvider.client
foundDomain, err := client.RetrieveDomain(rs.ID)
if err != nil {
return err
}
if foundDomain.Name != rs.ID {
return fmt.Errorf("Record not found")
}
*domain = foundDomain
return nil
}
}
const testAccCheckDigitalOceanDomainConfig_basic = `
resource "digitalocean_domain" "foobar" {
name = "foobar-test-terraform.com"
ip_address = "192.168.0.10"
}`

View File

@ -81,6 +81,10 @@ func resource_digitalocean_droplet_create(
droplet := dropletRaw.(*digitalocean.Droplet)
// Initialize the connection info
rs.ConnInfo["type"] = "ssh"
rs.ConnInfo["host"] = droplet.IPV4Address()
return resource_digitalocean_droplet_update_state(rs, droplet)
}

View File

@ -0,0 +1,184 @@
package digitalocean
import (
"fmt"
"log"
"github.com/hashicorp/terraform/helper/config"
"github.com/hashicorp/terraform/helper/diff"
"github.com/hashicorp/terraform/terraform"
"github.com/pearkes/digitalocean"
)
func resource_digitalocean_record_create(
s *terraform.ResourceState,
d *terraform.ResourceDiff,
meta interface{}) (*terraform.ResourceState, error) {
p := meta.(*ResourceProvider)
client := p.client
// Merge the diff into the state so that we have all the attributes
// properly.
rs := s.MergeDiff(d)
var err error
newRecord := digitalocean.CreateRecord{
Type: rs.Attributes["type"],
Name: rs.Attributes["name"],
Data: rs.Attributes["value"],
Priority: rs.Attributes["priority"],
Port: rs.Attributes["port"],
Weight: rs.Attributes["weight"],
}
log.Printf("[DEBUG] record create configuration: %#v", newRecord)
recId, err := client.CreateRecord(rs.Attributes["domain"], &newRecord)
if err != nil {
return nil, fmt.Errorf("Failed to create record: %s", err)
}
rs.ID = recId
log.Printf("[INFO] Record ID: %s", rs.ID)
record, err := resource_digitalocean_record_retrieve(rs.Attributes["domain"], rs.ID, client)
if err != nil {
return nil, fmt.Errorf("Couldn't find record: %s", err)
}
return resource_digitalocean_record_update_state(rs, record)
}
func resource_digitalocean_record_update(
s *terraform.ResourceState,
d *terraform.ResourceDiff,
meta interface{}) (*terraform.ResourceState, error) {
p := meta.(*ResourceProvider)
client := p.client
rs := s.MergeDiff(d)
updateRecord := digitalocean.UpdateRecord{}
if attr, ok := d.Attributes["name"]; ok {
updateRecord.Name = attr.New
}
log.Printf("[DEBUG] record update configuration: %#v", updateRecord)
err := client.UpdateRecord(rs.Attributes["domain"], rs.ID, &updateRecord)
if err != nil {
return rs, fmt.Errorf("Failed to update record: %s", err)
}
record, err := resource_digitalocean_record_retrieve(rs.Attributes["domain"], rs.ID, client)
if err != nil {
return rs, fmt.Errorf("Couldn't find record: %s", err)
}
return resource_digitalocean_record_update_state(rs, record)
}
func resource_digitalocean_record_destroy(
s *terraform.ResourceState,
meta interface{}) error {
p := meta.(*ResourceProvider)
client := p.client
log.Printf("[INFO] Deleting record: %s, %s", s.Attributes["domain"], s.ID)
err := client.DestroyRecord(s.Attributes["domain"], s.ID)
if err != nil {
return fmt.Errorf("Error deleting record: %s", err)
}
return nil
}
func resource_digitalocean_record_refresh(
s *terraform.ResourceState,
meta interface{}) (*terraform.ResourceState, error) {
p := meta.(*ResourceProvider)
client := p.client
rec, err := resource_digitalocean_record_retrieve(s.Attributes["domain"], s.ID, client)
if err != nil {
return nil, err
}
return resource_digitalocean_record_update_state(s, rec)
}
func resource_digitalocean_record_diff(
s *terraform.ResourceState,
c *terraform.ResourceConfig,
meta interface{}) (*terraform.ResourceDiff, error) {
b := &diff.ResourceBuilder{
Attrs: map[string]diff.AttrType{
"domain": diff.AttrTypeCreate,
"name": diff.AttrTypeUpdate,
"type": diff.AttrTypeCreate,
"value": diff.AttrTypeCreate,
"priority": diff.AttrTypeCreate,
"port": diff.AttrTypeCreate,
"weight": diff.AttrTypeCreate,
},
ComputedAttrs: []string{
"value",
"priority",
"weight",
"port",
},
}
return b.Diff(s, c)
}
func resource_digitalocean_record_update_state(
s *terraform.ResourceState,
rec *digitalocean.Record) (*terraform.ResourceState, error) {
s.Attributes["name"] = rec.Name
s.Attributes["type"] = rec.Type
s.Attributes["value"] = rec.Data
s.Attributes["weight"] = rec.StringWeight()
s.Attributes["priority"] = rec.StringPriority()
s.Attributes["port"] = rec.StringPort()
// We belong to a Domain
s.Dependencies = []terraform.ResourceDependency{
terraform.ResourceDependency{ID: s.Attributes["domain"]},
}
return s, nil
}
func resource_digitalocean_record_retrieve(domain string, id string, client *digitalocean.Client) (*digitalocean.Record, error) {
record, err := client.RetrieveRecord(domain, id)
if err != nil {
return nil, err
}
return &record, nil
}
func resource_digitalocean_record_validation() *config.Validator {
return &config.Validator{
Required: []string{
"type",
"domain",
},
Optional: []string{
"value",
"name",
"weight",
"port",
"priority",
},
}
}

View File

@ -0,0 +1,175 @@
package digitalocean
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"github.com/pearkes/digitalocean"
)
func TestAccDigitalOceanRecord_Basic(t *testing.T) {
var record digitalocean.Record
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDigitalOceanRecordDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccCheckDigitalOceanRecordConfig_basic,
Check: resource.ComposeTestCheckFunc(
testAccCheckDigitalOceanRecordExists("digitalocean_record.foobar", &record),
testAccCheckDigitalOceanRecordAttributes(&record),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "name", "terraform"),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "domain", "foobar-test-terraform.com"),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "value", "192.168.0.10"),
),
},
},
})
}
func TestAccDigitalOceanRecord_Updated(t *testing.T) {
var record digitalocean.Record
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDigitalOceanRecordDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccCheckDigitalOceanRecordConfig_basic,
Check: resource.ComposeTestCheckFunc(
testAccCheckDigitalOceanRecordExists("digitalocean_record.foobar", &record),
testAccCheckDigitalOceanRecordAttributes(&record),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "name", "terraform"),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "domain", "foobar-test-terraform.com"),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "value", "192.168.0.10"),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "type", "A"),
),
},
resource.TestStep{
Config: testAccCheckDigitalOceanRecordConfig_new_value,
Check: resource.ComposeTestCheckFunc(
testAccCheckDigitalOceanRecordExists("digitalocean_record.foobar", &record),
testAccCheckDigitalOceanRecordAttributesUpdated(&record),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "name", "terraform"),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "domain", "foobar-test-terraform.com"),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "value", "192.168.0.11"),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "type", "A"),
),
},
},
})
}
func testAccCheckDigitalOceanRecordDestroy(s *terraform.State) error {
client := testAccProvider.client
for _, rs := range s.Resources {
if rs.Type != "digitalocean_record" {
continue
}
_, err := client.RetrieveRecord(rs.Attributes["domain"], rs.ID)
if err == nil {
return fmt.Errorf("Record still exists")
}
}
return nil
}
func testAccCheckDigitalOceanRecordAttributes(record *digitalocean.Record) resource.TestCheckFunc {
return func(s *terraform.State) error {
if record.Data != "192.168.0.10" {
return fmt.Errorf("Bad value: %s", record.Data)
}
return nil
}
}
func testAccCheckDigitalOceanRecordAttributesUpdated(record *digitalocean.Record) resource.TestCheckFunc {
return func(s *terraform.State) error {
if record.Data != "192.168.0.11" {
return fmt.Errorf("Bad value: %s", record.Data)
}
return nil
}
}
func testAccCheckDigitalOceanRecordExists(n string, record *digitalocean.Record) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.ID == "" {
return fmt.Errorf("No Record ID is set")
}
client := testAccProvider.client
foundRecord, err := client.RetrieveRecord(rs.Attributes["domain"], rs.ID)
if err != nil {
return err
}
if foundRecord.StringId() != rs.ID {
return fmt.Errorf("Record not found")
}
*record = foundRecord
return nil
}
}
const testAccCheckDigitalOceanRecordConfig_basic = `
resource "digitalocean_domain" "foobar" {
name = "foobar-test-terraform.com"
ip_address = "192.168.0.10"
}
resource "digitalocean_record" "foobar" {
domain = "${digitalocean_domain.foobar.name}"
name = "terraform"
value = "192.168.0.10"
type = "A"
}`
const testAccCheckDigitalOceanRecordConfig_new_value = `
resource "digitalocean_domain" "foobar" {
name = "foobar-test-terraform.com"
ip_address = "192.168.0.10"
}
resource "digitalocean_record" "foobar" {
domain = "${digitalocean_domain.foobar.name}"
name = "terraform"
value = "192.168.0.11"
type = "A"
}`

View File

@ -11,6 +11,14 @@ var resourceMap *resource.Map
func init() {
resourceMap = &resource.Map{
Mapping: map[string]resource.Resource{
"digitalocean_domain": resource.Resource{
ConfigValidator: resource_digitalocean_domain_validation(),
Create: resource_digitalocean_domain_create,
Destroy: resource_digitalocean_domain_destroy,
Diff: resource_digitalocean_domain_diff,
Refresh: resource_digitalocean_domain_refresh,
},
"digitalocean_droplet": resource.Resource{
ConfigValidator: resource_digitalocean_droplet_validation(),
Create: resource_digitalocean_droplet_create,
@ -19,6 +27,15 @@ func init() {
Refresh: resource_digitalocean_droplet_refresh,
Update: resource_digitalocean_droplet_update,
},
"digitalocean_record": resource.Resource{
ConfigValidator: resource_digitalocean_record_validation(),
Create: resource_digitalocean_record_create,
Destroy: resource_digitalocean_record_destroy,
Update: resource_digitalocean_record_update,
Diff: resource_digitalocean_record_diff,
Refresh: resource_digitalocean_record_refresh,
},
},
}
}

View File

@ -1,10 +1,11 @@
package dnsimple
import (
"fmt"
"log"
"os"
"github.com/rubyist/go-dnsimple"
"github.com/pearkes/dnsimple"
)
type Config struct {
@ -14,7 +15,7 @@ type Config struct {
// Client() returns a new client for accessing dnsimple.
//
func (c *Config) Client() (*dnsimple.DNSimpleClient, error) {
func (c *Config) Client() (*dnsimple.Client, error) {
// If we have env vars set (like in the acc) tests,
// we need to override the values passed in here.
@ -25,7 +26,11 @@ func (c *Config) Client() (*dnsimple.DNSimpleClient, error) {
c.Token = v
}
client := dnsimple.NewClient(c.Token, c.Email)
client, err := dnsimple.NewClient(c.Email, c.Token)
if err != nil {
return nil, fmt.Errorf("Error setting up client: %s", err)
}
log.Printf("[INFO] DNSimple Client configured for user: %s", client.Email)

View File

@ -3,12 +3,11 @@ package dnsimple
import (
"fmt"
"log"
"strconv"
"github.com/hashicorp/terraform/helper/config"
"github.com/hashicorp/terraform/helper/diff"
"github.com/hashicorp/terraform/terraform"
"github.com/rubyist/go-dnsimple"
"github.com/pearkes/dnsimple"
)
func resource_dnsimple_record_create(
@ -24,42 +23,74 @@ func resource_dnsimple_record_create(
var err error
newRecord := dnsimple.Record{
newRecord := dnsimple.ChangeRecord{
Name: rs.Attributes["name"],
Content: rs.Attributes["value"],
RecordType: rs.Attributes["type"],
Value: rs.Attributes["value"],
Type: rs.Attributes["type"],
}
if attr, ok := rs.Attributes["ttl"]; ok {
newRecord.TTL, err = strconv.Atoi(attr)
if err != nil {
return nil, err
}
newRecord.Ttl = attr
}
log.Printf("[DEBUG] record create configuration: %#v", newRecord)
rec, err := client.CreateRecord(rs.Attributes["domain"], newRecord)
recId, err := client.CreateRecord(rs.Attributes["domain"], &newRecord)
if err != nil {
return nil, fmt.Errorf("Failed to create record: %s", err)
}
rs.ID = strconv.Itoa(rec.Id)
rs.ID = recId
log.Printf("[INFO] record ID: %s", rs.ID)
return resource_dnsimple_record_update_state(rs, &rec)
record, err := resource_dnsimple_record_retrieve(rs.Attributes["domain"], rs.ID, client)
if err != nil {
return nil, fmt.Errorf("Couldn't find record: %s", err)
}
return resource_dnsimple_record_update_state(rs, record)
}
func resource_dnsimple_record_update(
s *terraform.ResourceState,
d *terraform.ResourceDiff,
meta interface{}) (*terraform.ResourceState, error) {
p := meta.(*ResourceProvider)
client := p.client
rs := s.MergeDiff(d)
panic("Cannot update record")
updateRecord := dnsimple.ChangeRecord{}
return nil, nil
if attr, ok := d.Attributes["name"]; ok {
updateRecord.Name = attr.New
}
if attr, ok := d.Attributes["value"]; ok {
updateRecord.Value = attr.New
}
if attr, ok := d.Attributes["type"]; ok {
updateRecord.Type = attr.New
}
if attr, ok := d.Attributes["ttl"]; ok {
updateRecord.Ttl = attr.New
}
log.Printf("[DEBUG] record update configuration: %#v", updateRecord)
_, err := client.UpdateRecord(rs.Attributes["domain"], rs.ID, &updateRecord)
if err != nil {
return rs, fmt.Errorf("Failed to update record: %s", err)
}
record, err := resource_dnsimple_record_retrieve(rs.Attributes["domain"], rs.ID, client)
if err != nil {
return rs, fmt.Errorf("Couldn't find record: %s", err)
}
return resource_dnsimple_record_update_state(rs, record)
}
func resource_dnsimple_record_destroy(
@ -68,14 +99,10 @@ func resource_dnsimple_record_destroy(
p := meta.(*ResourceProvider)
client := p.client
log.Printf("[INFO] Deleting record: %s", s.ID)
log.Printf("[INFO] Deleting record: %s, %s", s.Attributes["domain"], s.ID)
rec, err := resource_dnsimple_record_retrieve(s.Attributes["domain"], s.ID, client)
if err != nil {
return err
}
err := client.DestroyRecord(s.Attributes["domain"], s.ID)
err = rec.Delete(client)
if err != nil {
return fmt.Errorf("Error deleting record: %s", err)
}
@ -89,7 +116,7 @@ func resource_dnsimple_record_refresh(
p := meta.(*ResourceProvider)
client := p.client
rec, err := resource_dnsimple_record_retrieve(s.Attributes["app"], s.ID, client)
rec, err := resource_dnsimple_record_retrieve(s.Attributes["domain"], s.ID, client)
if err != nil {
return nil, err
}
@ -105,15 +132,16 @@ func resource_dnsimple_record_diff(
b := &diff.ResourceBuilder{
Attrs: map[string]diff.AttrType{
"domain": diff.AttrTypeCreate,
"name": diff.AttrTypeCreate,
"name": diff.AttrTypeUpdate,
"value": diff.AttrTypeUpdate,
"ttl": diff.AttrTypeCreate,
"ttl": diff.AttrTypeUpdate,
"type": diff.AttrTypeUpdate,
},
ComputedAttrs: []string{
"priority",
"domain_id",
"ttl",
},
}
@ -127,25 +155,20 @@ func resource_dnsimple_record_update_state(
s.Attributes["name"] = rec.Name
s.Attributes["value"] = rec.Content
s.Attributes["type"] = rec.RecordType
s.Attributes["ttl"] = strconv.Itoa(rec.TTL)
s.Attributes["priority"] = strconv.Itoa(rec.Priority)
s.Attributes["domain_id"] = strconv.Itoa(rec.DomainId)
s.Attributes["ttl"] = rec.StringTtl()
s.Attributes["priority"] = rec.StringPrio()
s.Attributes["domain_id"] = rec.StringDomainId()
return s, nil
}
func resource_dnsimple_record_retrieve(domain string, id string, client *dnsimple.DNSimpleClient) (*dnsimple.Record, error) {
intId, err := strconv.Atoi(id)
func resource_dnsimple_record_retrieve(domain string, id string, client *dnsimple.Client) (*dnsimple.Record, error) {
record, err := client.RetrieveRecord(domain, id)
if err != nil {
return nil, err
}
record, err := client.RetrieveRecord(domain, intId)
if err != nil {
return nil, fmt.Errorf("Error retrieving record: %s", err)
}
return &record, nil
return record, nil
}
func resource_dnsimple_record_validation() *config.Validator {

View File

@ -3,12 +3,11 @@ package dnsimple
import (
"fmt"
"os"
"strconv"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"github.com/rubyist/go-dnsimple"
"github.com/pearkes/dnsimple"
)
func TestAccDNSimpleRecord_Basic(t *testing.T) {
@ -37,6 +36,45 @@ func TestAccDNSimpleRecord_Basic(t *testing.T) {
})
}
func TestAccDNSimpleRecord_Updated(t *testing.T) {
var record dnsimple.Record
domain := os.Getenv("DNSIMPLE_DOMAIN")
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDNSimpleRecordDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: fmt.Sprintf(testAccCheckDNSimpleRecordConfig_basic, domain),
Check: resource.ComposeTestCheckFunc(
testAccCheckDNSimpleRecordExists("dnsimple_record.foobar", &record),
testAccCheckDNSimpleRecordAttributes(&record),
resource.TestCheckResourceAttr(
"dnsimple_record.foobar", "name", "terraform"),
resource.TestCheckResourceAttr(
"dnsimple_record.foobar", "domain", domain),
resource.TestCheckResourceAttr(
"dnsimple_record.foobar", "value", "192.168.0.10"),
),
},
resource.TestStep{
Config: fmt.Sprintf(testAccCheckDNSimpleRecordConfig_new_value, domain),
Check: resource.ComposeTestCheckFunc(
testAccCheckDNSimpleRecordExists("dnsimple_record.foobar", &record),
testAccCheckDNSimpleRecordAttributesUpdated(&record),
resource.TestCheckResourceAttr(
"dnsimple_record.foobar", "name", "terraform"),
resource.TestCheckResourceAttr(
"dnsimple_record.foobar", "domain", domain),
resource.TestCheckResourceAttr(
"dnsimple_record.foobar", "value", "192.168.0.11"),
),
},
},
})
}
func testAccCheckDNSimpleRecordDestroy(s *terraform.State) error {
client := testAccProvider.client
@ -45,12 +83,7 @@ func testAccCheckDNSimpleRecordDestroy(s *terraform.State) error {
continue
}
intId, err := strconv.Atoi(rs.ID)
if err != nil {
return err
}
_, err = client.RetrieveRecord(rs.Attributes["domain"], intId)
_, err := client.RetrieveRecord(rs.Attributes["domain"], rs.ID)
if err == nil {
return fmt.Errorf("Record still exists")
@ -71,6 +104,17 @@ func testAccCheckDNSimpleRecordAttributes(record *dnsimple.Record) resource.Test
}
}
func testAccCheckDNSimpleRecordAttributesUpdated(record *dnsimple.Record) resource.TestCheckFunc {
return func(s *terraform.State) error {
if record.Content != "192.168.0.11" {
return fmt.Errorf("Bad content: %s", record.Content)
}
return nil
}
}
func testAccCheckDNSimpleRecordExists(n string, record *dnsimple.Record) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.Resources[n]
@ -85,22 +129,17 @@ func testAccCheckDNSimpleRecordExists(n string, record *dnsimple.Record) resourc
client := testAccProvider.client
intId, err := strconv.Atoi(rs.ID)
if err != nil {
return err
}
foundRecord, err := client.RetrieveRecord(rs.Attributes["domain"], intId)
foundRecord, err := client.RetrieveRecord(rs.Attributes["domain"], rs.ID)
if err != nil {
return err
}
if strconv.Itoa(foundRecord.Id) != rs.ID {
if foundRecord.StringId() != rs.ID {
return fmt.Errorf("Record not found")
}
*record = foundRecord
*record = *foundRecord
return nil
}
@ -115,3 +154,13 @@ resource "dnsimple_record" "foobar" {
type = "A"
ttl = 3600
}`
const testAccCheckDNSimpleRecordConfig_new_value = `
resource "dnsimple_record" "foobar" {
domain = "%s"
name = "terraform"
value = "192.168.0.11"
type = "A"
ttl = 3600
}`

View File

@ -5,13 +5,13 @@ import (
"github.com/hashicorp/terraform/helper/config"
"github.com/hashicorp/terraform/terraform"
"github.com/rubyist/go-dnsimple"
"github.com/pearkes/dnsimple"
)
type ResourceProvider struct {
Config Config
client *dnsimple.DNSimpleClient
client *dnsimple.Client
}
func (p *ResourceProvider) Validate(c *terraform.ResourceConfig) ([]string, []error) {

View File

@ -75,6 +75,6 @@ func testAccPreCheck(t *testing.T) {
}
if v := os.Getenv("DNSIMPLE_DOMAIN"); v == "" {
t.Fatal("DNSIMPLE_DOMAIN must be set for acceptance tests. The domain is used to create and destroy record against.")
t.Fatal("DNSIMPLE_DOMAIN must be set for acceptance tests. The domain is used to ` and destroy record against.")
}
}

View File

@ -41,7 +41,7 @@ func FormatState(s *terraform.State, c *colorstring.Colorize) string {
taintStr := ""
if s.Tainted != nil {
if _, ok := s.Tainted[id]; ok {
if _, ok := s.Tainted[k]; ok {
taintStr = " (tainted)"
}
}

View File

@ -112,7 +112,6 @@ func (m *Meta) contextOpts() *terraform.ContextOpts {
opts.Hooks[0] = m.uiHook()
copy(opts.Hooks[1:], m.ContextOpts.Hooks)
copy(opts.Hooks[len(m.ContextOpts.Hooks)+1:], m.extraHooks)
println(fmt.Sprintf("%#v", opts.Hooks))
if len(m.variables) > 0 {
vs := make(map[string]string)

View File

@ -108,31 +108,10 @@ func (c *Config) ProviderFactories() map[string]terraform.ResourceProviderFactor
}
func (c *Config) providerFactory(path string) terraform.ResourceProviderFactory {
originalPath := path
return func() (terraform.ResourceProvider, error) {
// First look for the provider on the PATH.
path, err := exec.LookPath(path)
if err != nil {
// If that doesn't work, look for it in the same directory
// as the executable that is running.
exePath, err := osext.Executable()
if err == nil {
path = filepath.Join(
filepath.Dir(exePath),
filepath.Base(originalPath))
}
}
// If we still don't have a path set, then set it to the
// original path and let any errors that happen bubble out.
if path == "" {
path = originalPath
}
// Build the plugin client configuration and init the plugin
var config plugin.ClientConfig
config.Cmd = exec.Command(path)
config.Cmd = pluginCmd(path)
config.Managed = true
client := plugin.NewClient(&config)
@ -168,31 +147,10 @@ func (c *Config) ProvisionerFactories() map[string]terraform.ResourceProvisioner
}
func (c *Config) provisionerFactory(path string) terraform.ResourceProvisionerFactory {
originalPath := path
return func() (terraform.ResourceProvisioner, error) {
// First look for the provider on the PATH.
path, err := exec.LookPath(path)
if err != nil {
// If that doesn't work, look for it in the same directory
// as the executable that is running.
exePath, err := osext.Executable()
if err == nil {
path = filepath.Join(
filepath.Dir(exePath),
filepath.Base(originalPath))
}
}
// If we still don't have a path set, then set it to the
// original path and let any errors that happen bubble out.
if path == "" {
path = originalPath
}
// Build the plugin client configuration and init the plugin
var config plugin.ClientConfig
config.Cmd = exec.Command(path)
config.Cmd = pluginCmd(path)
config.Managed = true
client := plugin.NewClient(&config)
@ -214,3 +172,29 @@ func (c *Config) provisionerFactory(path string) terraform.ResourceProvisionerFa
}, nil
}
}
func pluginCmd(path string) *exec.Cmd {
originalPath := path
// First look for the provider on the PATH.
path, err := exec.LookPath(path)
if err != nil {
// If that doesn't work, look for it in the same directory
// as the executable that is running.
exePath, err := osext.Executable()
if err == nil {
path = filepath.Join(
filepath.Dir(exePath),
filepath.Base(originalPath))
}
}
// If we still don't have a path set, then set it to the
// original path and let any errors that happen bubble out.
if path == "" {
path = originalPath
}
// Build the command to execute the plugin
return exec.Command(path)
}

View File

@ -232,6 +232,15 @@ CHECK_CYCLES:
}
}
// Check for loops to yourself
for _, n := range g.Nouns {
for _, d := range n.Deps {
if d.Source == d.Target {
vErr.Cycles = append(vErr.Cycles, []*Noun{n})
}
}
}
// Return the detailed error
if vErr.MissingRoot || vErr.Unreachable != nil || vErr.Cycles != nil {
return vErr

View File

@ -11,9 +11,7 @@ import (
func Expand(m map[string]string, key string) interface{} {
// If the key is exactly a key in the map, just return it
if v, ok := m[key]; ok {
if num, err := strconv.ParseInt(v, 0, 0); err == nil {
return int(num)
} else if v == "true" {
if v == "true" {
return true
} else if v == "false" {
return false

View File

@ -44,7 +44,7 @@ func TestExpand(t *testing.T) {
Output: []interface{}{
map[string]interface{}{
"name": "bar",
"port": 3000,
"port": "3000",
"enabled": true,
},
},
@ -63,8 +63,8 @@ func TestExpand(t *testing.T) {
map[string]interface{}{
"name": "bar",
"ports": []interface{}{
1,
2,
"1",
"2",
},
},
},

View File

@ -11,6 +11,7 @@ import (
"net/rpc"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"time"
@ -217,7 +218,7 @@ func (c *Client) Start() (addr net.Addr, err error) {
cmd.Stderr = stderr_w
cmd.Stdout = stdout_w
log.Printf("Starting plugin: %s %#v", cmd.Path, cmd.Args)
log.Printf("[DEBUG] Starting plugin: %s %#v", cmd.Path, cmd.Args)
err = cmd.Start()
if err != nil {
return
@ -248,7 +249,7 @@ func (c *Client) Start() (addr net.Addr, err error) {
cmd.Wait()
// Log and make sure to flush the logs right away
log.Printf("%s: plugin process exited\n", cmd.Path)
log.Printf("[DEBUG] %s: plugin process exited\n", cmd.Path)
os.Stderr.Sync()
// Mark that we exited
@ -295,7 +296,7 @@ func (c *Client) Start() (addr net.Addr, err error) {
timeout := time.After(c.config.StartTimeout)
// Start looking for the address
log.Printf("Waiting for RPC address for: %s", cmd.Path)
log.Printf("[DEBUG] Waiting for RPC address for: %s", cmd.Path)
select {
case <-timeout:
err = errors.New("timeout while waiting for plugin to start")
@ -343,7 +344,7 @@ func (c *Client) logStderr(r io.Reader) {
c.config.Stderr.Write([]byte(line))
line = strings.TrimRightFunc(line, unicode.IsSpace)
log.Printf("%s: %s", c.config.Cmd.Path, line)
log.Printf("%s: %s", filepath.Base(c.config.Cmd.Path), line)
}
if err == io.EOF {

View File

@ -121,7 +121,9 @@ func (c *Context) Apply() (*State, error) {
c.state = c.state.deepcopy()
// Walk
log.Printf("[INFO] Apply walk starting")
err = g.Walk(c.applyWalkFn())
log.Printf("[INFO] Apply walk complete")
// Prune the state so that we have as clean a state as possible
c.state.prune()
@ -565,6 +567,16 @@ func (c *Context) applyWalkFn() depgraph.WalkFunc {
}
}
// Update the resulting state
c.sl.Lock()
if rs.ID == "" {
delete(c.state.Resources, r.Id)
delete(c.state.Tainted, r.Id)
} else {
c.state.Resources[r.Id] = rs
}
c.sl.Unlock()
// Invoke any provisioners we have defined. This is only done
// if the resource was created, as updates or deletes do not
// invoke provisioners.
@ -579,21 +591,17 @@ func (c *Context) applyWalkFn() depgraph.WalkFunc {
}
}
// Update the resulting diff
c.sl.Lock()
if rs.ID == "" {
delete(c.state.Resources, r.Id)
} else {
c.state.Resources[r.Id] = rs
if tainted {
log.Printf("[DEBUG] %s: Marking as tainted", r.Id)
c.sl.Lock()
c.state.Tainted[r.Id] = struct{}{}
}
}
c.sl.Unlock()
}
// Update the state for the resource itself
r.State = rs
r.Tainted = tainted
for _, h := range c.hooks {
handleHook(h.PostApply(r.Id, r.State, applyerr))
@ -716,6 +724,7 @@ func (c *Context) planWalkFn(result *Plan) depgraph.WalkFunc {
if r.Tainted {
// Tainted resources must also be destroyed
log.Printf("[DEBUG] %s: Tainted, marking for destroy", r.Id)
diff.Destroy = true
}

View File

@ -550,6 +550,52 @@ func TestContextApply_provisionerFail(t *testing.T) {
}
}
func TestContextApply_provisionerResourceRef(t *testing.T) {
c := testConfig(t, "apply-provisioner-resource-ref")
p := testProvider("aws")
pr := testProvisioner()
p.ApplyFn = testApplyFn
p.DiffFn = testDiffFn
pr.ApplyFn = func(rs *ResourceState, c *ResourceConfig) error {
val, ok := c.Config["foo"]
if !ok || val != "2" {
t.Fatalf("bad value for foo: %v %#v", val, c)
}
return nil
}
ctx := testContext(t, &ContextOpts{
Config: c,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
Provisioners: map[string]ResourceProvisionerFactory{
"shell": testProvisionerFuncFixed(pr),
},
})
if _, err := ctx.Plan(nil); err != nil {
t.Fatalf("err: %s", err)
}
state, err := ctx.Apply()
if err != nil {
t.Fatalf("err: %s", err)
}
actual := strings.TrimSpace(state.String())
expected := strings.TrimSpace(testTerraformApplyProvisionerResourceRefStr)
if actual != expected {
t.Fatalf("bad: \n%s", actual)
}
// Verify apply was invoked
if !pr.ApplyCalled {
t.Fatalf("provisioner not invoked")
}
}
func TestContextApply_outputDiffVars(t *testing.T) {
c := testConfig(t, "apply-good")
p := testProvider("aws")

View File

@ -3,6 +3,7 @@ package terraform
import (
"errors"
"fmt"
"log"
"sort"
"strings"
@ -100,6 +101,8 @@ func Graph(opts *GraphOpts) (*depgraph.Graph, error) {
return nil, errors.New("Config is required for Graph")
}
log.Printf("[DEBUG] Creating graph...")
g := new(depgraph.Graph)
// First, build the initial resource graph. This only has the resources
@ -160,6 +163,10 @@ func Graph(opts *GraphOpts) (*depgraph.Graph, error) {
return nil, err
}
log.Printf(
"[DEBUG] Graph created and valid. %d nouns.",
len(g.Nouns))
return g, nil
}
@ -604,20 +611,20 @@ func graphAddVariableDeps(g *depgraph.Graph) {
// Handle the resource variables
vars = m.Config.RawConfig.Variables
nounAddVariableDeps(g, n, vars)
nounAddVariableDeps(g, n, vars, false)
// Handle the variables of the resource provisioners
for _, p := range m.Resource.Provisioners {
vars = p.RawConfig.Variables
nounAddVariableDeps(g, n, vars)
nounAddVariableDeps(g, n, vars, true)
vars = p.ConnInfo.Variables
nounAddVariableDeps(g, n, vars)
nounAddVariableDeps(g, n, vars, true)
}
case *GraphNodeResourceProvider:
vars = m.Config.RawConfig.Variables
nounAddVariableDeps(g, n, vars)
nounAddVariableDeps(g, n, vars, false)
default:
continue
@ -627,7 +634,11 @@ func graphAddVariableDeps(g *depgraph.Graph) {
// nounAddVariableDeps updates the dependencies of a noun given
// a set of associated variable values
func nounAddVariableDeps(g *depgraph.Graph, n *depgraph.Noun, vars map[string]config.InterpolatedVariable) {
func nounAddVariableDeps(
g *depgraph.Graph,
n *depgraph.Noun,
vars map[string]config.InterpolatedVariable,
removeSelf bool) {
for _, v := range vars {
// Only resource variables impose dependencies
rv, ok := v.(*config.ResourceVariable)
@ -641,6 +652,12 @@ func nounAddVariableDeps(g *depgraph.Graph, n *depgraph.Noun, vars map[string]co
continue
}
// If we're ignoring self-references, then don't add that
// dependency.
if removeSelf && n == target {
continue
}
// Build the dependency
dep := &depgraph.Dependency{
Name: rv.ResourceId(),

View File

@ -14,6 +14,7 @@ import (
func init() {
gob.Register(make([]interface{}, 0))
gob.Register(make([]map[string]interface{}, 0))
gob.Register(make(map[string]string))
}
// PlanOpts are the options used to generate an execution plan for

View File

@ -42,6 +42,9 @@ func (s *State) deepcopy() *State {
for k, v := range s.Resources {
result.Resources[k] = v
}
for k, v := range s.Tainted {
result.Tainted[k] = v
}
}
return result

View File

@ -139,6 +139,13 @@ aws_instance.foo:
type = aws_instance
`
const testTerraformApplyProvisionerResourceRefStr = `
aws_instance.bar:
ID = foo
num = 2
type = aws_instance
`
const testTerraformApplyDestroyStr = `
<no state>
`

View File

@ -0,0 +1,7 @@
resource "aws_instance" "bar" {
num = "2"
provisioner "shell" {
foo = "${aws_instance.bar.num}"
}
}

View File

@ -1,16 +0,0 @@
---
layout: "docs"
page_title: "Commands: Agent"
sidebar_current: "docs-commands-agent"
---
# Terraform Agent
The `terraform agent` command is the heart of Terraform: it runs the agent that
performs the important task of maintaining membership information,
running checks, announcing services, handling queries, etc.
Due to the power and flexibility of this command, the Terraform agent
is documented in its own section. See the [Terraform Agent](/docs/agent/basics.html)
section for more information on how to use this command and the
options it has.

View File

@ -0,0 +1,50 @@
---
layout: "docs"
page_title: "Commands"
sidebar_current: "docs-commands"
---
# Terraform Commands (CLI)
Terraform is controlled via a very easy to use command-line interface (CLI).
Terraform is only a single command-line application: terraform. This application
then takes a subcommand such as "apply" or "plan". The complete list of subcommands
is in the navigation to the left.
The terraform CLI is a well-behaved command line application. In erroneous cases,
a non-zero exit status will be returned. It also responds to -h and --help as you'd
most likely expect.
To view a list of the available commands at any time, just run terraform with no arguments:
```
$ terraform
usage: terraform [--version] [--help] <command> [<args>]
Available commands are:
apply Builds or changes infrastructure
graph Create a visual graph of Terraform resources
output Read an output from a state file
plan Generate and show an execution plan
refresh Update local state file against real resources
show Inspect Terraform state or plan
version Prints the Terraform version
```
To get help for any specific command, pass the -h flag to the relevant subcommand. For example,
to see help about the graph subcommand:
```
$ terraform graph -h
Usage: terraform graph [options] PATH
Outputs the visual graph of Terraform resources. If the path given is
the path to a configuration, the dependency graph of the resources are
shown. If the path is a plan file, then the dependency graph of the
plan itself is shown.
The graph is outputted in DOT format. The typical program that can
read this format is GraphViz, but many web services are also available
to read this format.
```

View File

@ -0,0 +1,40 @@
---
layout: "docs"
page_title: "Command: refresh"
sidebar_current: "docs-commands-refresh"
---
# Command: refresh
The `terraform refresh` command is used to reconcile the state Terraform
knows about (via its state file) with the real-world infrastructure.
This can be used to detect any drift from the last-known state, and to
update the state file.
This does not modify infrastructure, but does modify the state file.
If the state is changed, this may cause changes to occur during the next
plan or apply.
## Usage
Usage: `terraform refresh [options] [dir]`
By default, `refresh` requires no flags and looks in the current directory
for the configuration and state file to refresh.
The command-line flags are all optional. The list of available flags is:
* `-no-color` - Disables output with coloring
* `-state=path` - Path to read and write the state file to. Defaults to "terraform.tfstate".
* `-state-out=path` - Path to write updated state file. By default, the
`-state` path will be used.
* `-var 'foo=bar'` - Set a variable in the Terraform configuration. This
flag can be set multiple times.
* `-var-file=foo` - Set variables in the Terraform configuration from
a file. If "terraform.tfvars" is present, it will be automatically
loaded if this flag is not specified.
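As an example, a refresh run that reads one state file and writes the
updated state to a separate file might look like the following (the
file names and variable are placeholders):

```
$ terraform refresh \
    -state=terraform.tfstate \
    -state-out=terraform.refreshed.tfstate \
    -var 'foo=bar'
```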

View File

@ -0,0 +1,24 @@
---
layout: "docs"
page_title: "Command: show"
sidebar_current: "docs-commands-show"
---
# Command: show
The `terraform show` command is used to provide human-readable output
from a state or plan file. This can be used to inspect a plan to ensure
that the planned operations are expected, or to inspect the current state
as terraform sees it.
## Usage
Usage: `terraform show [options] <path>`
You must call `show` with a path to either a Terraform state file or plan
file.
The command-line flags are all optional. The list of available flags is:
* `-no-color` - Disables output with coloring
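For example, to inspect a saved plan file and then the current state
(the file names are placeholders):

```
$ terraform show terraform.tfplan
$ terraform show -no-color terraform.tfstate
```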

View File

@ -0,0 +1,57 @@
---
layout: "aws"
page_title: "AWS: aws_autoscaling_group"
sidebar_current: "docs-aws-resource-autoscale"
---
# aws\_autoscaling\_group
Provides an AutoScaling Group resource.
## Example Usage
```
resource "aws_autoscaling_group" "bar" {
availability_zones = ["us-east-1a"]
name = "foobar3-terraform-test"
max_size = 5
min_size = 2
health_check_grace_period = 300
health_check_type = "ELB"
desired_capicity = 4
force_delete = true
launch_configuration = "${aws_launch_configuration.foobar.name}"
}
```
## Argument Reference
The following arguments are supported:
* `name` - (Required) The name of the auto scale group.
* `max_size` - (Required) The maximum size of the auto scale group.
* `min_size` - (Required) The minimum size of the auto scale group.
* `availability_zones` - (Required) A list of AZs to launch resources in.
* `launch_configuration` - (Required) The ID of the launch configuration to use.
* `health_check_grace_period` - (Optional) Time after instance comes into service before checking health.
* `health_check_type` - (Optional) "EC2" or "ELB". Controls how health checking is done.
* `desired_capicity` - (Optional) The number of Amazon EC2 instances that should be running in the group.
* `force_delete` - (Optional) Allows deleting the autoscaling group without waiting
for all instances in the pool to terminate.
## Attributes Reference
The following attributes are exported:
* `id` - The autoscaling group name.
* `availability_zones` - The availability zones of the autoscale group.
* `min_size` - The minimum size of the autoscale group
* `max_size` - The maximum size of the autoscale group
* `default_cooldown` - Time between a scaling activity and the succeeding scaling activity.
* `name` - The name of the autoscale group
* `health_check_grace_period` - Time after instance comes into service before checking health.
* `health_check_type` - "EC2" or "ELB". Controls how health checking is done.
* `desired_capicity` - The number of Amazon EC2 instances that should be running in the group.
* `launch_configuration` - The launch configuration of the autoscale group
* `vpc_zone_identifier` - The VPC zone identifier

View File

@ -0,0 +1,71 @@
---
layout: "aws"
page_title: "AWS: aws_db_instance"
sidebar_current: "docs-aws-resource-db-instance"
---
# aws\_db\_instance
Provides an RDS instance resource.
## Example Usage
```
resource "aws_db_instance" "default" {
identifier = "mydb-rds"
allocated_storage = 10
engine = "mysql"
engine_version = "5.6.17"
instance_class = "db.t1.micro"
name = "mydb"
username = "foo"
password = "bar"
security_group_names = ["${aws_db_security_group.bar.name}"]
}
```
## Argument Reference
The following arguments are supported:
* `allocated_storage` - (Required) The allocated storage in gigabytes.
* `engine` - (Required) The database engine to use.
* `engine_version` - (Required) The engine version to use.
* `identifier` - (Required) The name of the RDS instance
* `instance_class` - (Required) The instance type of the RDS instance.
* `name` - (Required) The DB name to create.
* `password` - (Required) Password for the master DB user.
* `username` - (Required) Username for the master DB user.
* `availability_zone` - (Optional) The AZ for the RDS instance.
* `backup_retention_period` - (Optional) The days to retain backups for.
* `backup_window` - (Optional) The backup window.
* `iops` - (Optional) The amount of provisioned IOPS
* `maintenance_window` - (Optional) The window to perform maintenance in.
* `multi_az` - (Optional) Specifies if the RDS instance is multi-AZ
* `port` - (Optional) The port on which the DB accepts connections.
* `publicly_accessible` - (Optional) Bool to control if instance is publicly accessible.
* `vpc_security_group_ids` - (Optional) List of VPC security groups to associate.
* `skip_final_snapshot` - (Optional) Enables skipping the final snapshot on deletion.
* `security_group_names` - (Optional) List of DB Security Groups to associate.
## Attributes Reference
The following attributes are exported:
* `id` - The RDS instance ID.
* `address` - The address of the RDS instance.
* `allocated_storage` - The amount of allocated storage
* `availability_zone` - The availability zone of the instance
* `backup_retention_period` - The backup retention period
* `backup_window` - The backup window
* `endpoint` - The connection endpoint
* `engine` - The database engine
* `engine_version` - The database engine version
* `instance_class`- The RDS instance class
* `maintenance_window` - The instance maintenance window
* `multi_az` - If the RDS instance is multi AZ enabled
* `name` - The database name
* `port` - The database port
* `status` - The RDS instance status
* `username` - The master username for the database

View File

@ -0,0 +1,43 @@
---
layout: "aws"
page_title: "AWS: aws_db_security_group"
sidebar_current: "docs-aws-resource-db-security-group"
---
# aws\_db\_security\_group
Provides an RDS security group resource.
## Example Usage
```
resource "aws_db_security_group" "default" {
name = "RDS default security group"
ingress {
cidr = "10.0.0.1/24"
}
}
```
## Argument Reference
The following arguments are supported:
* `name` - (Required) The name of the DB security group.
* `description` - (Required) The description of the DB security group.
* `ingress` - (Optional) A list of ingress rules.
Ingress blocks support the following:
* `cidr` - The CIDR block to accept
* `security_group_name` - The name of the security group to authorize
* `security_group_id` - The ID of the security group to authorize
* `security_group_owner_id` - The owner Id of the security group provided
by `security_group_name`.
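For example, a sketch of an ingress block that authorizes another
security group rather than a CIDR block (the group name and owner ID
below are placeholders):

```
resource "aws_db_security_group" "default" {
    name = "RDS default security group"
    description = "Managed by Terraform"

    ingress {
        security_group_name = "my-ec2-security-group"
        security_group_owner_id = "123456789012"
    }
}
```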
## Attributes Reference
The following attributes are exported:
* `id` - The db security group ID.

View File

@ -0,0 +1,34 @@
---
layout: "digitalocean"
page_title: "Provider: DigitalOcean"
sidebar_current: "docs-do-index"
---
# DigitalOcean Provider
The DigitalOcean (DO) provider is used to interact with the
resources supported by DigitalOcean. The provider needs to be configured
with the proper credentials before it can be used.
Use the navigation to the left to read about the available resources.
## Example Usage
```
# Configure the DigitalOcean Provider
provider "digitalocean" {
token = "${var.do_token}"
}
# Create a web server
resource "digitalocean_droplet" "web" {
...
}
```
## Argument Reference
The following arguments are supported:
* `token` - (Required) This is the DO API token.

View File

@ -0,0 +1,53 @@
---
layout: "digitalocean"
page_title: "DigitalOcean: digitalocean_droplet"
sidebar_current: "docs-do-resource-droplet"
---
# digitalocean\_droplet
Provides a DigitalOcean droplet resource. This can be used to create,
modify, and delete droplets. Droplets also support
[provisioning](/docs/provisioners/index.html).
## Example Usage
```
# Create a new Web droplet in the nyc2 region
resource "digitalocean_droplet" "web" {
image = "ubuntu1404"
name = "web-1"
region = "nyc2"
size = "512mb"
}
```
## Argument Reference
The following arguments are supported:
* `image` - (Required) The droplet image ID or slug.
* `name` - (Required) The droplet name
* `region` - (Required) The region to start in
* `size` - (Required) The instance size to start
* `backups` - (Optional) Boolean controlling if backups are made.
* `ipv6` - (Optional) Boolean controlling if IPv6 is enabled.
* `private_networking` - (Optional) Boolean controlling if private networks are enabled.
* `ssh_keys` - (Optional) A list of SSH IDs or fingerprints to enable.
## Attributes Reference
The following attributes are exported:
* `id` - The ID of the droplet
* `name`- The name of the droplet
* `region` - The region of the droplet
* `image` - The image of the droplet
* `ipv6` - Is IPv6 enabled
* `ipv6_address` - The IPv6 address
* `ipv4_address` - The IPv4 address
* `locked` - Is the Droplet locked
* `private_networking` - Is private networking enabled
* `size` - The instance size
* `status` - The status of the droplet

View File

@ -0,0 +1,36 @@
---
layout: "heroku"
page_title: "Provider: Heroku"
sidebar_current: "docs-heroku-index"
---
# Heroku Provider
The Heroku provider is used to interact with the
resources supported by Heroku. The provider needs to be configured
with the proper credentials before it can be used.
Use the navigation to the left to read about the available resources.
## Example Usage
```
# Configure the Heroku provider
provider "heroku" {
email = "ops@company.com"
api_key = "${var.heroku_api_key}"
}
# Create a new application
resource "heroku_app" "default" {
...
}
```
## Argument Reference
The following arguments are supported:
* `api_key` - (Required) Heroku API token
* `email` - (Required) Email to be notified by Heroku

View File

@ -0,0 +1,46 @@
---
layout: "heroku"
page_title: "Heroku: heroku_addon"
sidebar_current: "docs-heroku-resource-addon"
---
# heroku\_addon
Provides a Heroku Add-On resource. These can be used to
attach services to a Heroku app.
## Example Usage
```
# Create a new heroku app
resource "heroku_app" "default" {
name = "test-app"
}
# Add a web-hook addon for the app
resource "heroku_addon" "webhook" {
app = "${heroku_app.default.name}"
plan = "deployhooks:http"
config {
url = "http://google.com"
}
}
```
## Argument Reference
The following arguments are supported:
* `app` - (Required) The Heroku app to add to.
* `plan` - (Required) The addon to add.
* `config` - (Optional) Optional plan configuration.
## Attributes Reference
The following attributes are exported:
* `id` - The ID of the add-on
* `name` - The add-on name
* `plan` - The plan name
* `provider_id` - The ID of the plan provider

View File

@ -0,0 +1,41 @@
---
layout: "heroku"
page_title: "Heroku: heroku_app"
sidebar_current: "docs-heroku-resource-app"
---
# heroku\_app
Provides a Heroku App resource. This can be used to
create and manage applications on Heroku.
## Example Usage
```
# Create a new heroku app
resource "heroku_app" "default" {
name = "my-cool-app"
}
```
## Argument Reference
The following arguments are supported:
* `name` - (Optional) The name of the Heroku app
* `region` - (Optional) The region of the Heroku app
* `stack` - (Optional) The stack for the Heroku app
* `config_vars` - (Optional) Configuration variables for the app
## Attributes Reference
The following attributes are exported:
* `id` - The ID of the app
* `name` - The name of the app
* `stack` - The stack of the app
* `region` - The region of the app
* `git_url` - The Git URL for the app
* `web_url` - The Web URL for the app
* `heroku_hostname` - The Heroku URL for the app

View File

@ -0,0 +1,41 @@
---
layout: "heroku"
page_title: "Heroku: heroku_domain"
sidebar_current: "docs-heroku-resource-domain"
---
# heroku\_domain
Provides a Heroku Domain resource. This can be used to
create and manage custom domains on Heroku apps.
## Example Usage
```
# Create a new heroku app
resource "heroku_app" "default" {
name = "test-app"
}
# Associate a custom domain
resource "heroku_domain" "default" {
app = "${heroku_app.default.name}"
hostname = "terraform.example.com"
}
```
## Argument Reference
The following arguments are supported:
* `hostname` - (Required) The hostname to serve requests from.
* `app` - (Required) The Heroku app to link to.
## Attributes Reference
The following attributes are exported:
* `id` - The ID of the domain record
* `hostname` - The hostname traffic will be served as
* `cname` - The cname traffic should route to.

View File

@ -22,7 +22,7 @@ resource "aws_instance" "web" {
provisioner "remote-exec" {
inline = [
"puppet apply",
"consul join ${aws_instance.web.private_ip",
"consul join ${aws_instance.web.private_ip}",
]
}
}

View File

@ -194,6 +194,19 @@ a lot more metadata about it. This metadata can actually be referenced
for other resources or outputs, which will be covered later in
the getting started guide.
## Provisioning
The EC2 instance we launched at this point is based on the AMI
given, but has no additional software installed. If you're running
an image-based infrastructure (perhaps creating images with
[Packer](http://www.packer.io)), then this is all you need.
However, many infrastructures still require some sort of initialization
or software provisioning step. To do this, Terraform supports
[provisioners](/docs/provisioners/index.html),
which we'll cover a little bit later in the getting started guide.
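As a quick preview, a provisioner is simply a block nested inside a
resource. A minimal sketch, reusing the remote-exec provisioner shown
elsewhere in this guide (the command is only an example):

```
resource "aws_instance" "example" {
    ami = "ami-aa7ab6c2"
    instance_type = "t1.micro"

    provisioner "remote-exec" {
        inline = [
            "puppet apply",
        ]
    }
}
```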
## Next
Congratulations! You've built your first infrastructure with Terraform.

View File

@ -0,0 +1,165 @@
---
layout: "intro"
page_title: "Resource Dependencies"
sidebar_current: "gettingstarted-deps"
---
# Resource Dependencies
In this page, we're going to introduce resource dependencies,
where we'll not only see a configuration with multiple resources
for the first time, but also scenarios where resource parameters
use information from other resources.
Up to this point, our example has only contained a single resource.
Real infrastructure has a diverse set of resources and resource
types. Terraform configurations can contain multiple resources,
multiple resource types, and these types can even span multiple
providers.
On this page, we'll show a basic example of multiple resources
and how to reference the attributes of other resources to configure
subsequent resources.
## Assigning an Elastic IP
We'll improve our configuration by assigning an elastic IP to
the EC2 instance we're managing. Modify your `example.tf` and
add the following:
```
resource "aws_eip" "ip" {
instance = "${aws_instance.example.id}"
}
```
This should look familiar from the earlier example of adding
an EC2 instance resource, except this time we're building
an "aws\_eip" resource type. This resource type allocates
and associates an
[elastic IP](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html)
to an EC2 instance.
The only parameter for
[aws\_eip](/docs/providers/aws/r/eip.html) is "instance" which
is the EC2 instance to assign the IP to. For this value, we
use an interpolation to use an attribute from the EC2 instance
we managed earlier.
The syntax for this interpolation should be straightforward:
it requests the "id" attribute from the "aws\_instance.example"
resource.
## Plan and Execute
Run `terraform plan` to view the execution plan. The output
will look something like the following:
```
$ terraform plan
...
+ aws_eip.ip
instance: "" => "${aws_instance.example.id}"
private_ip: "" => "<computed>"
public_ip: "" => "<computed>"
+ aws_instance.example
ami: "" => "ami-aa7ab6c2"
availability_zone: "" => "<computed>"
instance_type: "" => "t1.micro"
key_name: "" => "<computed>"
private_dns: "" => "<computed>"
private_ip: "" => "<computed>"
public_dns: "" => "<computed>"
public_ip: "" => "<computed>"
security_groups: "" => "<computed>"
subnet_id: "" => "<computed>"
```
Terraform will create two resources: the instance and the elastic
IP. In the "instance" value for the "aws\_eip", you can see the
raw interpolation is still present. This is because this variable
won't be known until the "aws\_instance" is created. It will be
replaced at apply-time.
Next, run `terraform apply`. The output will look similar to the
following:
```
aws_instance.example: Creating...
ami: "" => "ami-aa7ab6c2"
instance_type: "" => "t1.micro"
aws_eip.ip: Creating...
instance: "" => "i-0e737b25"
Apply complete! Resources: 2 added, 0 changed, 0 destroyed.
```
It is clearer to see from actually running Terraform, but
Terraform creates the EC2 instance before the elastic IP
address. Due to the interpolation earlier where the elastic
IP requires the ID of the EC2 instance, Terraform is able
to infer a dependency, and knows to create the instance
first.
## Implicit and Explicit Dependencies
Most dependencies in Terraform are implicit: Terraform is able
to infer dependencies based on usage of attributes of other
resources.
Using this information, Terraform builds a graph of resources.
This tells Terraform not only what order to create resources,
but also what resources can be created in parallel. In our example,
since the IP address depended on the EC2 instance, they could
not be created in parallel.
Implicit dependencies work well and are usually all you ever need.
However, you can also specify explicit dependencies with the
`depends_on` parameter which is available on any resource. For example,
we could modify the "aws\_eip" resource to the following, which
effectively does the same thing and is redundant:
```
resource "aws_eip" "ip" {
  instance = "${aws_instance.example.id}"
  depends_on = ["aws_instance.example"]
}
```
If you're ever unsure about the dependency chain that Terraform
is creating, you can use the `terraform graph` command to view
the graph. This command outputs a dot-formatted graph which can be
viewed with
[Graphviz](http://www.graphviz.org/).
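If Graphviz is installed, you can pipe the graph output directly
into its `dot` tool to render an image; the exact invocation and
output filename below are just one example:
```
$ terraform graph | dot -Tpng > graph.png
```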
## Non-Dependent Resources
We can now augment the configuration with another EC2 instance.
Because this doesn't rely on any other resource, it can be
created in parallel to everything else.
```
resource "aws_instance" "another" {
  ami = "ami-aa7ab6c2"
  instance_type = "t1.micro"
}
```
You can view the graph with `terraform graph` to see that
nothing depends on this and that it will likely be created
in parallel.
Before moving on, remove this resource from your configuration
and `terraform apply` again to destroy it. We won't use the
second instance anymore in the getting started guide.
## Next
In this page you were introduced to both multiple resources
as well as basic resource dependencies and resource attribute
interpolation.
Moving on, we'll use provisioners to do some basic bootstrapping
of our launched instance.

View File

@ -18,35 +18,37 @@ destroying is a useful action.
## Plan
While our infrastructure is simple, viewing the execution plan
of a destroy can be useful to make sure that it is destroying
only the resources you expect.
To ask Terraform to create an execution plan to destroy all
infrastructure, run the plan command with the `-destroy` flag.
For Terraform to destroy our infrastructure, we need to ask
Terraform to generate a destroy execution plan. This is a special
kind of execution plan that only destroys all Terraform-managed
infrastructure, and doesn't create or update any components.
```
$ terraform plan -destroy
$ terraform plan -destroy -out=terraform.tfplan
...
- aws_instance.example
```
The output says that "aws\_instance.example" will be deleted.
The plan command is given two new flags.
The `-destroy` flag lets you destroy infrastructure without
modifying the configuration. You can also destroy infrastructure
by simply commenting out or deleting the contents of your
configuration, but usually you just want to destroy an instance
of your infrastructure rather than permanently deleting your
configuration as well. The `-destroy` flag is for this case.
The first flag, `-destroy`, tells Terraform to create an execution
plan to destroy the infrastructure. You can see in the output that
our one EC2 instance will be destroyed.
The second flag, `-out`, tells Terraform to save the execution plan
to a file. We haven't seen this before, but it isn't limited to
destroys: any plan can be saved to a file. Terraform can then
apply that plan, ensuring that exactly the plan you saw is executed.
For destroys, you must save the plan to a file, since there is no
way to tell `apply` to destroy otherwise.
## Apply
Let's apply the destroy:
```
$ terraform apply -destroy
$ terraform apply terraform.tfplan
aws_instance.example: Destroying...
Apply complete! Resources: 0 added, 0 changed, 1 destroyed.
@ -57,6 +59,9 @@ Apply complete! Resources: 0 added, 0 changed, 1 destroyed.
Done. Terraform destroyed our one instance, and if you run a
`terraform show`, you'll see that the state file is now empty.
For this command, we gave an argument to `apply` for the first
time. You can give apply a specific plan to execute.
## Next
You now know how to create, modify, and destroy infrastructure.

View File

@ -6,24 +6,16 @@ sidebar_current: "gettingstarted-nextsteps"
# Next Steps
That concludes the getting started guide for Terraform. Hopefully you're able to
see that while Terraform is simple to use, it has a powerful set of features.
We've covered the basics for all of these features in this guide.
That concludes the getting started guide for Terraform. Hopefully
you're now able to not only see what Terraform is useful for, but
you're also able to put this knowledge to use to improve building
your own infrastructure.
Terraform is designed to be friendly to both the DevOps community and
application developers, making it perfect for modern, elastic infrastructures.
We've covered the basics for all of these features in this guide.
As a next step, the following resources are available:
* [Documentation](/docs/index.html) - The documentation is an in-depth reference
guide to all the features of Terraform, including technical details about the
internals of how Terraform operates.
* [Guides](/docs/guides/index.html) - This section provides various getting
started guides with Terraform, including how to bootstrap a new datacenter.
* [Examples](https://github.com/hashicorp/terraform/tree/master/demo) -
The work-in-progress examples folder within the GitHub
repository for Terraform contains functional examples of various use cases
of Terraform to help you get started with exactly what you need.
* [Documentation](/docs/index.html) - The documentation is an in-depth
reference guide to all the features of Terraform, including
technical details about the internals of how Terraform operates.

View File

@ -0,0 +1,78 @@
---
layout: "intro"
page_title: "Output Variables"
sidebar_current: "gettingstarted-outputs"
---
# Output Variables
In the previous section, we introduced input variables as a way
to parameterize Terraform configurations. In this page, we
introduce output variables as a way to organize data to be
easily queried and shown back to the Terraform user.
When building potentially complex infrastructure, Terraform
stores hundreds or thousands of attribute values for all your
resources. But as a user of Terraform, you may only be interested
in a few values of importance, such as a load balancer IP,
VPN address, etc.
Outputs are a way to tell Terraform what data is important.
This data is outputted when `apply` is called, and can be
queried using the `terraform output` command.
## Defining Outputs
Let's define an output to show us the public IP address of the
elastic IP address that we create. Add this to any of your
`*.tf` files:
```
output "ip" {
  value = "${aws_eip.ip.public_ip}"
}
```
This defines an output variable named "ip". The `value` field
specifies what the value will be, and almost always contains
one or more interpolations, since the output data is typically
dynamic in some form. In this case, we're outputting the
`public_ip` attribute of the elastic IP address.
Multiple `output` blocks can be defined to specify multiple
output variables.
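For example, we could also expose the public DNS name of the EC2
instance; the extra "instance_dns" output below is just an
illustration and isn't part of the guide's configuration:
```
output "ip" {
  value = "${aws_eip.ip.public_ip}"
}

# A second, illustrative output using the instance's public DNS name.
output "instance_dns" {
  value = "${aws_instance.example.public_dns}"
}
```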
## Viewing Outputs
Run `terraform apply` to populate the output. This only needs
to be done once after the output is defined. The apply output
should change slightly. At the end you should see this:
```
$ terraform apply
...
Apply complete! Resources: 0 added, 0 changed, 0 destroyed.
Outputs:
ip = 50.17.232.209
```
`apply` highlights the outputs. You can also query the outputs
after apply-time using `terraform output`:
```
$ terraform output ip
50.17.232.209
```
This command is useful for scripts to extract outputs.
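For example, a script could use the output to SSH into the
instance; the "ubuntu" user below is just a placeholder:
```
$ ssh ubuntu@$(terraform output ip)
```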
## Next
You now know how to parameterize configurations with input
variables, and extract important data using output variables.
Next, we're going to use provisioners to install some software
on the instances created on top of the base AMI used.

View File

@ -0,0 +1,110 @@
---
layout: "intro"
page_title: "Provision"
sidebar_current: "gettingstarted-provision"
---
# Provision
You're now able to create and modify infrastructure. This page
introduces how to use provisioners to run basic shell scripts on
instances when they're created.
If you're using an image-based infrastructure (perhaps with images
created with [Packer](http://www.packer.io)), then what you've
learned so far is good enough. But if you need to do some initial
setup on your instances, provisioners let you upload files,
run shell scripts, etc.
## Defining a Provisioner
To define a provisioner, modify the resource block defining the
"example" EC2 instance to look like the following:
```
resource "aws_instance" "example" {
  ami = "ami-aa7ab6c2"
  instance_type = "t1.micro"

  provisioner "local-exec" {
    command = "echo ${aws_instance.example.public_ip} > file.txt"
  }
}
```
This adds a `provisioner` block within the `resource` block. Multiple
`provisioner` blocks can be added to define multiple provisioning steps.
Terraform supports
[multiple provisioners](/docs/provisioners/index.html),
but for this example we use the "local-exec" provisioner.
The "local-exec" provisioner executes a command locally on the machine
running Terraform. We're using this provisioner versus the others so
we don't have to worry about specifying any
[connection info](/docs/provisioners/connection.html) right now.
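For comparison, a provisioner that runs on the instance itself might
look like the sketch below. It uses the "remote-exec" provisioner and
would need SSH connection details as described in the connection
docs; we don't use it in this guide:
```
resource "aws_instance" "example" {
  ami = "ami-aa7ab6c2"
  instance_type = "t1.micro"

  # Sketch only: runs commands on the new instance over SSH, so it
  # needs connection info (user, key), unlike "local-exec".
  provisioner "remote-exec" {
    inline = ["echo 'bootstrapped' > /tmp/bootstrap.txt"]
  }
}
```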
## Running Provisioners
Provisioners are run only when a resource is _created_. They
are not a replacement for configuration management or for changing
the software of an already-running server; they are simply meant
as a way to bootstrap a server. For configuration management,
you should use Terraform provisioning to bootstrap a real configuration
management solution.
Make sure that your infrastructure is
[destroyed](/intro/getting-started/destroy.html) if it isn't already,
then run `apply`:
```
$ terraform apply
aws_instance.example: Creating...
  ami: "" => "ami-aa7ab6c2"
  instance_type: "" => "t1.micro"
aws_eip.ip: Creating...
  instance: "" => "i-213f350a"
Apply complete! Resources: 2 added, 0 changed, 0 destroyed.
```
Terraform currently doesn't output anything to indicate the provisioners
have run. This is going to be fixed soon. However, we can verify
everything worked by looking at the "file.txt" file:
```
$ cat file.txt
54.192.26.128
```
It contains the IP, just as we asked!
## Failed Provisioners and Tainted Resources
If a resource is successfully created but fails during provisioning,
Terraform will error and mark the resource as "tainted." A
resource that is tainted has been physically created, but can't
be considered safe to use since provisioning failed.
When you generate your next execution plan, Terraform will remove
any tainted resources and create new resources, attempting to
provision again. It does not attempt to restart provisioning on the
same resource because it isn't guaranteed to be safe.
Terraform does not automatically roll back and destroy the resource
during the apply when the failure happens, because that would go
against the execution plan: the plan said the resource would be
created, but it never said the resource would be deleted.
But if you create an execution plan with a tainted resource, the
plan will clearly state that the resource will be destroyed because
it is tainted.
## Next
Provisioning is important for being able to bootstrap instances.
As another reminder, it is not a replacement for configuration
management; it is simply meant to bootstrap machines. If you use
configuration management, you should use provisioning as a way
to bootstrap the configuration management utility.
In the next section, we start looking at variables as a way to
better parameterize our configurations.

View File

@ -0,0 +1,141 @@
---
layout: "intro"
page_title: "Input Variables"
sidebar_current: "gettingstarted-variables"
---
# Input Variables
You now have enough Terraform knowledge to create useful
configurations, but we're still hardcoding access keys,
AMIs, etc. To become truly shareable and committable to version
control, we need to parameterize the configurations. This page
introduces input variables as a way to do this.
## Defining Variables
Let's first extract our access key, secret key, and region
into a few variables. Create another file `variables.tf` with
the following contents. Note that the file can be named anything,
since Terraform loads all files ending in `.tf` in a directory.
```
variable "access_key" {}
variable "secret_key" {}
variable "region" {
  default = "us-east-1"
}
```
This defines three variables within your Terraform configuration.
The first two have empty blocks `{}`. The third sets a default. If
a default value is set, the variable is optional. Otherwise, the
variable is required. If you run `terraform plan` now, Terraform will
error since the required variables are not set.
## Using Variables in Configuration
Next, replace the AWS provider configuration with the following:
```
provider "aws" {
  access_key = "${var.access_key}"
  secret_key = "${var.secret_key}"
  region = "${var.region}"
}
```
This uses more interpolations, this time prefixed with `var.`. This
tells Terraform that you're accessing variables. This configures
the AWS provider with the given variables.
## Assigning Variables
There are two ways to assign variables.
First, you can set variables directly on the command line with the
`-var` flag. Any command in Terraform that inspects the configuration
accepts this flag, such as `apply`, `plan`, and `refresh`:
```
$ terraform plan \
-var 'access_key=foo' \
-var 'secret_key=bar'
...
```
Second, you can create a file and assign variables directly. Create
a file named "terraform.tfvars" with the following contents:
```
access_key = "foo"
secret_key = "bar"
```
If a "terraform.tfvars" file is present, Terraform automatically loads
it to populate variables. If the file is named something else, you can
use the `-var-file` flag directly to specify a file.
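For example, assuming your variables live in a file named
"production.tfvars" (the filename here is only an example):
```
$ terraform plan -var-file="production.tfvars"
...
```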
We recommend using the "terraform.tfvars" file and keeping it out
of version control.
## Mappings
We've replaced our sensitive strings with variables, but we still
are hardcoding AMIs. Unfortunately, AMIs are specific to the region
that is in use. One option is to just ask the user to input the proper
AMI for the region, but Terraform can do better than that with
_mappings_.
Mappings are a way to create variables that are lookup tables. An example
will show this best. Let's extract our AMIs into a mapping and add
support for the "us-west-2" region as well:
```
variable "amis" {
  default = {
    "us-east-1": "ami-aa7ab6c2",
    "us-west-2": "ami-23f78e13",
  }
}
```
A variable becomes a mapping when it has a default value that is a
map like above. There is no way to create a required map.
Then, replace the "aws\_instance" with the following:
```
resource "aws_instance" "example" {
  ami = "${lookup(var.amis, var.region)}"
  instance_type = "t1.micro"
}
```
This introduces a new type of interpolation: a function call. The
`lookup` function does a dynamic lookup in a map for a key. The
key is `var.region`, which means that the value of the region
variable is used as the key.
While we don't use it in our example, it is worth noting that you
can also do a static lookup of a mapping directly with
`${var.amis.us-east-1}`.
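As a sketch, hard-coding the region with a static lookup would look
like the following (the guide keeps the dynamic `lookup` call
instead):
```
resource "aws_instance" "example" {
  ami = "${var.amis.us-east-1}"
  instance_type = "t1.micro"
}
```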
We set defaults, but mappings can also be overridden using the
`-var` and `-var-file` flags. For example, if the user wanted to
specify an alternate AMI for us-east-1:
```
$ terraform plan -var 'amis.us-east-1=foo'
...
```
## Next
Terraform provides variables for parameterizing your configurations.
Mappings let you build lookup tables in cases where that makes sense.
Setting and using variables is uniform throughout your configurations.
In the next section, we'll take a look at output variables as a
mechanism to expose certain values more prominently to the Terraform
operator.

View File

@ -60,7 +60,6 @@
<li class="first li-under"><a href="/intro/index.html">Intro</a></li>
<li class="li-under"><a href="/docs/index.html">Docs</a></li>
<li class="li-under"><a href="/community.html">Community</a></li>
<li class="li-under"><a href="http://demo.terraform.io/">Demo</a></li>
</ul>
</nav>
</div>

View File

@ -13,6 +13,18 @@
<li<%= sidebar_current("docs-aws-resource") %>>
<a href="#">Resources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-aws-resource-autoscale") %>>
<a href="/docs/providers/aws/r/autoscale.html">aws_autoscaling_group</a>
</li>
<li<%= sidebar_current("docs-aws-resource-db-instance") %>>
<a href="/docs/providers/aws/r/db_instance.html">aws_db_instance</a>
</li>
<li<%= sidebar_current("docs-aws-resource-db-security-group") %>>
<a href="/docs/providers/aws/r/db_security_group.html">aws_db_security_group</a>
</li>
<li<%= sidebar_current("docs-aws-resource-eip") %>>
<a href="/docs/providers/aws/r/eip.html">aws_eip</a>
</li>

View File

@ -0,0 +1,26 @@
<% wrap_layout :inner do %>
<% content_for :sidebar do %>
<div class="docs-sidebar hidden-print affix-top" role="complementary">
<ul class="nav docs-sidenav">
<li<%= sidebar_current("docs-home") %>>
<a href="/docs/index.html">&laquo; Documentation Home</a>
</li>
<li<%= sidebar_current("docs-do-index") %>>
<a href="/docs/providers/do/index.html">DigitalOcean Provider</a>
</li>
<li<%= sidebar_current("docs-do-resource") %>>
<a href="#">Resources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-do-resource-droplet") %>>
<a href="/docs/providers/do/r/droplet.html">digitalocean_droplet</a>
</li>
</ul>
</li>
</ul>
</div>
<% end %>
<%= yield %>
<% end %>

View File

@ -15,36 +15,28 @@
<li<%= sidebar_current("docs-commands") %>>
<a href="/docs/commands/index.html">Commands (CLI)</a>
<ul class="nav">
<li<%= sidebar_current("docs-commands-agent") %>>
<a href="/docs/commands/agent.html">agent</a>
<li<%= sidebar_current("docs-commands-apply") %>>
<a href="/docs/commands/apply.html">apply</a>
</li>
<li<%= sidebar_current("docs-commands-forceleave") %>>
<a href="/docs/commands/force-leave.html">force-leave</a>
<li<%= sidebar_current("docs-commands-graph") %>>
<a href="/docs/commands/graph.html">graph</a>
</li>
<li<%= sidebar_current("docs-commands-join") %>>
<a href="/docs/commands/join.html">join</a>
<li<%= sidebar_current("docs-commands-output") %>>
<a href="/docs/commands/output.html">output</a>
</li>
<li<%= sidebar_current("docs-commands-keygen") %>>
<a href="/docs/commands/keygen.html">keygen</a>
<li<%= sidebar_current("docs-commands-plan") %>>
<a href="/docs/commands/plan.html">plan</a>
</li>
<li<%= sidebar_current("docs-commands-leave") %>>
<a href="/docs/commands/leave.html">leave</a>
<li<%= sidebar_current("docs-commands-refresh") %>>
<a href="/docs/commands/refresh.html">refresh</a>
</li>
<li<%= sidebar_current("docs-commands-members") %>>
<a href="/docs/commands/members.html">members</a>
</li>
<li<%= sidebar_current("docs-commands-monitor") %>>
<a href="/docs/commands/monitor.html">monitor</a>
</li>
<li<%= sidebar_current("docs-commands-info") %>>
<a href="/docs/commands/info.html">info</a>
<li<%= sidebar_current("docs-commands-show") %>>
<a href="/docs/commands/show.html">show</a>
</li>
</ul>
</li>

View File

@ -0,0 +1,34 @@
<% wrap_layout :inner do %>
<% content_for :sidebar do %>
<div class="docs-sidebar hidden-print affix-top" role="complementary">
<ul class="nav docs-sidenav">
<li<%= sidebar_current("docs-home") %>>
<a href="/docs/index.html">&laquo; Documentation Home</a>
</li>
<li<%= sidebar_current("docs-heroku-index") %>>
<a href="/docs/providers/heroku/index.html">Heroku Provider</a>
</li>
<li<%= sidebar_current("docs-heroku-resource") %>>
<a href="#">Resources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-heroku-resource-addon") %>>
<a href="/docs/providers/heroku/r/addon.html">heroku_addon</a>
</li>
<li<%= sidebar_current("docs-heroku-resource-app") %>>
<a href="/docs/providers/heroku/r/app.html">heroku_app</a>
</li>
<li<%= sidebar_current("docs-heroku-resource-domain") %>>
<a href="/docs/providers/heroku/r/domain.html">heroku_domain</a>
</li>
</ul>
</li>
</ul>
</div>
<% end %>
<%= yield %>
<% end %>

View File

@ -42,20 +42,20 @@
<a href="/intro/getting-started/destroy.html">Destroy Infrastructure</a>
</li>
<li<%= sidebar_current("gettingstarted-outputs") %>>
<a href="/intro/getting-started/outputs.html">Output Variables</a>
</li>
<li<%= sidebar_current("gettingstarted-deps") %>>
<a href="/intro/getting-started/dependencies.html">Resource Dependencies</a>
</li>
<li<%= sidebar_current("gettingstarted-provision") %>>
<a href="/intro/getting-started/provision.html">Provision</a>
</li>
<li<%= sidebar_current("gettingstarted-variables") %>>
<a href="/intro/getting-started/variables.html">Input Variables</a>
</li>
<li<%= sidebar_current("gettingstarted-variables") %>>
<a href="/intro/getting-started/provisioners.html">Provision</a>
<li<%= sidebar_current("gettingstarted-outputs") %>>
<a href="/intro/getting-started/outputs.html">Output Variables</a>
</li>
<li<%= sidebar_current("gettingstarted-nextsteps") %>>

View File

@ -6,6 +6,8 @@ body.page-sub{
background-color: @light-black;
}
body.layout-heroku,
body.layout-digitalocean,
body.layout-aws,
body.layout-docs,
body.layout-inner,

View File

@ -1141,12 +1141,16 @@ body.page-home #footer {
body.page-sub {
background-color: #242424;
}
body.layout-heroku,
body.layout-digitalocean,
body.layout-aws,
body.layout-docs,
body.layout-inner,
body.layout-intro {
background: #242424 url('../images/sidebar-wire.png') left 62px no-repeat;
}
body.layout-heroku > .container .col-md-8[role=main],
body.layout-digitalocean > .container .col-md-8[role=main],
body.layout-aws > .container .col-md-8[role=main],
body.layout-docs > .container .col-md-8[role=main],
body.layout-inner > .container .col-md-8[role=main],
@ -1154,6 +1158,8 @@ body.layout-intro > .container .col-md-8[role=main] {
min-height: 800px;
background-color: white;
}
body.layout-heroku > .container .col-md-8[role=main] > div,
body.layout-digitalocean > .container .col-md-8[role=main] > div,
body.layout-aws > .container .col-md-8[role=main] > div,
body.layout-docs > .container .col-md-8[role=main] > div,
body.layout-inner > .container .col-md-8[role=main] > div,