Merge branch 'master' of github.com:hashicorp/terraform

* 'master' of github.com:hashicorp/terraform:
  provider/datadog: Update to datadog_monitor still used d.GetOk (#12497)
  Check instance is running before trying to attach (#12459)
  Fix aws_dms_replication_task diff for json with whitespace. (#12380)
  add "name" to exported attributes (#12483)
  provider/aws: Adding an acceptance test for ForceNew on ecs_task_definition volumes
  provider/aws: (#10587) Changing volumes in ECS task definition should force new revision.
  provider/aws: Change aws_spot_fleet_request tests to use the correct hash values in test cases
  Small doc updates (#12165)
  Improve description of consul_catalog_entry (#12162)
  provider/aws: Only send iops when creating io1 devices. Fix docs (#12392)
  provider/google: initial commit for node pool resource (#11802)
  Fix spurious user_data diffs
  Properly handle 'vpc_security_group_ids', drop phantom 'security_groups'
  Default 'ebs_optimized' and 'monitoring' to false
This commit is contained in:
clint shryock 2017-03-07 10:25:53 -06:00
commit 71c0c27b1e
23 changed files with 727 additions and 199 deletions

View File

@ -1,6 +1,8 @@
package aws package aws
import ( import (
"bytes"
"encoding/json"
"log" "log"
"strings" "strings"
@ -42,3 +44,17 @@ func suppressAwsDbEngineVersionDiffs(k, old, new string, d *schema.ResourceData)
// Throw a diff by default // Throw a diff by default
return false return false
} }
// suppressEquivalentJsonDiffs is a schema.DiffSuppressFunc that suppresses a
// diff when the old and new values are semantically identical JSON (e.g. they
// differ only in whitespace). If either side is not valid JSON the diff is
// never suppressed.
func suppressEquivalentJsonDiffs(k, old, new string, d *schema.ResourceData) bool {
	var oldBuf bytes.Buffer
	if err := json.Compact(&oldBuf, []byte(old)); err != nil {
		return false
	}

	var newBuf bytes.Buffer
	if err := json.Compact(&newBuf, []byte(new)); err != nil {
		return false
	}

	return jsonBytesEqual(oldBuf.Bytes(), newBuf.Bytes())
}

View File

@ -0,0 +1,31 @@
package aws
import (
"testing"
"github.com/hashicorp/terraform/helper/schema"
)
// TestSuppressEquivalentJsonDiffsWhitespaceAndNoWhitespace verifies that
// whitespace-only differences between two JSON documents are suppressed,
// while a genuine value difference is not.
func TestSuppressEquivalentJsonDiffsWhitespaceAndNoWhitespace(t *testing.T) {
	d := new(schema.ResourceData)

	compact := `{"test":"test"}`
	indented := `
{
  "test": "test"
}`
	if !suppressEquivalentJsonDiffs("", compact, indented, d) {
		t.Errorf("Expected suppressEquivalentJsonDiffs to return true for %s == %s", compact, indented)
	}

	compactChanged := `{"test":"test"}`
	indentedChanged := `
{
  "test": "tested"
}`
	if suppressEquivalentJsonDiffs("", compactChanged, indentedChanged, d) {
		t.Errorf("Expected suppressEquivalentJsonDiffs to return false for %s == %s", compactChanged, indentedChanged)
	}
}

View File

@ -60,6 +60,7 @@ func resourceAwsDmsReplicationTask() *schema.Resource {
Type: schema.TypeString, Type: schema.TypeString,
Optional: true, Optional: true,
ValidateFunc: validateJsonString, ValidateFunc: validateJsonString,
DiffSuppressFunc: suppressEquivalentJsonDiffs,
}, },
"source_endpoint_arn": { "source_endpoint_arn": {
Type: schema.TypeString, Type: schema.TypeString,
@ -71,6 +72,7 @@ func resourceAwsDmsReplicationTask() *schema.Resource {
Type: schema.TypeString, Type: schema.TypeString,
Required: true, Required: true,
ValidateFunc: validateJsonString, ValidateFunc: validateJsonString,
DiffSuppressFunc: suppressEquivalentJsonDiffs,
}, },
"tags": { "tags": {
Type: schema.TypeMap, Type: schema.TypeMap,

View File

@ -70,11 +70,13 @@ func resourceAwsEcsTaskDefinition() *schema.Resource {
"name": { "name": {
Type: schema.TypeString, Type: schema.TypeString,
Required: true, Required: true,
ForceNew: true,
}, },
"host_path": { "host_path": {
Type: schema.TypeString, Type: schema.TypeString,
Optional: true, Optional: true,
ForceNew: true,
}, },
}, },
}, },

View File

@ -135,6 +135,41 @@ func TestAccAWSEcsTaskDefinition_constraint(t *testing.T) {
}) })
} }
// TestAccAWSEcsTaskDefinition_changeVolumesForcesNewResource is an acceptance
// test verifying that changing a task definition's volumes forces a new
// revision: it applies the base config, applies a config with a different
// volume, and asserts the revision changed between the two.
func TestAccAWSEcsTaskDefinition_changeVolumesForcesNewResource(t *testing.T) {
	var original, updated ecs.TaskDefinition

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSEcsTaskDefinitionDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccAWSEcsTaskDefinition,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.jenkins", &original),
				),
			},
			{
				Config: testAccAWSEcsTaskDefinitionUpdatedVolume,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.jenkins", &updated),
					testAccCheckEcsTaskDefinitionRecreated(t, &original, &updated),
				),
			},
		},
	})
}
// testAccCheckEcsTaskDefinitionRecreated returns a TestCheckFunc asserting
// that the task definition was re-created between two captured states, i.e.
// that its revision number changed.
//
// Fix: the original passed the *int64 pointer to %v, printing the pointer's
// address rather than the revision number; dereference it for a useful
// failure message.
// NOTE(review): resource.TestCheckFunc implementations conventionally return
// an error instead of calling t.Fatalf; kept as-is to avoid changing the
// failure semantics callers may rely on.
func testAccCheckEcsTaskDefinitionRecreated(t *testing.T,
	before, after *ecs.TaskDefinition) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if *before.Revision == *after.Revision {
			t.Fatalf("Expected change of TaskDefinition Revisions, but both were %v", *before.Revision)
		}
		return nil
	}
}
func testAccCheckAWSTaskDefinitionConstraintsAttrs(def *ecs.TaskDefinition) resource.TestCheckFunc { func testAccCheckAWSTaskDefinitionConstraintsAttrs(def *ecs.TaskDefinition) resource.TestCheckFunc {
return func(s *terraform.State) error { return func(s *terraform.State) error {
if len(def.PlacementConstraints) != 1 { if len(def.PlacementConstraints) != 1 {
@ -319,6 +354,55 @@ TASK_DEFINITION
} }
` `
// testAccAWSEcsTaskDefinitionUpdatedVolume is the same Jenkins/MongoDB task
// definition as the base fixture, but with a different volume host_path
// ("/ecs/jenkins"). It is used to verify that changing volumes forces a new
// task definition revision.
var testAccAWSEcsTaskDefinitionUpdatedVolume = `
resource "aws_ecs_task_definition" "jenkins" {
  family = "terraform-acc-test"
  container_definitions = <<TASK_DEFINITION
[
	{
		"cpu": 10,
		"command": ["sleep", "10"],
		"entryPoint": ["/"],
		"environment": [
			{"name": "VARNAME", "value": "VARVAL"}
		],
		"essential": true,
		"image": "jenkins",
		"links": ["mongodb"],
		"memory": 128,
		"name": "jenkins",
		"portMappings": [
			{
				"containerPort": 80,
				"hostPort": 8080
			}
		]
	},
	{
		"cpu": 10,
		"command": ["sleep", "10"],
		"entryPoint": ["/"],
		"essential": true,
		"image": "mongodb",
		"memory": 128,
		"name": "mongodb",
		"portMappings": [
			{
				"containerPort": 28017,
				"hostPort": 28017
			}
		]
	}
]
TASK_DEFINITION

  volume {
    name = "jenkins-home"
    host_path = "/ecs/jenkins"
  }
}
`
var testAccAWSEcsTaskDefinitionWithScratchVolume = ` var testAccAWSEcsTaskDefinitionWithScratchVolume = `
resource "aws_ecs_task_definition" "sleep" { resource "aws_ecs_task_definition" "sleep" {
family = "terraform-acc-sc-volume-test" family = "terraform-acc-sc-volume-test"

View File

@ -1034,11 +1034,16 @@ func readBlockDeviceMappingsFromConfig(
if v, ok := bd["volume_type"].(string); ok && v != "" { if v, ok := bd["volume_type"].(string); ok && v != "" {
ebs.VolumeType = aws.String(v) ebs.VolumeType = aws.String(v)
} if "io1" == strings.ToLower(v) {
// Condition: This parameter is required for requests to create io1
// volumes; it is not used in requests to create gp2, st1, sc1, or
// standard volumes.
// See: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html
if v, ok := bd["iops"].(int); ok && v > 0 { if v, ok := bd["iops"].(int); ok && v > 0 {
ebs.Iops = aws.Int64(int64(v)) ebs.Iops = aws.Int64(int64(v))
} }
}
}
blockDevices = append(blockDevices, &ec2.BlockDeviceMapping{ blockDevices = append(blockDevices, &ec2.BlockDeviceMapping{
DeviceName: aws.String(bd["device_name"].(string)), DeviceName: aws.String(bd["device_name"].(string)),

View File

@ -1060,7 +1060,6 @@ resource "aws_instance" "foo" {
root_block_device { root_block_device {
volume_type = "gp2" volume_type = "gp2"
volume_size = 11 volume_size = 11
iops = 330
} }
} }
` `

View File

@ -2,9 +2,6 @@ package aws
import ( import (
"bytes" "bytes"
"crypto/sha1"
"encoding/base64"
"encoding/hex"
"fmt" "fmt"
"log" "log"
"strconv" "strconv"
@ -168,6 +165,7 @@ func resourceAwsSpotFleetRequest() *schema.Resource {
"ebs_optimized": &schema.Schema{ "ebs_optimized": &schema.Schema{
Type: schema.TypeBool, Type: schema.TypeBool,
Optional: true, Optional: true,
Default: false,
}, },
"iam_instance_profile": &schema.Schema{ "iam_instance_profile": &schema.Schema{
Type: schema.TypeString, Type: schema.TypeString,
@ -194,6 +192,7 @@ func resourceAwsSpotFleetRequest() *schema.Resource {
"monitoring": &schema.Schema{ "monitoring": &schema.Schema{
Type: schema.TypeBool, Type: schema.TypeBool,
Optional: true, Optional: true,
Default: false,
}, },
"placement_group": &schema.Schema{ "placement_group": &schema.Schema{
Type: schema.TypeString, Type: schema.TypeString,
@ -213,8 +212,7 @@ func resourceAwsSpotFleetRequest() *schema.Resource {
StateFunc: func(v interface{}) string { StateFunc: func(v interface{}) string {
switch v.(type) { switch v.(type) {
case string: case string:
hash := sha1.Sum([]byte(v.(string))) return userDataHashSum(v.(string))
return hex.EncodeToString(hash[:])
default: default:
return "" return ""
} }
@ -323,8 +321,7 @@ func buildSpotFleetLaunchSpecification(d map[string]interface{}, meta interface{
} }
if v, ok := d["user_data"]; ok { if v, ok := d["user_data"]; ok {
opts.UserData = aws.String( opts.UserData = aws.String(base64Encode([]byte(v.(string))))
base64Encode([]byte(v.(string))))
} }
if v, ok := d["key_name"]; ok { if v, ok := d["key_name"]; ok {
@ -339,21 +336,11 @@ func buildSpotFleetLaunchSpecification(d map[string]interface{}, meta interface{
opts.WeightedCapacity = aws.Float64(wc) opts.WeightedCapacity = aws.Float64(wc)
} }
var groups []*string var securityGroupIds []*string
if v, ok := d["security_groups"]; ok {
sgs := v.(*schema.Set).List()
for _, v := range sgs {
str := v.(string)
groups = append(groups, aws.String(str))
}
}
var groupIds []*string
if v, ok := d["vpc_security_group_ids"]; ok { if v, ok := d["vpc_security_group_ids"]; ok {
if s := v.(*schema.Set); s.Len() > 0 { if s := v.(*schema.Set); s.Len() > 0 {
for _, v := range s.List() { for _, v := range s.List() {
opts.SecurityGroups = append(opts.SecurityGroups, &ec2.GroupIdentifier{GroupId: aws.String(v.(string))}) securityGroupIds = append(securityGroupIds, aws.String(v.(string)))
groupIds = append(groupIds, aws.String(v.(string)))
} }
} }
} }
@ -378,11 +365,15 @@ func buildSpotFleetLaunchSpecification(d map[string]interface{}, meta interface{
DeleteOnTermination: aws.Bool(true), DeleteOnTermination: aws.Bool(true),
DeviceIndex: aws.Int64(int64(0)), DeviceIndex: aws.Int64(int64(0)),
SubnetId: aws.String(subnetId.(string)), SubnetId: aws.String(subnetId.(string)),
Groups: groupIds, Groups: securityGroupIds,
} }
opts.NetworkInterfaces = []*ec2.InstanceNetworkInterfaceSpecification{ni} opts.NetworkInterfaces = []*ec2.InstanceNetworkInterfaceSpecification{ni}
opts.SubnetId = aws.String("") opts.SubnetId = aws.String("")
} else {
for _, id := range securityGroupIds {
opts.SecurityGroups = append(opts.SecurityGroups, &ec2.GroupIdentifier{GroupId: id})
}
} }
blockDevices, err := readSpotFleetBlockDeviceMappingsFromConfig(d, conn) blockDevices, err := readSpotFleetBlockDeviceMappingsFromConfig(d, conn)
@ -730,24 +721,20 @@ func resourceAwsSpotFleetRequestRead(d *schema.ResourceData, meta interface{}) e
return nil return nil
} }
func launchSpecsToSet(ls []*ec2.SpotFleetLaunchSpecification, conn *ec2.EC2) *schema.Set { func launchSpecsToSet(launchSpecs []*ec2.SpotFleetLaunchSpecification, conn *ec2.EC2) *schema.Set {
specs := &schema.Set{F: hashLaunchSpecification} specSet := &schema.Set{F: hashLaunchSpecification}
for _, val := range ls { for _, spec := range launchSpecs {
dn, err := fetchRootDeviceName(aws.StringValue(val.ImageId), conn) rootDeviceName, err := fetchRootDeviceName(aws.StringValue(spec.ImageId), conn)
if err != nil { if err != nil {
log.Panic(err) log.Panic(err)
} else {
ls := launchSpecToMap(val, dn)
specs.Add(ls)
}
}
return specs
} }
func launchSpecToMap( specSet.Add(launchSpecToMap(spec, rootDeviceName))
l *ec2.SpotFleetLaunchSpecification, }
rootDevName *string, return specSet
) map[string]interface{} { }
func launchSpecToMap(l *ec2.SpotFleetLaunchSpecification, rootDevName *string) map[string]interface{} {
m := make(map[string]interface{}) m := make(map[string]interface{})
m["root_block_device"] = rootBlockDeviceToSet(l.BlockDeviceMappings, rootDevName) m["root_block_device"] = rootBlockDeviceToSet(l.BlockDeviceMappings, rootDevName)
@ -779,10 +766,7 @@ func launchSpecToMap(
} }
if l.UserData != nil { if l.UserData != nil {
ud_dec, err := base64.StdEncoding.DecodeString(aws.StringValue(l.UserData)) m["user_data"] = userDataHashSum(aws.StringValue(l.UserData))
if err == nil {
m["user_data"] = string(ud_dec)
}
} }
if l.KeyName != nil { if l.KeyName != nil {
@ -797,11 +781,23 @@ func launchSpecToMap(
m["subnet_id"] = aws.StringValue(l.SubnetId) m["subnet_id"] = aws.StringValue(l.SubnetId)
} }
securityGroupIds := &schema.Set{F: schema.HashString}
if len(l.NetworkInterfaces) > 0 {
// This resource auto-creates one network interface when associate_public_ip_address is true
for _, group := range l.NetworkInterfaces[0].Groups {
securityGroupIds.Add(aws.StringValue(group))
}
} else {
for _, group := range l.SecurityGroups {
securityGroupIds.Add(aws.StringValue(group.GroupId))
}
}
m["vpc_security_group_ids"] = securityGroupIds
if l.WeightedCapacity != nil { if l.WeightedCapacity != nil {
m["weighted_capacity"] = strconv.FormatFloat(*l.WeightedCapacity, 'f', 0, 64) m["weighted_capacity"] = strconv.FormatFloat(*l.WeightedCapacity, 'f', 0, 64)
} }
// m["security_groups"] = securityGroupsToSet(l.SecutiryGroups)
return m return m
} }
@ -1009,7 +1005,6 @@ func hashLaunchSpecification(v interface{}) int {
} }
buf.WriteString(fmt.Sprintf("%s-", m["instance_type"].(string))) buf.WriteString(fmt.Sprintf("%s-", m["instance_type"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["spot_price"].(string))) buf.WriteString(fmt.Sprintf("%s-", m["spot_price"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["user_data"].(string)))
return hashcode.String(buf.String()) return hashcode.String(buf.String())
} }

View File

@ -100,9 +100,9 @@ func TestAccAWSSpotFleetRequest_lowestPriceAzInGivenList(t *testing.T) {
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.#", "2"), "aws_spot_fleet_request.foo", "launch_specification.#", "2"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.1590006269.availability_zone", "us-west-2a"), "aws_spot_fleet_request.foo", "launch_specification.335709043.availability_zone", "us-west-2a"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.3809475891.availability_zone", "us-west-2b"), "aws_spot_fleet_request.foo", "launch_specification.1671188867.availability_zone", "us-west-2b"),
), ),
}, },
}, },
@ -154,13 +154,13 @@ func TestAccAWSSpotFleetRequest_multipleInstanceTypesInSameAz(t *testing.T) {
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.#", "2"), "aws_spot_fleet_request.foo", "launch_specification.#", "2"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.1590006269.instance_type", "m1.small"), "aws_spot_fleet_request.foo", "launch_specification.335709043.instance_type", "m1.small"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.1590006269.availability_zone", "us-west-2a"), "aws_spot_fleet_request.foo", "launch_specification.335709043.availability_zone", "us-west-2a"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.3079734941.instance_type", "m3.large"), "aws_spot_fleet_request.foo", "launch_specification.590403189.instance_type", "m3.large"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.3079734941.availability_zone", "us-west-2a"), "aws_spot_fleet_request.foo", "launch_specification.590403189.availability_zone", "us-west-2a"),
), ),
}, },
}, },
@ -214,13 +214,13 @@ func TestAccAWSSpotFleetRequest_overriddingSpotPrice(t *testing.T) {
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.#", "2"), "aws_spot_fleet_request.foo", "launch_specification.#", "2"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.522395050.spot_price", "0.01"), "aws_spot_fleet_request.foo", "launch_specification.4143232216.spot_price", "0.01"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.522395050.instance_type", "m3.large"), "aws_spot_fleet_request.foo", "launch_specification.4143232216.instance_type", "m3.large"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.1590006269.spot_price", ""), //there will not be a value here since it's not overriding "aws_spot_fleet_request.foo", "launch_specification.335709043.spot_price", ""), //there will not be a value here since it's not overriding
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.1590006269.instance_type", "m1.small"), "aws_spot_fleet_request.foo", "launch_specification.335709043.instance_type", "m1.small"),
), ),
}, },
}, },
@ -289,13 +289,13 @@ func TestAccAWSSpotFleetRequest_withWeightedCapacity(t *testing.T) {
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.#", "2"), "aws_spot_fleet_request.foo", "launch_specification.#", "2"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.2325690000.weighted_capacity", "3"), "aws_spot_fleet_request.foo", "launch_specification.4120185872.weighted_capacity", "3"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.2325690000.instance_type", "r3.large"), "aws_spot_fleet_request.foo", "launch_specification.4120185872.instance_type", "r3.large"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.3079734941.weighted_capacity", "6"), "aws_spot_fleet_request.foo", "launch_specification.590403189.weighted_capacity", "6"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.3079734941.instance_type", "m3.large"), "aws_spot_fleet_request.foo", "launch_specification.590403189.instance_type", "m3.large"),
), ),
}, },
}, },

View File

@ -77,6 +77,25 @@ func resourceAwsVolumeAttachmentCreate(d *schema.ResourceData, meta interface{})
vols, err := conn.DescribeVolumes(request) vols, err := conn.DescribeVolumes(request)
if (err != nil) || (len(vols.Volumes) == 0) { if (err != nil) || (len(vols.Volumes) == 0) {
// This handles the situation where the instance is created by
// a spot request and whilst the request has been fulfilled the
// instance is not running yet
stateConf := &resource.StateChangeConf{
Pending: []string{"pending"},
Target: []string{"running"},
Refresh: InstanceStateRefreshFunc(conn, iID),
Timeout: 10 * time.Minute,
Delay: 10 * time.Second,
MinTimeout: 3 * time.Second,
}
_, err = stateConf.WaitForState()
if err != nil {
return fmt.Errorf(
"Error waiting for instance (%s) to become ready: %s",
iID, err)
}
// not attached // not attached
opts := &ec2.AttachVolumeInput{ opts := &ec2.AttachVolumeInput{
Device: aws.String(name), Device: aws.String(name),

View File

@ -2,6 +2,8 @@ package aws
import ( import (
"encoding/base64" "encoding/base64"
"encoding/json"
"reflect"
"regexp" "regexp"
) )
@ -24,3 +26,17 @@ func isBase64Encoded(data []byte) bool {
func looksLikeJsonString(s interface{}) bool { func looksLikeJsonString(s interface{}) bool {
return regexp.MustCompile(`^\s*{`).MatchString(s.(string)) return regexp.MustCompile(`^\s*{`).MatchString(s.(string))
} }
// jsonBytesEqual reports whether b1 and b2 decode to deeply-equal JSON
// values, ignoring formatting differences. If either side fails to parse as
// JSON the result is false.
func jsonBytesEqual(b1, b2 []byte) bool {
	var v1, v2 interface{}

	if err := json.Unmarshal(b1, &v1); err != nil {
		return false
	}
	if err := json.Unmarshal(b2, &v2); err != nil {
		return false
	}

	return reflect.DeepEqual(v1, v2)
}

View File

@ -32,3 +32,41 @@ func TestLooksLikeJsonString(t *testing.T) {
t.Errorf("Expected looksLikeJson to return false for %s", doesNotLookLikeJson) t.Errorf("Expected looksLikeJson to return false for %s", doesNotLookLikeJson)
} }
} }
// TestJsonBytesEqualQuotedAndUnquoted checks that two JSON documents that
// differ only in Go string-literal style (raw vs. interpreted quoting)
// compare equal, and that a real value difference does not.
func TestJsonBytesEqualQuotedAndUnquoted(t *testing.T) {
	raw := `{"test": "test"}`
	interpreted := "{\"test\": \"test\"}"
	if !jsonBytesEqual([]byte(raw), []byte(interpreted)) {
		t.Errorf("Expected jsonBytesEqual to return true for %s == %s", raw, interpreted)
	}

	rawChanged := `{"test": "test"}`
	interpretedChanged := "{\"test\": \"tested\"}"
	if jsonBytesEqual([]byte(rawChanged), []byte(interpretedChanged)) {
		t.Errorf("Expected jsonBytesEqual to return false for %s == %s", rawChanged, interpretedChanged)
	}
}
// TestJsonBytesEqualWhitespaceAndNoWhitespace checks that whitespace-only
// formatting differences compare equal, while a genuine value difference
// does not.
func TestJsonBytesEqualWhitespaceAndNoWhitespace(t *testing.T) {
	compact := `{"test":"test"}`
	indented := `
{
  "test": "test"
}`
	if !jsonBytesEqual([]byte(compact), []byte(indented)) {
		t.Errorf("Expected jsonBytesEqual to return true for %s == %s", compact, indented)
	}

	compactChanged := `{"test":"test"}`
	indentedChanged := `
{
  "test": "tested"
}`
	if jsonBytesEqual([]byte(compactChanged), []byte(indentedChanged)) {
		t.Errorf("Expected jsonBytesEqual to return false for %s == %s", compactChanged, indentedChanged)
	}
}

View File

@ -325,6 +325,8 @@ func resourceDatadogMonitorUpdate(d *schema.ResourceData, meta interface{}) erro
o := datadog.Options{ o := datadog.Options{
NotifyNoData: datadog.Bool(d.Get("notify_no_data").(bool)), NotifyNoData: datadog.Bool(d.Get("notify_no_data").(bool)),
RequireFullWindow: datadog.Bool(d.Get("require_full_window").(bool)),
IncludeTags: datadog.Bool(d.Get("include_tags").(bool)),
} }
if attr, ok := d.GetOk("thresholds"); ok { if attr, ok := d.GetOk("thresholds"); ok {
thresholds := attr.(map[string]interface{}) thresholds := attr.(map[string]interface{})
@ -340,9 +342,6 @@ func resourceDatadogMonitorUpdate(d *schema.ResourceData, meta interface{}) erro
} }
} }
if attr, ok := d.GetOk("notify_no_data"); ok {
o.SetNotifyNoData(attr.(bool))
}
if attr, ok := d.GetOk("new_host_delay"); ok { if attr, ok := d.GetOk("new_host_delay"); ok {
o.SetNewHostDelay(attr.(int)) o.SetNewHostDelay(attr.(int))
} }
@ -369,12 +368,6 @@ func resourceDatadogMonitorUpdate(d *schema.ResourceData, meta interface{}) erro
} }
o.Silenced = s o.Silenced = s
} }
if attr, ok := d.GetOk("include_tags"); ok {
o.SetIncludeTags(attr.(bool))
}
if attr, ok := d.GetOk("require_full_window"); ok {
o.SetRequireFullWindow(attr.(bool))
}
if attr, ok := d.GetOk("locked"); ok { if attr, ok := d.GetOk("locked"); ok {
o.SetLocked(attr.(bool)) o.SetLocked(attr.(bool))
} }

View File

@ -0,0 +1,59 @@
package google
import (
"fmt"
"log"
"time"
"github.com/hashicorp/terraform/helper/resource"
"google.golang.org/api/container/v1"
)
// ContainerOperationWaiter polls a Google Container Engine (GKE) operation
// until it reaches a terminal state.
type ContainerOperationWaiter struct {
	Service *container.Service   // container API client used for polling
	Op      *container.Operation // the operation being waited on
	Project string               // project the operation belongs to
	Zone    string               // zone the operation was issued in
}
// Conf builds a StateChangeConf that treats PENDING and RUNNING as in-flight
// states and DONE as the target, refreshing via the waiter's RefreshFunc.
// Timeout and MinTimeout are left for the caller to fill in.
func (w *ContainerOperationWaiter) Conf() *resource.StateChangeConf {
	conf := &resource.StateChangeConf{
		Pending: []string{"PENDING", "RUNNING"},
		Target:  []string{"DONE"},
	}
	conf.Refresh = w.RefreshFunc()
	return conf
}
// RefreshFunc returns a StateRefreshFunc that re-fetches the operation from
// the API and reports its current status string as the state.
func (w *ContainerOperationWaiter) RefreshFunc() resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		op, err := w.Service.Projects.Zones.Operations.Get(w.Project, w.Zone, w.Op.Name).Do()
		if err != nil {
			return nil, "", err
		}

		log.Printf("[DEBUG] Progress of operation %q: %q", w.Op.Name, op.Status)

		return op, op.Status, nil
	}
}
// containerOperationWait blocks until the given container engine operation
// finishes or the timeout elapses. Any failure is wrapped with the supplied
// activity description (e.g. "creating GKE cluster").
func containerOperationWait(config *Config, op *container.Operation, project, zone, activity string, timeoutMinutes, minTimeoutSeconds int) error {
	waiter := &ContainerOperationWaiter{
		Service: config.clientContainer,
		Op:      op,
		Project: project,
		Zone:    zone,
	}

	conf := waiter.Conf()
	conf.Timeout = time.Duration(timeoutMinutes) * time.Minute
	conf.MinTimeout = time.Duration(minTimeoutSeconds) * time.Second

	if _, err := conf.WaitForState(); err != nil {
		return fmt.Errorf("Error waiting for %s: %s", activity, err)
	}

	return nil
}

View File

@ -91,6 +91,7 @@ func Provider() terraform.ResourceProvider {
"google_compute_vpn_gateway": resourceComputeVpnGateway(), "google_compute_vpn_gateway": resourceComputeVpnGateway(),
"google_compute_vpn_tunnel": resourceComputeVpnTunnel(), "google_compute_vpn_tunnel": resourceComputeVpnTunnel(),
"google_container_cluster": resourceContainerCluster(), "google_container_cluster": resourceContainerCluster(),
"google_container_node_pool": resourceContainerNodePool(),
"google_dns_managed_zone": resourceDnsManagedZone(), "google_dns_managed_zone": resourceDnsManagedZone(),
"google_dns_record_set": resourceDnsRecordSet(), "google_dns_record_set": resourceDnsRecordSet(),
"google_sql_database": resourceSqlDatabase(), "google_sql_database": resourceSqlDatabase(),

View File

@ -5,9 +5,7 @@ import (
"log" "log"
"net" "net"
"regexp" "regexp"
"time"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/container/v1" "google.golang.org/api/container/v1"
"google.golang.org/api/googleapi" "google.golang.org/api/googleapi"
@ -389,23 +387,11 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
} }
// Wait until it's created // Wait until it's created
wait := resource.StateChangeConf{ waitErr := containerOperationWait(config, op, project, zoneName, "creating GKE cluster", 30, 3)
Pending: []string{"PENDING", "RUNNING"}, if waitErr != nil {
Target: []string{"DONE"}, // The resource didn't actually create
Timeout: 30 * time.Minute, d.SetId("")
MinTimeout: 3 * time.Second, return waitErr
Refresh: func() (interface{}, string, error) {
resp, err := config.clientContainer.Projects.Zones.Operations.Get(
project, zoneName, op.Name).Do()
log.Printf("[DEBUG] Progress of creating GKE cluster %s: %s",
clusterName, resp.Status)
return resp, resp.Status, err
},
}
_, err = wait.WaitForState()
if err != nil {
return err
} }
log.Printf("[INFO] GKE cluster %s has been created", clusterName) log.Printf("[INFO] GKE cluster %s has been created", clusterName)
@ -503,24 +489,9 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
} }
// Wait until it's updated // Wait until it's updated
wait := resource.StateChangeConf{ waitErr := containerOperationWait(config, op, project, zoneName, "updating GKE cluster", 10, 2)
Pending: []string{"PENDING", "RUNNING"}, if waitErr != nil {
Target: []string{"DONE"}, return waitErr
Timeout: 10 * time.Minute,
MinTimeout: 2 * time.Second,
Refresh: func() (interface{}, string, error) {
log.Printf("[DEBUG] Checking if GKE cluster %s is updated", clusterName)
resp, err := config.clientContainer.Projects.Zones.Operations.Get(
project, zoneName, op.Name).Do()
log.Printf("[DEBUG] Progress of updating GKE cluster %s: %s",
clusterName, resp.Status)
return resp, resp.Status, err
},
}
_, err = wait.WaitForState()
if err != nil {
return err
} }
log.Printf("[INFO] GKE cluster %s has been updated to %s", d.Id(), log.Printf("[INFO] GKE cluster %s has been updated to %s", d.Id(),
@ -548,24 +519,9 @@ func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) er
} }
// Wait until it's deleted // Wait until it's deleted
wait := resource.StateChangeConf{ waitErr := containerOperationWait(config, op, project, zoneName, "deleting GKE cluster", 10, 3)
Pending: []string{"PENDING", "RUNNING"}, if waitErr != nil {
Target: []string{"DONE"}, return waitErr
Timeout: 10 * time.Minute,
MinTimeout: 3 * time.Second,
Refresh: func() (interface{}, string, error) {
log.Printf("[DEBUG] Checking if GKE cluster %s is deleted", clusterName)
resp, err := config.clientContainer.Projects.Zones.Operations.Get(
project, zoneName, op.Name).Do()
log.Printf("[DEBUG] Progress of deleting GKE cluster %s: %s",
clusterName, resp.Status)
return resp, resp.Status, err
},
}
_, err = wait.WaitForState()
if err != nil {
return err
} }
log.Printf("[INFO] GKE cluster %s has been deleted", d.Id()) log.Printf("[INFO] GKE cluster %s has been deleted", d.Id())

View File

@ -0,0 +1,191 @@
package google
import (
"fmt"
"log"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/container/v1"
"google.golang.org/api/googleapi"
)
// resourceContainerNodePool defines the google_container_node_pool resource,
// which manages a node pool attached to an existing GKE cluster. Every
// argument is ForceNew: there is no Update function, so any change re-creates
// the pool.
func resourceContainerNodePool() *schema.Resource {
	return &schema.Resource{
		Create: resourceContainerNodePoolCreate,
		Read:   resourceContainerNodePoolRead,
		Delete: resourceContainerNodePoolDelete,
		Exists: resourceContainerNodePoolExists,

		Schema: map[string]*schema.Schema{
			// Project the cluster lives in; optional, presumably falling back
			// to the provider-level project via getProject — see Create.
			"project": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			// Explicit pool name; mutually exclusive with name_prefix. When
			// neither is set, a unique name is generated at create time.
			"name": &schema.Schema{
				Type:          schema.TypeString,
				Optional:      true,
				Computed:      true,
				ConflictsWith: []string{"name_prefix"},
				ForceNew:      true,
			},

			// Prefix for a generated unique name (resource.PrefixedUniqueId).
			"name_prefix": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			// Zone of the cluster the pool is attached to.
			"zone": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			// Name of the existing GKE cluster to attach the pool to.
			"cluster": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			// Number of nodes to create in the pool initially.
			"initial_node_count": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
				ForceNew: true,
			},
		},
	}
}
// resourceContainerNodePoolCreate creates a GKE node pool in the configured
// cluster/zone, waits for the create operation to finish, then reads the
// resulting state back. On a failed wait the ID is cleared because the pool
// was not actually created.
func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	zone := d.Get("zone").(string)
	cluster := d.Get("cluster").(string)

	// Prefer an explicit name, then a prefixed unique name, then a fully
	// generated unique ID.
	var poolName string
	if v, ok := d.GetOk("name"); ok {
		poolName = v.(string)
	} else if v, ok := d.GetOk("name_prefix"); ok {
		poolName = resource.PrefixedUniqueId(v.(string))
	} else {
		poolName = resource.UniqueId()
	}

	req := &container.CreateNodePoolRequest{
		NodePool: &container.NodePool{
			Name:             poolName,
			InitialNodeCount: int64(d.Get("initial_node_count").(int)),
		},
	}

	op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Create(project, zone, cluster, req).Do()
	if err != nil {
		return fmt.Errorf("Error creating NodePool: %s", err)
	}

	if waitErr := containerOperationWait(config, op, project, zone, "creating GKE NodePool", 10, 3); waitErr != nil {
		// The resource didn't actually create
		d.SetId("")
		return waitErr
	}

	log.Printf("[INFO] GKE NodePool %s has been created", poolName)

	d.SetId(poolName)

	return resourceContainerNodePoolRead(d, meta)
}
// resourceContainerNodePoolRead refreshes the node pool's name and
// initial_node_count from the GKE API.
func resourceContainerNodePoolRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	zone := d.Get("zone").(string)
	cluster := d.Get("cluster").(string)
	poolName := d.Get("name").(string)

	pool, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get(
		project, zone, cluster, poolName).Do()
	if err != nil {
		return fmt.Errorf("Error reading NodePool: %s", err)
	}

	d.Set("name", pool.Name)
	d.Set("initial_node_count", pool.InitialNodeCount)

	return nil
}
// resourceContainerNodePoolDelete deletes the node pool and waits for the
// delete operation to complete before clearing the resource ID.
func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	zone := d.Get("zone").(string)
	cluster := d.Get("cluster").(string)
	poolName := d.Get("name").(string)

	op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Delete(
		project, zone, cluster, poolName).Do()
	if err != nil {
		return fmt.Errorf("Error deleting NodePool: %s", err)
	}

	// Wait until it's deleted
	if waitErr := containerOperationWait(config, op, project, zone, "deleting GKE NodePool", 10, 2); waitErr != nil {
		return waitErr
	}

	log.Printf("[INFO] GKE NodePool %s has been deleted", d.Id())

	d.SetId("")

	return nil
}
// resourceContainerNodePoolExists reports whether the node pool still exists
// in GCP.
//
// Fix: on a 404 the original returned (false, err) with a non-nil error,
// which makes Terraform abort the refresh instead of removing the vanished
// resource from state — contradicting the "Removing ... because it's gone"
// log line. An Exists implementation must return (false, nil) for a
// cleanly-missing resource.
func resourceContainerNodePoolExists(d *schema.ResourceData, meta interface{}) (bool, error) {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return false, err
	}

	zone := d.Get("zone").(string)
	name := d.Get("name").(string)
	cluster := d.Get("cluster").(string)

	_, err = config.clientContainer.Projects.Zones.Clusters.NodePools.Get(
		project, zone, cluster, name).Do()
	if err != nil {
		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
			log.Printf("[WARN] Removing Container NodePool %q because it's gone", name)
			// The resource doesn't exist anymore; a nil error lets Terraform
			// drop it from state rather than fail the refresh.
			return false, nil
		}
		// There was some other error in reading the resource
		return true, err
	}
	return true, nil
}

View File

@ -0,0 +1,101 @@
package google
import (
"fmt"
"strconv"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
// TestAccContainerNodePool_basic provisions a cluster plus a node pool and
// verifies the pool exists remotely with the attributes recorded in state.
func TestAccContainerNodePool_basic(t *testing.T) {
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckContainerNodePoolDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccContainerNodePool_basic,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckContainerNodePoolMatches("google_container_node_pool.np"),
				),
			},
		},
	})
}
// testAccCheckContainerNodePoolDestroy verifies that after destroy no node
// pool tracked in state can still be fetched from the GKE API.
func testAccCheckContainerNodePoolDestroy(s *terraform.State) error {
	config := testAccProvider.Meta().(*Config)

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "google_container_node_pool" {
			continue
		}

		attrs := rs.Primary.Attributes
		if _, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get(
			config.Project, attrs["zone"], attrs["cluster"], attrs["name"]).Do(); err == nil {
			return fmt.Errorf("NodePool still exists")
		}
	}

	return nil
}
// testAccCheckContainerNodePoolMatches returns a check asserting that the
// node pool at resource address n matches what the GKE API reports
// (name and initial_node_count).
func testAccCheckContainerNodePoolMatches(n string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		config := testAccProvider.Meta().(*Config)

		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}
		if rs.Primary.ID == "" {
			return fmt.Errorf("No ID is set")
		}

		attrs := rs.Primary.Attributes
		found, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get(
			config.Project, attrs["zone"], attrs["cluster"], attrs["name"]).Do()
		if err != nil {
			return err
		}

		if found.Name != attrs["name"] {
			return fmt.Errorf("NodePool not found")
		}

		// initial_node_count is stored as a string in state; compare as int64.
		inc, err := strconv.Atoi(attrs["initial_node_count"])
		if err != nil {
			return err
		}
		if found.InitialNodeCount != int64(inc) {
			return fmt.Errorf("Mismatched initialNodeCount. TF State: %s. GCP State: %d",
				attrs["initial_node_count"], found.InitialNodeCount)
		}

		return nil
	}
}
// testAccContainerNodePool_basic is the acceptance-test config: a small GKE
// cluster plus a 2-node pool attached to it. Names are randomized per run to
// avoid collisions between concurrent test executions.
var testAccContainerNodePool_basic = fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
	name = "tf-cluster-nodepool-test-%s"
	zone = "us-central1-a"
	initial_node_count = 3
	master_auth {
		username = "mr.yoda"
		password = "adoy.rm"
	}
}
resource "google_container_node_pool" "np" {
	name = "tf-nodepool-test-%s"
	zone = "us-central1-a"
	cluster = "${google_container_cluster.cluster.name}"
	initial_node_count = 2
}`, acctest.RandString(10), acctest.RandString(10))

View File

@ -23,12 +23,13 @@ import (
datadogprovider "github.com/hashicorp/terraform/builtin/providers/datadog" datadogprovider "github.com/hashicorp/terraform/builtin/providers/datadog"
digitaloceanprovider "github.com/hashicorp/terraform/builtin/providers/digitalocean" digitaloceanprovider "github.com/hashicorp/terraform/builtin/providers/digitalocean"
dmeprovider "github.com/hashicorp/terraform/builtin/providers/dme" dmeprovider "github.com/hashicorp/terraform/builtin/providers/dme"
dnsprovider "github.com/hashicorp/terraform/builtin/providers/dns"
dnsimpleprovider "github.com/hashicorp/terraform/builtin/providers/dnsimple" dnsimpleprovider "github.com/hashicorp/terraform/builtin/providers/dnsimple"
dnsprovider "github.com/hashicorp/terraform/builtin/providers/dns"
dockerprovider "github.com/hashicorp/terraform/builtin/providers/docker" dockerprovider "github.com/hashicorp/terraform/builtin/providers/docker"
dynprovider "github.com/hashicorp/terraform/builtin/providers/dyn" dynprovider "github.com/hashicorp/terraform/builtin/providers/dyn"
externalprovider "github.com/hashicorp/terraform/builtin/providers/external" externalprovider "github.com/hashicorp/terraform/builtin/providers/external"
fastlyprovider "github.com/hashicorp/terraform/builtin/providers/fastly" fastlyprovider "github.com/hashicorp/terraform/builtin/providers/fastly"
fileprovisioner "github.com/hashicorp/terraform/builtin/provisioners/file"
githubprovider "github.com/hashicorp/terraform/builtin/providers/github" githubprovider "github.com/hashicorp/terraform/builtin/providers/github"
googleprovider "github.com/hashicorp/terraform/builtin/providers/google" googleprovider "github.com/hashicorp/terraform/builtin/providers/google"
grafanaprovider "github.com/hashicorp/terraform/builtin/providers/grafana" grafanaprovider "github.com/hashicorp/terraform/builtin/providers/grafana"
@ -37,6 +38,7 @@ import (
ignitionprovider "github.com/hashicorp/terraform/builtin/providers/ignition" ignitionprovider "github.com/hashicorp/terraform/builtin/providers/ignition"
influxdbprovider "github.com/hashicorp/terraform/builtin/providers/influxdb" influxdbprovider "github.com/hashicorp/terraform/builtin/providers/influxdb"
libratoprovider "github.com/hashicorp/terraform/builtin/providers/librato" libratoprovider "github.com/hashicorp/terraform/builtin/providers/librato"
localexecprovisioner "github.com/hashicorp/terraform/builtin/provisioners/local-exec"
logentriesprovider "github.com/hashicorp/terraform/builtin/providers/logentries" logentriesprovider "github.com/hashicorp/terraform/builtin/providers/logentries"
mailgunprovider "github.com/hashicorp/terraform/builtin/providers/mailgun" mailgunprovider "github.com/hashicorp/terraform/builtin/providers/mailgun"
mysqlprovider "github.com/hashicorp/terraform/builtin/providers/mysql" mysqlprovider "github.com/hashicorp/terraform/builtin/providers/mysql"
@ -54,6 +56,7 @@ import (
rabbitmqprovider "github.com/hashicorp/terraform/builtin/providers/rabbitmq" rabbitmqprovider "github.com/hashicorp/terraform/builtin/providers/rabbitmq"
rancherprovider "github.com/hashicorp/terraform/builtin/providers/rancher" rancherprovider "github.com/hashicorp/terraform/builtin/providers/rancher"
randomprovider "github.com/hashicorp/terraform/builtin/providers/random" randomprovider "github.com/hashicorp/terraform/builtin/providers/random"
remoteexecprovisioner "github.com/hashicorp/terraform/builtin/provisioners/remote-exec"
rundeckprovider "github.com/hashicorp/terraform/builtin/providers/rundeck" rundeckprovider "github.com/hashicorp/terraform/builtin/providers/rundeck"
scalewayprovider "github.com/hashicorp/terraform/builtin/providers/scaleway" scalewayprovider "github.com/hashicorp/terraform/builtin/providers/scaleway"
softlayerprovider "github.com/hashicorp/terraform/builtin/providers/softlayer" softlayerprovider "github.com/hashicorp/terraform/builtin/providers/softlayer"
@ -68,9 +71,6 @@ import (
vaultprovider "github.com/hashicorp/terraform/builtin/providers/vault" vaultprovider "github.com/hashicorp/terraform/builtin/providers/vault"
vcdprovider "github.com/hashicorp/terraform/builtin/providers/vcd" vcdprovider "github.com/hashicorp/terraform/builtin/providers/vcd"
vsphereprovider "github.com/hashicorp/terraform/builtin/providers/vsphere" vsphereprovider "github.com/hashicorp/terraform/builtin/providers/vsphere"
fileprovisioner "github.com/hashicorp/terraform/builtin/provisioners/file"
localexecprovisioner "github.com/hashicorp/terraform/builtin/provisioners/local-exec"
remoteexecprovisioner "github.com/hashicorp/terraform/builtin/provisioners/remote-exec"
"github.com/hashicorp/terraform/plugin" "github.com/hashicorp/terraform/plugin"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
@ -142,12 +142,14 @@ var InternalProviders = map[string]plugin.ProviderFunc{
"vault": vaultprovider.Provider, "vault": vaultprovider.Provider,
"vcd": vcdprovider.Provider, "vcd": vcdprovider.Provider,
"vsphere": vsphereprovider.Provider, "vsphere": vsphereprovider.Provider,
} }
var InternalProvisioners = map[string]plugin.ProvisionerFunc{ var InternalProvisioners = map[string]plugin.ProvisionerFunc{
"file": fileprovisioner.Provisioner, "file": fileprovisioner.Provisioner,
"local-exec": localexecprovisioner.Provisioner, "local-exec": localexecprovisioner.Provisioner,
"remote-exec": remoteexecprovisioner.Provisioner, "remote-exec": remoteexecprovisioner.Provisioner,
} }
func init() { func init() {
@ -155,3 +157,4 @@ func init() {
// built-in provisioners. // built-in provisioners.
InternalProvisioners["chef"] = func() terraform.ResourceProvisioner { return new(chefprovisioner.ResourceProvisioner) } InternalProvisioners["chef"] = func() terraform.ResourceProvisioner { return new(chefprovisioner.ResourceProvisioner) }
} }

View File

@ -54,6 +54,7 @@ The following attributes are exported:
* `arn` - The Amazon Resource Name (ARN) specifying the role. * `arn` - The Amazon Resource Name (ARN) specifying the role.
* `create_date` - The creation date of the IAM role. * `create_date` - The creation date of the IAM role.
* `unique_id` - The stable and unique string identifying the role. * `unique_id` - The stable and unique string identifying the role.
* `name` - The name of the role.
## Example of Using Data Source for Assume Role Policy ## Example of Using Data Source for Assume Role Policy

View File

@ -102,7 +102,8 @@ The `root_block_device` mapping supports the following:
* `volume_size` - (Optional) The size of the volume in gigabytes. * `volume_size` - (Optional) The size of the volume in gigabytes.
* `iops` - (Optional) The amount of provisioned * `iops` - (Optional) The amount of provisioned
[IOPS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-io-characteristics.html). [IOPS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-io-characteristics.html).
This must be set with a `volume_type` of `"io1"`. This is only valid for `volume_type` of `"io1"`, and must be specified if
using that type
* `delete_on_termination` - (Optional) Whether the volume should be destroyed * `delete_on_termination` - (Optional) Whether the volume should be destroyed
on instance termination (Default: `true`). on instance termination (Default: `true`).

View File

@ -3,13 +3,13 @@ layout: "consul"
page_title: "Consul: consul_catalog_entry" page_title: "Consul: consul_catalog_entry"
sidebar_current: "docs-consul-resource-catalog-entry" sidebar_current: "docs-consul-resource-catalog-entry"
description: |- description: |-
Provides access to Catalog data in Consul. This can be used to define a node or a service. Currently, defining health checks is not supported. Registers a node or service with the Consul Catalog. Currently, defining health checks is not supported.
--- ---
# consul\_catalog\_entry # consul\_catalog\_entry
Provides access to Catalog data in Consul. This can be used to define a Registers a node or service with the [Consul Catalog](https://www.consul.io/docs/agent/http/catalog.html#catalog_register).
node or a service. Currently, defining health checks is not supported. Currently, defining health checks is not supported.
## Example Usage ## Example Usage
@ -41,6 +41,11 @@ The following arguments are supported:
* `service` - (Optional) A service to optionally associate with * `service` - (Optional) A service to optionally associate with
the node. Supported values are documented below. the node. Supported values are documented below.
* `datacenter` - (Optional) The datacenter to use. This overrides the
datacenter in the provider setup and the agent's default datacenter.
* `token` - (Optional) ACL token.
The `service` block supports the following: The `service` block supports the following:
* `address` - (Optional) The address of the service. Defaults to the * `address` - (Optional) The address of the service. Defaults to the

View File

@ -129,13 +129,23 @@ The `client_auth` configuration block accepts the following arguments:
``` ```
provider "vault" { provider "vault" {
# It is strongly recommended to configure this provider through the # It is strongly recommended to configure this provider through the
# environment variables described below, so that each user can have # environment variables described above, so that each user can have
# separate credentials set in the environment. # separate credentials set in the environment.
address = "https://vault.example.net:8200" #
# This will default to using $VAULT_ADDR
# But can be set explicitly
# address = "https://vault.example.net:8200"
} }
data "vault_generic_secret" "example" { resource "vault_generic_secret" "example" {
path = "secret/foo" path = "secret/foo"
data_json = <<EOT
{
"foo": "bar",
"pizza": "cheese"
}
EOT
} }
``` ```