Merge branch 'master' into brandontosch/GH-11874

This commit is contained in:
Brandon Tosch 2017-03-08 14:10:06 -08:00
commit fa56000b1b
109 changed files with 2513 additions and 921 deletions

View File

@ -4,7 +4,14 @@ branch for the next major version of Terraform.
## 0.9.0-beta3 (unreleased)
BACKWARDS INCOMPATIBILITIES / NOTES:
* provider/aws: `aws_codebuild_project` renamed `timeout` to `build_timeout` [GH-12503]
* provider/azurerm: `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` now store hash of custom_data, not all custom_data [GH-12214]
IMPROVEMENTS:
* provider/azurerm: store only hash of `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` custom_data - reduces size of state [GH-12214]
## 0.9.0-beta2 (March 2, 2017)

View File

@ -335,7 +335,6 @@ resource "aws_instance" "foo" {
root_block_device {
volume_type = "gp2"
volume_size = 11
iops = 330
}
}

View File

@ -14,19 +14,19 @@ func dataSourceAwsVpc() *schema.Resource {
Read: dataSourceAwsVpcRead,
Schema: map[string]*schema.Schema{
"cidr_block": &schema.Schema{
"cidr_block": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"dhcp_options_id": &schema.Schema{
"dhcp_options_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"default": &schema.Schema{
"default": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
@ -34,18 +34,28 @@ func dataSourceAwsVpc() *schema.Resource {
"filter": ec2CustomFiltersSchema(),
"id": &schema.Schema{
"id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"instance_tenancy": &schema.Schema{
"instance_tenancy": {
Type: schema.TypeString,
Computed: true,
},
"state": &schema.Schema{
"ipv6_cidr_block": {
Type: schema.TypeString,
Computed: true,
},
"ipv6_association_id": {
Type: schema.TypeString,
Computed: true,
},
"state": {
Type: schema.TypeString,
Optional: true,
Computed: true,
@ -117,5 +127,10 @@ func dataSourceAwsVpcRead(d *schema.ResourceData, meta interface{}) error {
d.Set("state", vpc.State)
d.Set("tags", tagsToMap(vpc.Tags))
if vpc.Ipv6CidrBlockAssociationSet != nil {
d.Set("ipv6_association_id", vpc.Ipv6CidrBlockAssociationSet[0].AssociationId)
d.Set("ipv6_cidr_block", vpc.Ipv6CidrBlockAssociationSet[0].Ipv6CidrBlock)
}
return nil
}

View File

@ -13,7 +13,7 @@ func TestAccDataSourceAwsVpc_basic(t *testing.T) {
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
resource.TestStep{
{
Config: testAccDataSourceAwsVpcConfig,
Check: resource.ComposeTestCheckFunc(
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_id"),
@ -26,6 +26,25 @@ func TestAccDataSourceAwsVpc_basic(t *testing.T) {
})
}
// TestAccDataSourceAwsVpc_ipv6Associated is an acceptance test verifying that
// the aws_vpc data source exposes the new ipv6_association_id and
// ipv6_cidr_block attributes when the underlying VPC was created with an
// auto-assigned IPv6 CIDR block (see testAccDataSourceAwsVpcConfigIpv6).
func TestAccDataSourceAwsVpc_ipv6Associated(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
{
Config: testAccDataSourceAwsVpcConfigIpv6,
Check: resource.ComposeTestCheckFunc(
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_id"),
// Only assert the attributes are set; the actual CIDR/association
// values are assigned by AWS and are not predictable.
resource.TestCheckResourceAttrSet(
"data.aws_vpc.by_id", "ipv6_association_id"),
resource.TestCheckResourceAttrSet(
"data.aws_vpc.by_id", "ipv6_cidr_block"),
),
},
},
})
}
func testAccDataSourceAwsVpcCheck(name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[name]
@ -59,6 +78,25 @@ func testAccDataSourceAwsVpcCheck(name string) resource.TestCheckFunc {
}
}
const testAccDataSourceAwsVpcConfigIpv6 = `
provider "aws" {
region = "us-west-2"
}
resource "aws_vpc" "test" {
cidr_block = "172.16.0.0/16"
assign_generated_ipv6_cidr_block = true
tags {
Name = "terraform-testacc-vpc-data-source"
}
}
data "aws_vpc" "by_id" {
id = "${aws_vpc.test.id}"
}
`
const testAccDataSourceAwsVpcConfig = `
provider "aws" {
region = "us-west-2"

View File

@ -1,6 +1,8 @@
package aws
import (
"bytes"
"encoding/json"
"log"
"strings"
@ -42,3 +44,17 @@ func suppressAwsDbEngineVersionDiffs(k, old, new string, d *schema.ResourceData)
// Throw a diff by default
return false
}
// suppressEquivalentJsonDiffs is used as a schema DiffSuppressFunc (see the
// replication_task_settings / table_mappings attributes elsewhere in this
// change set). It treats two JSON documents as equal when they differ only in
// insignificant whitespace: both values are compacted with json.Compact and
// then compared via jsonBytesEqual (defined elsewhere in this package).
func suppressEquivalentJsonDiffs(k, old, new string, d *schema.ResourceData) bool {
ob := bytes.NewBufferString("")
if err := json.Compact(ob, []byte(old)); err != nil {
// old is not valid JSON; do not suppress the diff.
return false
}
nb := bytes.NewBufferString("")
if err := json.Compact(nb, []byte(new)); err != nil {
// new is not valid JSON; do not suppress the diff.
return false
}
return jsonBytesEqual(ob.Bytes(), nb.Bytes())
}

View File

@ -0,0 +1,31 @@
package aws
import (
"testing"
"github.com/hashicorp/terraform/helper/schema"
)
// TestSuppressEquivalentJsonDiffsWhitespaceAndNoWhitespace verifies that
// suppressEquivalentJsonDiffs suppresses a diff for semantically identical
// JSON that differs only in whitespace, and does NOT suppress a diff when the
// JSON values actually differ.
func TestSuppressEquivalentJsonDiffsWhitespaceAndNoWhitespace(t *testing.T) {
d := new(schema.ResourceData)
// Same document, with and without whitespace: diff must be suppressed.
noWhitespace := `{"test":"test"}`
whitespace := `
{
"test": "test"
}`
if !suppressEquivalentJsonDiffs("", noWhitespace, whitespace, d) {
t.Errorf("Expected suppressEquivalentJsonDiffs to return true for %s == %s", noWhitespace, whitespace)
}
// Different values ("test" vs "tested"): diff must NOT be suppressed.
noWhitespaceDiff := `{"test":"test"}`
whitespaceDiff := `
{
"test": "tested"
}`
if suppressEquivalentJsonDiffs("", noWhitespaceDiff, whitespaceDiff, d) {
t.Errorf("Expected suppressEquivalentJsonDiffs to return false for %s == %s", noWhitespaceDiff, whitespaceDiff)
}
}

View File

@ -164,6 +164,13 @@ func resourceAwsCodeBuildProject() *schema.Resource {
Type: schema.TypeInt,
Optional: true,
ValidateFunc: validateAwsCodeBuildTimeout,
Removed: "This field has been removed. Please use build_timeout instead",
},
"build_timeout": {
Type: schema.TypeInt,
Optional: true,
Default: "60",
ValidateFunc: validateAwsCodeBuildTimeout,
},
"tags": tagsSchema(),
},
@ -196,7 +203,7 @@ func resourceAwsCodeBuildProjectCreate(d *schema.ResourceData, meta interface{})
params.ServiceRole = aws.String(v.(string))
}
if v, ok := d.GetOk("timeout"); ok {
if v, ok := d.GetOk("build_timeout"); ok {
params.TimeoutInMinutes = aws.Int64(int64(v.(int)))
}
@ -373,7 +380,7 @@ func resourceAwsCodeBuildProjectRead(d *schema.ResourceData, meta interface{}) e
d.Set("encryption_key", project.EncryptionKey)
d.Set("name", project.Name)
d.Set("service_role", project.ServiceRole)
d.Set("timeout", project.TimeoutInMinutes)
d.Set("build_timeout", project.TimeoutInMinutes)
if err := d.Set("tags", tagsToMapCodeBuild(project.Tags)); err != nil {
return err
@ -416,8 +423,8 @@ func resourceAwsCodeBuildProjectUpdate(d *schema.ResourceData, meta interface{})
params.ServiceRole = aws.String(d.Get("service_role").(string))
}
if d.HasChange("timeout") {
params.TimeoutInMinutes = aws.Int64(int64(d.Get("timeout").(int)))
if d.HasChange("build_timeout") {
params.TimeoutInMinutes = aws.Int64(int64(d.Get("build_timeout").(int)))
}
// The documentation clearly says "The replacement set of tags for this build project."

View File

@ -0,0 +1,36 @@
package aws
import (
"fmt"
"log"
"strings"
"github.com/hashicorp/terraform/terraform"
)
// resourceAwsCodebuildMigrateState dispatches Terraform state migrations for
// the CodeBuild project resource based on the recorded schema version v.
// Only the v0 -> v1 migration (timeout -> build_timeout rename) is supported;
// any other version returns the state unchanged along with an error.
func resourceAwsCodebuildMigrateState(
v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
switch v {
case 0:
log.Println("[INFO] Found AWS Codebuild State v0; migrating to v1")
return migrateCodebuildStateV0toV1(is)
default:
return is, fmt.Errorf("Unexpected schema version: %d", v)
}
}
// migrateCodebuildStateV0toV1 copies the v0 "timeout" attribute into the v1
// "build_timeout" attribute, mutating the InstanceState in place. Empty
// states are returned unchanged.
// NOTE(review): the old "timeout" key is left in the attribute map after the
// copy — confirm whether it should be deleted here to avoid a stale entry.
func migrateCodebuildStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) {
if is.Empty() {
log.Println("[DEBUG] Empty InstanceState; nothing to migrate.")
return is, nil
}
log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes)
// Only migrate when a timeout was actually set in the old state.
if is.Attributes["timeout"] != "" {
is.Attributes["build_timeout"] = strings.TrimSpace(is.Attributes["timeout"])
}
log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes)
return is, nil
}

View File

@ -0,0 +1,53 @@
package aws
import (
"testing"
"github.com/hashicorp/terraform/terraform"
)
// TestAWSCodebuildMigrateState table-tests the v0 -> v1 state migration:
// a v0 state carrying "timeout" must end up with "build_timeout" set to the
// same value, and a state that already has "build_timeout" must pass through
// unchanged.
func TestAWSCodebuildMigrateState(t *testing.T) {
cases := map[string]struct {
StateVersion int
ID string
Attributes map[string]string
Expected string
Meta interface{}
}{
// v0 state with the old attribute name; must be renamed.
"v0_1": {
StateVersion: 0,
ID: "tf-testing-file",
Attributes: map[string]string{
"description": "some description",
"timeout": "5",
},
Expected: "5",
},
// State that already uses the new name; must be left as-is.
"v0_2": {
StateVersion: 0,
ID: "tf-testing-file",
Attributes: map[string]string{
"description": "some description",
"build_timeout": "5",
},
Expected: "5",
},
}
for tn, tc := range cases {
is := &terraform.InstanceState{
ID: tc.ID,
Attributes: tc.Attributes,
}
is, err := resourceAwsCodebuildMigrateState(
tc.StateVersion, is, tc.Meta)
if err != nil {
t.Fatalf("bad: %s, err: %#v", tn, err)
}
if is.Attributes["build_timeout"] != tc.Expected {
t.Fatalf("Bad build_timeout migration: %s\n\n expected: %s", is.Attributes["build_timeout"], tc.Expected)
}
}
}

View File

@ -25,19 +25,51 @@ func TestAccAWSCodeBuildProject_basic(t *testing.T) {
Config: testAccAWSCodeBuildProjectConfig_basic(name),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSCodeBuildProjectExists("aws_codebuild_project.foo"),
resource.TestCheckResourceAttr(
"aws_codebuild_project.foo", "build_timeout", "5"),
),
},
{
Config: testAccAWSCodeBuildProjectConfig_basicUpdated(name),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSCodeBuildProjectExists("aws_codebuild_project.foo"),
resource.TestCheckResourceAttr(
"aws_codebuild_project.foo", "build_timeout", "50"),
),
},
},
})
}
func TestAccAWSCodeBuildProject_artifactsTypeValidation(t *testing.T) {
func TestAccAWSCodeBuildProject_default_build_timeout(t *testing.T) {
name := acctest.RandString(10)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSCodeBuildProjectDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSCodeBuildProjectConfig_default_timeout(name),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSCodeBuildProjectExists("aws_codebuild_project.foo"),
resource.TestCheckResourceAttr(
"aws_codebuild_project.foo", "build_timeout", "60"),
),
},
{
Config: testAccAWSCodeBuildProjectConfig_basicUpdated(name),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSCodeBuildProjectExists("aws_codebuild_project.foo"),
resource.TestCheckResourceAttr(
"aws_codebuild_project.foo", "build_timeout", "50"),
),
},
},
})
}
func TestAWSCodeBuildProject_artifactsTypeValidation(t *testing.T) {
cases := []struct {
Value string
ErrCount int
@ -57,7 +89,7 @@ func TestAccAWSCodeBuildProject_artifactsTypeValidation(t *testing.T) {
}
}
func TestAccAWSCodeBuildProject_artifactsNamespaceTypeValidation(t *testing.T) {
func TestAWSCodeBuildProject_artifactsNamespaceTypeValidation(t *testing.T) {
cases := []struct {
Value string
ErrCount int
@ -94,7 +126,7 @@ func longTestData() string {
}, data)
}
func TestAccAWSCodeBuildProject_nameValidation(t *testing.T) {
func TestAWSCodeBuildProject_nameValidation(t *testing.T) {
cases := []struct {
Value string
ErrCount int
@ -115,7 +147,7 @@ func TestAccAWSCodeBuildProject_nameValidation(t *testing.T) {
}
}
func TestAccAWSCodeBuildProject_descriptionValidation(t *testing.T) {
func TestAWSCodeBuildProject_descriptionValidation(t *testing.T) {
cases := []struct {
Value string
ErrCount int
@ -133,7 +165,7 @@ func TestAccAWSCodeBuildProject_descriptionValidation(t *testing.T) {
}
}
func TestAccAWSCodeBuildProject_environmentComputeTypeValidation(t *testing.T) {
func TestAWSCodeBuildProject_environmentComputeTypeValidation(t *testing.T) {
cases := []struct {
Value string
ErrCount int
@ -153,7 +185,7 @@ func TestAccAWSCodeBuildProject_environmentComputeTypeValidation(t *testing.T) {
}
}
func TestAccAWSCodeBuildProject_environmentTypeValidation(t *testing.T) {
func TestAWSCodeBuildProject_environmentTypeValidation(t *testing.T) {
cases := []struct {
Value string
ErrCount int
@ -171,7 +203,7 @@ func TestAccAWSCodeBuildProject_environmentTypeValidation(t *testing.T) {
}
}
func TestAccAWSCodeBuildProject_sourceTypeValidation(t *testing.T) {
func TestAWSCodeBuildProject_sourceTypeValidation(t *testing.T) {
cases := []struct {
Value string
ErrCount int
@ -192,7 +224,7 @@ func TestAccAWSCodeBuildProject_sourceTypeValidation(t *testing.T) {
}
}
func TestAccAWSCodeBuildProject_sourceAuthTypeValidation(t *testing.T) {
func TestAWSCodeBuildProject_sourceAuthTypeValidation(t *testing.T) {
cases := []struct {
Value string
ErrCount int
@ -210,7 +242,7 @@ func TestAccAWSCodeBuildProject_sourceAuthTypeValidation(t *testing.T) {
}
}
func TestAccAWSCodeBuildProject_timeoutValidation(t *testing.T) {
func TestAWSCodeBuildProject_timeoutValidation(t *testing.T) {
cases := []struct {
Value int
ErrCount int
@ -342,7 +374,7 @@ resource "aws_iam_policy_attachment" "codebuild_policy_attachment" {
resource "aws_codebuild_project" "foo" {
name = "test-project-%s"
description = "test_codebuild_project"
timeout = "5"
build_timeout = "5"
service_role = "${aws_iam_role.codebuild_role.arn}"
artifacts {
@ -429,7 +461,94 @@ resource "aws_iam_policy_attachment" "codebuild_policy_attachment" {
resource "aws_codebuild_project" "foo" {
name = "test-project-%s"
description = "test_codebuild_project"
timeout = "5"
build_timeout = "50"
service_role = "${aws_iam_role.codebuild_role.arn}"
artifacts {
type = "NO_ARTIFACTS"
}
environment {
compute_type = "BUILD_GENERAL1_SMALL"
image = "2"
type = "LINUX_CONTAINER"
environment_variable = {
"name" = "SOME_OTHERKEY"
"value" = "SOME_OTHERVALUE"
}
}
source {
auth {
type = "OAUTH"
}
type = "GITHUB"
location = "https://github.com/mitchellh/packer.git"
}
tags {
"Environment" = "Test"
}
}
`, rName, rName, rName, rName)
}
func testAccAWSCodeBuildProjectConfig_default_timeout(rName string) string {
return fmt.Sprintf(`
resource "aws_iam_role" "codebuild_role" {
name = "codebuild-role-%s"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "codebuild.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
EOF
}
resource "aws_iam_policy" "codebuild_policy" {
name = "codebuild-policy-%s"
path = "/service-role/"
description = "Policy used in trust relationship with CodeBuild"
policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Resource": [
"*"
],
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents"
]
}
]
}
POLICY
}
resource "aws_iam_policy_attachment" "codebuild_policy_attachment" {
name = "codebuild-policy-attachment-%s"
policy_arn = "${aws_iam_policy.codebuild_policy.arn}"
roles = ["${aws_iam_role.codebuild_role.id}"]
}
resource "aws_codebuild_project" "foo" {
name = "test-project-%s"
description = "test_codebuild_project"
service_role = "${aws_iam_role.codebuild_role.arn}"
artifacts {

View File

@ -25,19 +25,19 @@ func resourceAwsCustomerGateway() *schema.Resource {
},
Schema: map[string]*schema.Schema{
"bgp_asn": &schema.Schema{
"bgp_asn": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
},
"ip_address": &schema.Schema{
"ip_address": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"type": &schema.Schema{
"type": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
@ -51,10 +51,23 @@ func resourceAwsCustomerGateway() *schema.Resource {
func resourceAwsCustomerGatewayCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
ipAddress := d.Get("ip_address").(string)
vpnType := d.Get("type").(string)
bgpAsn := d.Get("bgp_asn").(int)
alreadyExists, err := resourceAwsCustomerGatewayExists(vpnType, ipAddress, bgpAsn, conn)
if err != nil {
return err
}
if alreadyExists {
return fmt.Errorf("An existing customer gateway for IpAddress: %s, VpnType: %s, BGP ASN: %d has been found", ipAddress, vpnType, bgpAsn)
}
createOpts := &ec2.CreateCustomerGatewayInput{
BgpAsn: aws.Int64(int64(d.Get("bgp_asn").(int))),
PublicIp: aws.String(d.Get("ip_address").(string)),
Type: aws.String(d.Get("type").(string)),
BgpAsn: aws.Int64(int64(bgpAsn)),
PublicIp: aws.String(ipAddress),
Type: aws.String(vpnType),
}
// Create the Customer Gateway.
@ -123,6 +136,37 @@ func customerGatewayRefreshFunc(conn *ec2.EC2, gatewayId string) resource.StateR
}
}
// resourceAwsCustomerGatewayExists reports whether a customer gateway with
// the same type, public IP address and BGP ASN already exists (and is not in
// the "deleted" state), using server-side EC2 DescribeCustomerGateways
// filters. The create path uses this to reject duplicate gateways up front.
func resourceAwsCustomerGatewayExists(vpnType, ipAddress string, bgpAsn int, conn *ec2.EC2) (bool, error) {
ipAddressFilter := &ec2.Filter{
Name: aws.String("ip-address"),
Values: []*string{aws.String(ipAddress)},
}
typeFilter := &ec2.Filter{
Name: aws.String("type"),
Values: []*string{aws.String(vpnType)},
}
// The bgp-asn filter takes a string, so convert the int first.
bgp := strconv.Itoa(bgpAsn)
bgpAsnFilter := &ec2.Filter{
Name: aws.String("bgp-asn"),
Values: []*string{aws.String(bgp)},
}
resp, err := conn.DescribeCustomerGateways(&ec2.DescribeCustomerGatewaysInput{
Filters: []*ec2.Filter{ipAddressFilter, typeFilter, bgpAsnFilter},
})
if err != nil {
return false, err
}
// A gateway in the "deleted" state does not count as existing.
if len(resp.CustomerGateways) > 0 && *resp.CustomerGateways[0].State != "deleted" {
return true, nil
}
return false, nil
}
func resourceAwsCustomerGatewayRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn

View File

@ -2,6 +2,7 @@ package aws
import (
"fmt"
"regexp"
"testing"
"time"
@ -21,19 +22,19 @@ func TestAccAWSCustomerGateway_basic(t *testing.T) {
Providers: testAccProviders,
CheckDestroy: testAccCheckCustomerGatewayDestroy,
Steps: []resource.TestStep{
resource.TestStep{
{
Config: testAccCustomerGatewayConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway),
),
},
resource.TestStep{
{
Config: testAccCustomerGatewayConfigUpdateTags,
Check: resource.ComposeTestCheckFunc(
testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway),
),
},
resource.TestStep{
{
Config: testAccCustomerGatewayConfigForceReplace,
Check: resource.ComposeTestCheckFunc(
testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway),
@ -43,6 +44,28 @@ func TestAccAWSCustomerGateway_basic(t *testing.T) {
})
}
// TestAccAWSCustomerGateway_similarAlreadyExists is an acceptance test
// verifying that creating a second customer gateway with identical
// type/IP/BGP-ASN fails with the duplicate-gateway error produced by
// resourceAwsCustomerGatewayCreate.
func TestAccAWSCustomerGateway_similarAlreadyExists(t *testing.T) {
var gateway ec2.CustomerGateway
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
IDRefreshName: "aws_customer_gateway.foo",
Providers: testAccProviders,
CheckDestroy: testAccCheckCustomerGatewayDestroy,
Steps: []resource.TestStep{
{
Config: testAccCustomerGatewayConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway),
),
},
{
// Second config defines an identical gateway; the apply must fail
// with the "An existing customer gateway" error message.
Config: testAccCustomerGatewayConfigIdentical,
ExpectError: regexp.MustCompile("An existing customer gateway"),
},
},
})
}
func TestAccAWSCustomerGateway_disappears(t *testing.T) {
var gateway ec2.CustomerGateway
resource.Test(t, resource.TestCase{
@ -50,7 +73,7 @@ func TestAccAWSCustomerGateway_disappears(t *testing.T) {
Providers: testAccProviders,
CheckDestroy: testAccCheckCustomerGatewayDestroy,
Steps: []resource.TestStep{
resource.TestStep{
{
Config: testAccCustomerGatewayConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway),
@ -178,6 +201,26 @@ resource "aws_customer_gateway" "foo" {
}
`
const testAccCustomerGatewayConfigIdentical = `
resource "aws_customer_gateway" "foo" {
bgp_asn = 65000
ip_address = "172.0.0.1"
type = "ipsec.1"
tags {
Name = "foo-gateway"
}
}
resource "aws_customer_gateway" "identical" {
bgp_asn = 65000
ip_address = "172.0.0.1"
type = "ipsec.1"
tags {
Name = "foo-gateway-identical"
}
}
`
// Add the Another: "tag" tag.
const testAccCustomerGatewayConfigUpdateTags = `
resource "aws_customer_gateway" "foo" {

View File

@ -27,7 +27,7 @@ func resourceAwsDmsReplicationTask() *schema.Resource {
Schema: map[string]*schema.Schema{
"cdc_start_time": {
Type: schema.TypeInt,
Type: schema.TypeString,
Optional: true,
// Requires a Unix timestamp in seconds. Example 1484346880
},
@ -57,9 +57,10 @@ func resourceAwsDmsReplicationTask() *schema.Resource {
ValidateFunc: validateDmsReplicationTaskId,
},
"replication_task_settings": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validateJsonString,
Type: schema.TypeString,
Optional: true,
ValidateFunc: validateJsonString,
DiffSuppressFunc: suppressEquivalentJsonDiffs,
},
"source_endpoint_arn": {
Type: schema.TypeString,
@ -68,9 +69,10 @@ func resourceAwsDmsReplicationTask() *schema.Resource {
ValidateFunc: validateArn,
},
"table_mappings": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validateJsonString,
Type: schema.TypeString,
Required: true,
ValidateFunc: validateJsonString,
DiffSuppressFunc: suppressEquivalentJsonDiffs,
},
"tags": {
Type: schema.TypeMap,

View File

@ -70,11 +70,13 @@ func resourceAwsEcsTaskDefinition() *schema.Resource {
"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"host_path": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
},
},

View File

@ -135,6 +135,41 @@ func TestAccAWSEcsTaskDefinition_constraint(t *testing.T) {
})
}
// TestAccAWSEcsTaskDefinition_changeVolumesForcesNewResource is an acceptance
// test verifying that changing a task definition's volume block (now marked
// ForceNew on name/host_path) re-creates the resource, observed as a Revision
// change between the two applies.
func TestAccAWSEcsTaskDefinition_changeVolumesForcesNewResource(t *testing.T) {
var before ecs.TaskDefinition
var after ecs.TaskDefinition
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSEcsTaskDefinitionDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSEcsTaskDefinition,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.jenkins", &before),
),
},
{
// Same task definition but with a changed volume host_path.
Config: testAccAWSEcsTaskDefinitionUpdatedVolume,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.jenkins", &after),
testAccCheckEcsTaskDefinitionRecreated(t, &before, &after),
),
},
},
})
}
// testAccCheckEcsTaskDefinitionRecreated returns a TestCheckFunc that asserts
// the task definition was re-created between two captured states, i.e. its
// Revision changed. The *testing.T parameter is kept for signature
// compatibility with existing callers.
//
// Fixes over the previous version:
//   - report the failure by returning an error instead of calling t.Fatalf
//     inside the check func, which is the supported way for a
//     resource.TestCheckFunc to fail;
//   - dereference before.Revision (*int64) in the message — formatting the
//     pointer itself with %v would print an address, not the revision number.
func testAccCheckEcsTaskDefinitionRecreated(t *testing.T,
	before, after *ecs.TaskDefinition) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if *before.Revision == *after.Revision {
			return fmt.Errorf("Expected change of TaskDefinition Revisions, but both were %v", *before.Revision)
		}
		return nil
	}
}
func testAccCheckAWSTaskDefinitionConstraintsAttrs(def *ecs.TaskDefinition) resource.TestCheckFunc {
return func(s *terraform.State) error {
if len(def.PlacementConstraints) != 1 {
@ -319,6 +354,55 @@ TASK_DEFINITION
}
`
var testAccAWSEcsTaskDefinitionUpdatedVolume = `
resource "aws_ecs_task_definition" "jenkins" {
family = "terraform-acc-test"
container_definitions = <<TASK_DEFINITION
[
{
"cpu": 10,
"command": ["sleep", "10"],
"entryPoint": ["/"],
"environment": [
{"name": "VARNAME", "value": "VARVAL"}
],
"essential": true,
"image": "jenkins",
"links": ["mongodb"],
"memory": 128,
"name": "jenkins",
"portMappings": [
{
"containerPort": 80,
"hostPort": 8080
}
]
},
{
"cpu": 10,
"command": ["sleep", "10"],
"entryPoint": ["/"],
"essential": true,
"image": "mongodb",
"memory": 128,
"name": "mongodb",
"portMappings": [
{
"containerPort": 28017,
"hostPort": 28017
}
]
}
]
TASK_DEFINITION
volume {
name = "jenkins-home"
host_path = "/ecs/jenkins"
}
}
`
var testAccAWSEcsTaskDefinitionWithScratchVolume = `
resource "aws_ecs_task_definition" "sleep" {
family = "terraform-acc-sc-volume-test"

View File

@ -108,15 +108,15 @@ resource "aws_s3_bucket_object" "default" {
}
resource "aws_elastic_beanstalk_application" "default" {
name = "tf-test-name"
name = "tf-test-name-%d"
description = "tf-test-desc"
}
resource "aws_elastic_beanstalk_application_version" "default" {
application = "tf-test-name"
application = "tf-test-name-%d"
name = "tf-test-version-label"
bucket = "${aws_s3_bucket.default.id}"
key = "${aws_s3_bucket_object.default.id}"
}
`, randInt)
`, randInt, randInt, randInt)
}

View File

@ -260,6 +260,7 @@ func instanceProfileReadResult(d *schema.ResourceData, result *iam.InstanceProfi
if err := d.Set("path", result.Path); err != nil {
return err
}
d.Set("unique_id", result.InstanceProfileId)
roles := &schema.Set{F: schema.HashString}
for _, role := range result.Roles {

View File

@ -548,7 +548,7 @@ func resourceAwsInstanceRead(d *schema.ResourceData, meta interface{}) error {
}
if err := d.Set("ipv6_addresses", ipv6Addresses); err != nil {
log.Printf("[WARN] Error setting ipv6_addresses for AWS Instance (%d): %s", d.Id(), err)
log.Printf("[WARN] Error setting ipv6_addresses for AWS Instance (%s): %s", d.Id(), err)
}
d.Set("ebs_optimized", instance.EbsOptimized)
@ -1034,10 +1034,15 @@ func readBlockDeviceMappingsFromConfig(
if v, ok := bd["volume_type"].(string); ok && v != "" {
ebs.VolumeType = aws.String(v)
}
if v, ok := bd["iops"].(int); ok && v > 0 {
ebs.Iops = aws.Int64(int64(v))
if "io1" == strings.ToLower(v) {
// Condition: This parameter is required for requests to create io1
// volumes; it is not used in requests to create gp2, st1, sc1, or
// standard volumes.
// See: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html
if v, ok := bd["iops"].(int); ok && v > 0 {
ebs.Iops = aws.Int64(int64(v))
}
}
}
blockDevices = append(blockDevices, &ec2.BlockDeviceMapping{

View File

@ -1060,7 +1060,6 @@ resource "aws_instance" "foo" {
root_block_device {
volume_type = "gp2"
volume_size = 11
iops = 330
}
}
`

View File

@ -127,6 +127,10 @@ func resourceAwsLBSSLNegotiationPolicyRead(d *schema.ResourceData, meta interfac
// The policy is gone.
d.SetId("")
return nil
} else if isLoadBalancerNotFound(err) {
// The ELB is gone now, so just remove it from the state
d.SetId("")
return nil
}
return fmt.Errorf("Error retrieving policy: %s", err)
}

View File

@ -21,7 +21,7 @@ func TestAccAWSLBSSLNegotiationPolicy_basic(t *testing.T) {
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccSslNegotiationPolicyConfig(
fmt.Sprintf("tf-acctest-%s", acctest.RandString(10))),
fmt.Sprintf("tf-acctest-%s", acctest.RandString(10)), fmt.Sprintf("tf-test-lb-%s", acctest.RandString(5))),
Check: resource.ComposeTestCheckFunc(
testAccCheckLBSSLNegotiationPolicy(
"aws_elb.lb",
@ -35,6 +35,44 @@ func TestAccAWSLBSSLNegotiationPolicy_basic(t *testing.T) {
})
}
// TestAccAWSLBSSLNegotiationPolicy_missingLB is an acceptance test verifying
// that an SSL negotiation policy can still be refreshed/destroyed after its
// load balancer has been deleted out-of-band (the read path now clears state
// via isLoadBalancerNotFound instead of erroring).
func TestAccAWSLBSSLNegotiationPolicy_missingLB(t *testing.T) {
lbName := fmt.Sprintf("tf-test-lb-%s", acctest.RandString(5))
// check that we can destroy the policy if the LB is missing
removeLB := func() {
// Delete the ELB directly through the API, bypassing Terraform, so the
// next step runs against a policy whose LB no longer exists.
conn := testAccProvider.Meta().(*AWSClient).elbconn
deleteElbOpts := elb.DeleteLoadBalancerInput{
LoadBalancerName: aws.String(lbName),
}
if _, err := conn.DeleteLoadBalancer(&deleteElbOpts); err != nil {
t.Fatalf("Error deleting ELB: %s", err)
}
}
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckLBSSLNegotiationPolicyDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccSslNegotiationPolicyConfig(fmt.Sprintf("tf-acctest-%s", acctest.RandString(10)), lbName),
Check: resource.ComposeTestCheckFunc(
testAccCheckLBSSLNegotiationPolicy(
"aws_elb.lb",
"aws_lb_ssl_negotiation_policy.foo",
),
resource.TestCheckResourceAttr(
"aws_lb_ssl_negotiation_policy.foo", "attribute.#", "7"),
),
},
resource.TestStep{
// PreConfig deletes the ELB first; the refresh/apply must succeed
// rather than fail on the missing load balancer.
PreConfig: removeLB,
Config: testAccSslNegotiationPolicyConfig(fmt.Sprintf("tf-acctest-%s", acctest.RandString(10)), lbName),
},
},
})
}
func testAccCheckLBSSLNegotiationPolicyDestroy(s *terraform.State) error {
elbconn := testAccProvider.Meta().(*AWSClient).elbconn
@ -155,7 +193,7 @@ func policyAttributesToMap(attributes *[]*elb.PolicyAttributeDescription) map[st
// Sets the SSL Negotiation policy with attributes.
// The IAM Server Cert config is lifted from
// builtin/providers/aws/resource_aws_iam_server_certificate_test.go
func testAccSslNegotiationPolicyConfig(certName string) string {
func testAccSslNegotiationPolicyConfig(certName string, lbName string) string {
return fmt.Sprintf(`
resource "aws_iam_server_certificate" "test_cert" {
name = "%s"
@ -216,14 +254,14 @@ wbEcTx10meJdinnhawqW7L0bhifeiTaPxbaCBXv/wiiL
EOF
}
resource "aws_elb" "lb" {
name = "test-lb"
availability_zones = ["us-west-2a"]
listener {
instance_port = 8000
instance_protocol = "https"
lb_port = 443
lb_protocol = "https"
ssl_certificate_id = "${aws_iam_server_certificate.test_cert.arn}"
name = "%s"
availability_zones = ["us-west-2a"]
listener {
instance_port = 8000
instance_protocol = "https"
lb_port = 443
lb_protocol = "https"
ssl_certificate_id = "${aws_iam_server_certificate.test_cert.arn}"
}
}
resource "aws_lb_ssl_negotiation_policy" "foo" {
@ -236,8 +274,8 @@ resource "aws_lb_ssl_negotiation_policy" "foo" {
}
attribute {
name = "Protocol-TLSv1.1"
value = "false"
}
value = "false"
}
attribute {
name = "Protocol-TLSv1.2"
value = "true"
@ -245,7 +283,7 @@ resource "aws_lb_ssl_negotiation_policy" "foo" {
attribute {
name = "Server-Defined-Cipher-Order"
value = "true"
}
}
attribute {
name = "ECDHE-RSA-AES128-GCM-SHA256"
value = "true"
@ -259,5 +297,5 @@ resource "aws_lb_ssl_negotiation_policy" "foo" {
value = "false"
}
}
`, certName)
`, certName, lbName)
}

View File

@ -2,9 +2,6 @@ package aws
import (
"bytes"
"crypto/sha1"
"encoding/base64"
"encoding/hex"
"fmt"
"log"
"strconv"
@ -168,6 +165,7 @@ func resourceAwsSpotFleetRequest() *schema.Resource {
"ebs_optimized": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"iam_instance_profile": &schema.Schema{
Type: schema.TypeString,
@ -194,6 +192,7 @@ func resourceAwsSpotFleetRequest() *schema.Resource {
"monitoring": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"placement_group": &schema.Schema{
Type: schema.TypeString,
@ -213,8 +212,7 @@ func resourceAwsSpotFleetRequest() *schema.Resource {
StateFunc: func(v interface{}) string {
switch v.(type) {
case string:
hash := sha1.Sum([]byte(v.(string)))
return hex.EncodeToString(hash[:])
return userDataHashSum(v.(string))
default:
return ""
}
@ -323,8 +321,7 @@ func buildSpotFleetLaunchSpecification(d map[string]interface{}, meta interface{
}
if v, ok := d["user_data"]; ok {
opts.UserData = aws.String(
base64Encode([]byte(v.(string))))
opts.UserData = aws.String(base64Encode([]byte(v.(string))))
}
if v, ok := d["key_name"]; ok {
@ -339,21 +336,11 @@ func buildSpotFleetLaunchSpecification(d map[string]interface{}, meta interface{
opts.WeightedCapacity = aws.Float64(wc)
}
var groups []*string
if v, ok := d["security_groups"]; ok {
sgs := v.(*schema.Set).List()
for _, v := range sgs {
str := v.(string)
groups = append(groups, aws.String(str))
}
}
var groupIds []*string
var securityGroupIds []*string
if v, ok := d["vpc_security_group_ids"]; ok {
if s := v.(*schema.Set); s.Len() > 0 {
for _, v := range s.List() {
opts.SecurityGroups = append(opts.SecurityGroups, &ec2.GroupIdentifier{GroupId: aws.String(v.(string))})
groupIds = append(groupIds, aws.String(v.(string)))
securityGroupIds = append(securityGroupIds, aws.String(v.(string)))
}
}
}
@ -378,11 +365,15 @@ func buildSpotFleetLaunchSpecification(d map[string]interface{}, meta interface{
DeleteOnTermination: aws.Bool(true),
DeviceIndex: aws.Int64(int64(0)),
SubnetId: aws.String(subnetId.(string)),
Groups: groupIds,
Groups: securityGroupIds,
}
opts.NetworkInterfaces = []*ec2.InstanceNetworkInterfaceSpecification{ni}
opts.SubnetId = aws.String("")
} else {
for _, id := range securityGroupIds {
opts.SecurityGroups = append(opts.SecurityGroups, &ec2.GroupIdentifier{GroupId: id})
}
}
blockDevices, err := readSpotFleetBlockDeviceMappingsFromConfig(d, conn)
@ -730,24 +721,20 @@ func resourceAwsSpotFleetRequestRead(d *schema.ResourceData, meta interface{}) e
return nil
}
func launchSpecsToSet(ls []*ec2.SpotFleetLaunchSpecification, conn *ec2.EC2) *schema.Set {
specs := &schema.Set{F: hashLaunchSpecification}
for _, val := range ls {
dn, err := fetchRootDeviceName(aws.StringValue(val.ImageId), conn)
func launchSpecsToSet(launchSpecs []*ec2.SpotFleetLaunchSpecification, conn *ec2.EC2) *schema.Set {
specSet := &schema.Set{F: hashLaunchSpecification}
for _, spec := range launchSpecs {
rootDeviceName, err := fetchRootDeviceName(aws.StringValue(spec.ImageId), conn)
if err != nil {
log.Panic(err)
} else {
ls := launchSpecToMap(val, dn)
specs.Add(ls)
}
specSet.Add(launchSpecToMap(spec, rootDeviceName))
}
return specs
return specSet
}
func launchSpecToMap(
l *ec2.SpotFleetLaunchSpecification,
rootDevName *string,
) map[string]interface{} {
func launchSpecToMap(l *ec2.SpotFleetLaunchSpecification, rootDevName *string) map[string]interface{} {
m := make(map[string]interface{})
m["root_block_device"] = rootBlockDeviceToSet(l.BlockDeviceMappings, rootDevName)
@ -779,10 +766,7 @@ func launchSpecToMap(
}
if l.UserData != nil {
ud_dec, err := base64.StdEncoding.DecodeString(aws.StringValue(l.UserData))
if err == nil {
m["user_data"] = string(ud_dec)
}
m["user_data"] = userDataHashSum(aws.StringValue(l.UserData))
}
if l.KeyName != nil {
@ -797,11 +781,23 @@ func launchSpecToMap(
m["subnet_id"] = aws.StringValue(l.SubnetId)
}
securityGroupIds := &schema.Set{F: schema.HashString}
if len(l.NetworkInterfaces) > 0 {
// This resource auto-creates one network interface when associate_public_ip_address is true
for _, group := range l.NetworkInterfaces[0].Groups {
securityGroupIds.Add(aws.StringValue(group))
}
} else {
for _, group := range l.SecurityGroups {
securityGroupIds.Add(aws.StringValue(group.GroupId))
}
}
m["vpc_security_group_ids"] = securityGroupIds
if l.WeightedCapacity != nil {
m["weighted_capacity"] = strconv.FormatFloat(*l.WeightedCapacity, 'f', 0, 64)
}
// m["security_groups"] = securityGroupsToSet(l.SecutiryGroups)
return m
}
@ -1009,7 +1005,6 @@ func hashLaunchSpecification(v interface{}) int {
}
buf.WriteString(fmt.Sprintf("%s-", m["instance_type"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["spot_price"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["user_data"].(string)))
return hashcode.String(buf.String())
}

View File

@ -100,9 +100,9 @@ func TestAccAWSSpotFleetRequest_lowestPriceAzInGivenList(t *testing.T) {
resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.#", "2"),
resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.1590006269.availability_zone", "us-west-2a"),
"aws_spot_fleet_request.foo", "launch_specification.335709043.availability_zone", "us-west-2a"),
resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.3809475891.availability_zone", "us-west-2b"),
"aws_spot_fleet_request.foo", "launch_specification.1671188867.availability_zone", "us-west-2b"),
),
},
},
@ -154,13 +154,13 @@ func TestAccAWSSpotFleetRequest_multipleInstanceTypesInSameAz(t *testing.T) {
resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.#", "2"),
resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.1590006269.instance_type", "m1.small"),
"aws_spot_fleet_request.foo", "launch_specification.335709043.instance_type", "m1.small"),
resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.1590006269.availability_zone", "us-west-2a"),
"aws_spot_fleet_request.foo", "launch_specification.335709043.availability_zone", "us-west-2a"),
resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.3079734941.instance_type", "m3.large"),
"aws_spot_fleet_request.foo", "launch_specification.590403189.instance_type", "m3.large"),
resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.3079734941.availability_zone", "us-west-2a"),
"aws_spot_fleet_request.foo", "launch_specification.590403189.availability_zone", "us-west-2a"),
),
},
},
@ -214,13 +214,13 @@ func TestAccAWSSpotFleetRequest_overriddingSpotPrice(t *testing.T) {
resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.#", "2"),
resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.522395050.spot_price", "0.01"),
"aws_spot_fleet_request.foo", "launch_specification.4143232216.spot_price", "0.01"),
resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.522395050.instance_type", "m3.large"),
"aws_spot_fleet_request.foo", "launch_specification.4143232216.instance_type", "m3.large"),
resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.1590006269.spot_price", ""), //there will not be a value here since it's not overriding
"aws_spot_fleet_request.foo", "launch_specification.335709043.spot_price", ""), //there will not be a value here since it's not overriding
resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.1590006269.instance_type", "m1.small"),
"aws_spot_fleet_request.foo", "launch_specification.335709043.instance_type", "m1.small"),
),
},
},
@ -289,13 +289,13 @@ func TestAccAWSSpotFleetRequest_withWeightedCapacity(t *testing.T) {
resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.#", "2"),
resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.2325690000.weighted_capacity", "3"),
"aws_spot_fleet_request.foo", "launch_specification.4120185872.weighted_capacity", "3"),
resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.2325690000.instance_type", "r3.large"),
"aws_spot_fleet_request.foo", "launch_specification.4120185872.instance_type", "r3.large"),
resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.3079734941.weighted_capacity", "6"),
"aws_spot_fleet_request.foo", "launch_specification.590403189.weighted_capacity", "6"),
resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "launch_specification.3079734941.instance_type", "m3.large"),
"aws_spot_fleet_request.foo", "launch_specification.590403189.instance_type", "m3.large"),
),
},
},

View File

@ -77,6 +77,25 @@ func resourceAwsVolumeAttachmentCreate(d *schema.ResourceData, meta interface{})
vols, err := conn.DescribeVolumes(request)
if (err != nil) || (len(vols.Volumes) == 0) {
// This handles the situation where the instance is created by
// a spot request and whilst the request has been fulfilled the
// instance is not running yet
stateConf := &resource.StateChangeConf{
Pending: []string{"pending"},
Target: []string{"running"},
Refresh: InstanceStateRefreshFunc(conn, iID),
Timeout: 10 * time.Minute,
Delay: 10 * time.Second,
MinTimeout: 3 * time.Second,
}
_, err = stateConf.WaitForState()
if err != nil {
return fmt.Errorf(
"Error waiting for instance (%s) to become ready: %s",
iID, err)
}
// not attached
opts := &ec2.AttachVolumeInput{
Device: aws.String(name),

View File

@ -2,6 +2,8 @@ package aws
import (
"encoding/base64"
"encoding/json"
"reflect"
"regexp"
)
@ -24,3 +26,17 @@ func isBase64Encoded(data []byte) bool {
func looksLikeJsonString(s interface{}) bool {
return regexp.MustCompile(`^\s*{`).MatchString(s.(string))
}
// jsonBytesEqual reports whether b1 and b2 contain semantically equal JSON
// documents: both are decoded before comparison, so whitespace, key order
// and quoting differences are ignored. If either input fails to parse as
// JSON the result is false.
func jsonBytesEqual(b1, b2 []byte) bool {
	var v1, v2 interface{}
	if json.Unmarshal(b1, &v1) != nil {
		return false
	}
	if json.Unmarshal(b2, &v2) != nil {
		return false
	}
	return reflect.DeepEqual(v1, v2)
}

View File

@ -32,3 +32,41 @@ func TestLooksLikeJsonString(t *testing.T) {
t.Errorf("Expected looksLikeJson to return false for %s", doesNotLookLikeJson)
}
}
// TestJsonBytesEqualQuotedAndUnquoted checks jsonBytesEqual across the two Go
// string-literal spellings (raw backtick vs. interpreted with escapes) of the
// same JSON document, plus a pair whose values differ.
func TestJsonBytesEqualQuotedAndUnquoted(t *testing.T) {
	raw, escaped := `{"test": "test"}`, "{\"test\": \"test\"}"
	if !jsonBytesEqual([]byte(raw), []byte(escaped)) {
		t.Errorf("Expected jsonBytesEqual to return true for %s == %s", raw, escaped)
	}

	rawDiff, escapedDiff := `{"test": "test"}`, "{\"test\": \"tested\"}"
	if jsonBytesEqual([]byte(rawDiff), []byte(escapedDiff)) {
		t.Errorf("Expected jsonBytesEqual to return false for %s == %s", rawDiff, escapedDiff)
	}
}
// TestJsonBytesEqualWhitespaceAndNoWhitespace verifies that jsonBytesEqual
// ignores formatting whitespace between otherwise identical JSON documents,
// and still reports documents with different values as unequal.
func TestJsonBytesEqualWhitespaceAndNoWhitespace(t *testing.T) {
	compact := `{"test":"test"}`
	pretty := `
{
  "test": "test"
}`
	if !jsonBytesEqual([]byte(compact), []byte(pretty)) {
		t.Errorf("Expected jsonBytesEqual to return true for %s == %s", compact, pretty)
	}

	compactDiff := `{"test":"test"}`
	prettyDiff := `
{
  "test": "tested"
}`
	if jsonBytesEqual([]byte(compactDiff), []byte(prettyDiff)) {
		t.Errorf("Expected jsonBytesEqual to return false for %s == %s", compactDiff, prettyDiff)
	}
}

View File

@ -1,6 +1,9 @@
package azurerm
import (
"crypto/sha1"
"encoding/base64"
"encoding/hex"
"fmt"
"log"
"reflect"
@ -325,3 +328,31 @@ func ignoreCaseDiffSuppressFunc(k, old, new string, d *schema.ResourceData) bool
func ignoreCaseStateFunc(val interface{}) string {
return strings.ToLower(val.(string))
}
// userDataStateFunc normalizes custom data before it is written to state:
// string values are base64-encoded (if not already) and then reduced to the
// hex-encoded SHA-1 of that encoding, so only a small digest is persisted
// rather than the full payload. Non-string values yield "".
func userDataStateFunc(v interface{}) string {
	s, ok := v.(string)
	if !ok {
		return ""
	}
	digest := sha1.Sum([]byte(base64Encode(s)))
	return hex.EncodeToString(digest[:])
}
// Base64Encode encodes data if the input isn't already encoded using
// base64.StdEncoding.EncodeToString. If the input is already base64 encoded,
// return the original input unchanged.
func base64Encode(data string) string {
// Check whether the data is already Base64 encoded; don't double-encode
if isBase64Encoded(data) {
return data
}
// data has not been encoded encode and return
return base64.StdEncoding.EncodeToString([]byte(data))
}
// isBase64Encoded reports whether data is a valid standard-base64 string
// (i.e. it decodes without error).
func isBase64Encoded(data string) bool {
	if _, err := base64.StdEncoding.DecodeString(data); err != nil {
		return false
	}
	return true
}

View File

@ -4,6 +4,7 @@ import (
"fmt"
"log"
"net/http"
"strings"
"github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/hashicorp/terraform/helper/schema"
@ -65,6 +66,13 @@ func resourceArmAvailabilitySet() *schema.Resource {
},
},
"managed": {
Type: schema.TypeBool,
Optional: true,
Default: false,
ForceNew: true,
},
"tags": tagsSchema(),
},
}
@ -82,6 +90,7 @@ func resourceArmAvailabilitySetCreate(d *schema.ResourceData, meta interface{})
updateDomainCount := d.Get("platform_update_domain_count").(int)
faultDomainCount := d.Get("platform_fault_domain_count").(int)
tags := d.Get("tags").(map[string]interface{})
managed := d.Get("managed").(bool)
availSet := compute.AvailabilitySet{
Name: &name,
@ -93,6 +102,13 @@ func resourceArmAvailabilitySetCreate(d *schema.ResourceData, meta interface{})
Tags: expandTags(tags),
}
if managed == true {
n := "Aligned"
availSet.Sku = &compute.Sku{
Name: &n,
}
}
resp, err := availSetClient.CreateOrUpdate(resGroup, name, availSet)
if err != nil {
return err
@ -129,6 +145,10 @@ func resourceArmAvailabilitySetRead(d *schema.ResourceData, meta interface{}) er
d.Set("name", resp.Name)
d.Set("location", resp.Location)
if resp.Sku != nil && resp.Sku.Name != nil {
d.Set("managed", strings.EqualFold(*resp.Sku.Name, "Aligned"))
}
flattenAndSetTags(d, resp.Tags)
return nil

View File

@ -122,6 +122,27 @@ func TestAccAzureRMAvailabilitySet_withDomainCounts(t *testing.T) {
})
}
// TestAccAzureRMAvailabilitySet_managed is an acceptance test for the
// "managed" flag on azurerm_availability_set: it applies a config with
// managed = true and asserts the attribute round-trips as "true".
func TestAccAzureRMAvailabilitySet_managed(t *testing.T) {
	rInt := acctest.RandInt()
	cfg := fmt.Sprintf(testAccAzureRMVAvailabilitySet_managed, rInt, rInt)

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testCheckAzureRMAvailabilitySetDestroy,
		Steps: []resource.TestStep{
			{
				Config: cfg,
				Check: resource.ComposeTestCheckFunc(
					testCheckAzureRMAvailabilitySetExists("azurerm_availability_set.test"),
					resource.TestCheckResourceAttr("azurerm_availability_set.test", "managed", "true"),
				),
			},
		},
	})
}
func testCheckAzureRMAvailabilitySetExists(name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
// Ensure we have enough information in state to look up in API
@ -224,8 +245,8 @@ resource "azurerm_availability_set" "test" {
resource_group_name = "${azurerm_resource_group.test.name}"
tags {
environment = "Production"
cost_center = "MSFT"
environment = "Production"
cost_center = "MSFT"
}
}
`
@ -241,7 +262,7 @@ resource "azurerm_availability_set" "test" {
resource_group_name = "${azurerm_resource_group.test.name}"
tags {
environment = "staging"
environment = "staging"
}
}
`
@ -259,3 +280,18 @@ resource "azurerm_availability_set" "test" {
platform_fault_domain_count = 1
}
`
var testAccAzureRMVAvailabilitySet_managed = `
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "West US"
}
resource "azurerm_availability_set" "test" {
name = "acctestavset-%d"
location = "West US"
resource_group_name = "${azurerm_resource_group.test.name}"
platform_update_domain_count = 10
platform_fault_domain_count = 1
managed = true
}
`

View File

@ -360,7 +360,7 @@ func flattenAzureRmContainerServiceLinuxProfile(profile containerservice.LinuxPr
}
values["admin_username"] = *profile.AdminUsername
values["ssh_key"] = &sshKeys
values["ssh_key"] = sshKeys
profiles.Add(values)
return profiles

View File

@ -154,7 +154,7 @@ func TestAccAzureRMContainerService_swarmBasic(t *testing.T) {
var testAccAzureRMContainerService_dcosBasic = `
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "West US"
location = "East US"
}
resource "azurerm_container_service" "test" {
@ -192,7 +192,7 @@ resource "azurerm_container_service" "test" {
var testAccAzureRMContainerService_kubernetesBasic = `
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "West US"
location = "East US"
}
resource "azurerm_container_service" "test" {
@ -235,7 +235,7 @@ resource "azurerm_container_service" "test" {
var testAccAzureRMContainerService_kubernetesComplete = `
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "West US"
location = "East US"
}
resource "azurerm_container_service" "test" {
@ -282,7 +282,7 @@ resource "azurerm_container_service" "test" {
var testAccAzureRMContainerService_swarmBasic = `
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "West US"
location = "East US"
}
resource "azurerm_container_service" "test" {

View File

@ -2,7 +2,6 @@ package azurerm
import (
"bytes"
"encoding/base64"
"fmt"
"log"
"net/http"
@ -332,9 +331,10 @@ func resourceArmVirtualMachine() *schema.Resource {
},
"custom_data": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Type: schema.TypeString,
Optional: true,
Computed: true,
StateFunc: userDataStateFunc,
},
},
},
@ -955,14 +955,7 @@ func flattenAzureRmVirtualMachineOsProfile(osProfile *compute.OSProfile) []inter
result["computer_name"] = *osProfile.ComputerName
result["admin_username"] = *osProfile.AdminUsername
if osProfile.CustomData != nil {
var data string
if isBase64Encoded(*osProfile.CustomData) {
decodedData, _ := base64.StdEncoding.DecodeString(*osProfile.CustomData)
data = string(decodedData)
} else {
data = *osProfile.CustomData
}
result["custom_data"] = data
result["custom_data"] = *osProfile.CustomData
}
return []interface{}{result}
@ -1121,12 +1114,7 @@ func expandAzureRmVirtualMachineOsProfile(d *schema.ResourceData) (*compute.OSPr
}
if v := osProfile["custom_data"].(string); v != "" {
if isBase64Encoded(v) {
log.Printf("[WARN] Future Versions of Terraform will automatically base64encode custom_data")
} else {
v = base64Encode(v)
}
v = base64Encode(v)
profile.CustomData = &v
}

View File

@ -2,7 +2,6 @@ package azurerm
import (
"bytes"
"encoding/base64"
"fmt"
"log"
"net/http"
@ -93,8 +92,9 @@ func resourceArmVirtualMachineScaleSet() *schema.Resource {
},
"custom_data": {
Type: schema.TypeString,
Optional: true,
Type: schema.TypeString,
Optional: true,
StateFunc: userDataStateFunc,
},
},
},
@ -652,14 +652,7 @@ func flattenAzureRMVirtualMachineScaleSetOsProfile(profile *compute.VirtualMachi
result["admin_username"] = *profile.AdminUsername
if profile.CustomData != nil {
var data string
if isBase64Encoded(*profile.CustomData) {
decodedData, _ := base64.StdEncoding.DecodeString(*profile.CustomData)
data = string(decodedData)
} else {
data = *profile.CustomData
}
result["custom_data"] = data
result["custom_data"] = *profile.CustomData
}
return []interface{}{result}
@ -861,15 +854,6 @@ func expandAzureRmVirtualMachineScaleSetNetworkProfile(d *schema.ResourceData) *
}
}
func base64Encode(data string) string {
return base64.StdEncoding.EncodeToString([]byte(data))
}
func isBase64Encoded(data string) bool {
_, err := base64.StdEncoding.DecodeString(data)
return err == nil
}
func expandAzureRMVirtualMachineScaleSetsOsProfile(d *schema.ResourceData) (*compute.VirtualMachineScaleSetOSProfile, error) {
osProfileConfigs := d.Get("os_profile").(*schema.Set).List()
@ -889,11 +873,7 @@ func expandAzureRMVirtualMachineScaleSetsOsProfile(d *schema.ResourceData) (*com
}
if customData != "" {
if isBase64Encoded(customData) {
log.Printf("[WARN] Future Versions of Terraform will automatically base64encode custom_data")
} else {
customData = base64Encode(customData)
}
customData = base64Encode(customData)
osProfile.CustomData = &customData
}

View File

@ -3,6 +3,7 @@ package cobbler
import (
"fmt"
"log"
"strings"
"github.com/hashicorp/terraform/helper/schema"
cobbler "github.com/jtopjian/cobblerclient"
@ -261,7 +262,6 @@ func resourceProfileRead(d *schema.ResourceData, meta interface{}) error {
d.Set("proxy", profile.Proxy)
d.Set("redhat_management_key", profile.RedHatManagementKey)
d.Set("redhat_management_server", profile.RedHatManagementServer)
d.Set("repos", profile.Repos)
d.Set("template_files", profile.TemplateFiles)
d.Set("template_remote_kickstarts", profile.TemplateRemoteKickstarts)
d.Set("virt_auto_boot", profile.VirtAutoBoot)
@ -273,6 +273,10 @@ func resourceProfileRead(d *schema.ResourceData, meta interface{}) error {
d.Set("virt_ram", profile.VirtRam)
d.Set("virt_type", profile.VirtType)
// Split repos into a list
repos := strings.Split(profile.Repos, " ")
d.Set("repos", repos)
return nil
}
@ -351,7 +355,7 @@ func buildProfile(d *schema.ResourceData, meta interface{}) cobbler.Profile {
Proxy: d.Get("proxy").(string),
RedHatManagementKey: d.Get("redhat_management_key").(string),
RedHatManagementServer: d.Get("redhat_management_server").(string),
Repos: repos,
Repos: strings.Join(repos, " "),
Server: d.Get("server").(string),
TemplateFiles: d.Get("template_files").(string),
TemplateRemoteKickstarts: d.Get("template_remote_kickstarts").(int),

View File

@ -57,6 +57,26 @@ func TestAccCobblerProfile_change(t *testing.T) {
})
}
// TestAccCobblerProfile_withRepo is an acceptance test for a cobbler_profile
// that attaches a repo list; it verifies both the distro and the profile are
// created on the Cobbler server.
func TestAccCobblerProfile_withRepo(t *testing.T) {
	var (
		distro  cobbler.Distro
		profile cobbler.Profile
	)

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccCobblerPreCheck(t) },
		Providers:    testAccCobblerProviders,
		CheckDestroy: testAccCobblerCheckProfileDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccCobblerProfile_withRepo,
				Check: resource.ComposeTestCheckFunc(
					testAccCobblerCheckDistroExists(t, "cobbler_distro.foo", &distro),
					testAccCobblerCheckProfileExists(t, "cobbler_profile.foo", &profile),
				),
			},
		},
	})
}
func testAccCobblerCheckProfileDestroy(s *terraform.State) error {
config := testAccCobblerProvider.Meta().(*Config)
@ -147,3 +167,20 @@ var testAccCobblerProfile_change_2 = `
comment = "I am a profile again"
distro = "${cobbler_distro.foo.name}"
}`
var testAccCobblerProfile_withRepo = `
resource "cobbler_distro" "foo" {
name = "foo"
breed = "ubuntu"
os_version = "trusty"
arch = "x86_64"
kernel = "/var/www/cobbler/ks_mirror/Ubuntu-14.04/install/netboot/ubuntu-installer/amd64/linux"
initrd = "/var/www/cobbler/ks_mirror/Ubuntu-14.04/install/netboot/ubuntu-installer/amd64/initrd.gz"
}
resource "cobbler_profile" "foo" {
name = "foo"
comment = "I am a profile again"
distro = "${cobbler_distro.foo.name}"
repos = ["Ubuntu-14.04-x86_64"]
}`

View File

@ -100,6 +100,8 @@ func TestAccCobblerSystem_removeInterface(t *testing.T) {
testAccCobblerCheckDistroExists(t, "cobbler_distro.foo", &distro),
testAccCobblerCheckProfileExists(t, "cobbler_profile.foo", &profile),
testAccCobblerCheckSystemExists(t, "cobbler_system.foo", &system),
resource.TestCheckResourceAttr(
"cobbler_system.foo", "interface.586365610.management", "true"),
),
},
resource.TestStep{
@ -345,6 +347,7 @@ var testAccCobblerSystem_removeInterface_1 = `
static = true
ip_address = "1.2.3.5"
netmask = "255.255.255.0"
management = true
}
}`

View File

@ -324,7 +324,9 @@ func resourceDatadogMonitorUpdate(d *schema.ResourceData, meta interface{}) erro
}
o := datadog.Options{
NotifyNoData: datadog.Bool(d.Get("notify_no_data").(bool)),
NotifyNoData: datadog.Bool(d.Get("notify_no_data").(bool)),
RequireFullWindow: datadog.Bool(d.Get("require_full_window").(bool)),
IncludeTags: datadog.Bool(d.Get("include_tags").(bool)),
}
if attr, ok := d.GetOk("thresholds"); ok {
thresholds := attr.(map[string]interface{})
@ -340,9 +342,6 @@ func resourceDatadogMonitorUpdate(d *schema.ResourceData, meta interface{}) erro
}
}
if attr, ok := d.GetOk("notify_no_data"); ok {
o.SetNotifyNoData(attr.(bool))
}
if attr, ok := d.GetOk("new_host_delay"); ok {
o.SetNewHostDelay(attr.(int))
}
@ -369,12 +368,6 @@ func resourceDatadogMonitorUpdate(d *schema.ResourceData, meta interface{}) erro
}
o.Silenced = s
}
if attr, ok := d.GetOk("include_tags"); ok {
o.SetIncludeTags(attr.(bool))
}
if attr, ok := d.GetOk("require_full_window"); ok {
o.SetRequireFullWindow(attr.(bool))
}
if attr, ok := d.GetOk("locked"); ok {
o.SetLocked(attr.(bool))
}

View File

@ -138,6 +138,33 @@ func resourceDockerContainer() *schema.Resource {
ForceNew: true,
},
"capabilities": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
ForceNew: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"add": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"drop": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
},
},
Set: resourceDockerCapabilitiesHash,
},
"volumes": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
@ -396,6 +423,21 @@ func resourceDockerContainer() *schema.Resource {
}
}
// resourceDockerCapabilitiesHash computes the set hash for one "capabilities"
// block from its "add" and "drop" capability lists.
func resourceDockerCapabilitiesHash(v interface{}) int {
	var buf bytes.Buffer
	m := v.(map[string]interface{})

	if v, ok := m["add"]; ok {
		buf.WriteString(fmt.Sprintf("%v-", v))
	}

	// BUG FIX: the schema key is "drop", not "remove". Hashing the wrong key
	// meant changes to the drop list never altered the hash, so Terraform
	// could not detect drift in dropped capabilities.
	if v, ok := m["drop"]; ok {
		buf.WriteString(fmt.Sprintf("%v-", v))
	}

	return hashcode.String(buf.String())
}
func resourceDockerPortsHash(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})

View File

@ -126,6 +126,15 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
hostConfig.VolumesFrom = volumesFrom
}
if v, ok := d.GetOk("capabilities"); ok {
for _, capInt := range v.(*schema.Set).List() {
capa := capInt.(map[string]interface{})
hostConfig.CapAdd = stringSetToStringSlice(capa["add"].(*schema.Set))
hostConfig.CapDrop = stringSetToStringSlice(capa["drop"].(*schema.Set))
break
}
}
if v, ok := d.GetOk("dns"); ok {
hostConfig.DNS = stringSetToStringSlice(v.(*schema.Set))
}

View File

@ -128,6 +128,22 @@ func TestAccDockerContainer_customized(t *testing.T) {
return fmt.Errorf("Container has wrong dns search setting: %v", c.HostConfig.DNS[0])
}
if len(c.HostConfig.CapAdd) != 1 {
return fmt.Errorf("Container does not have the correct number of Capabilities in ADD: %d", len(c.HostConfig.CapAdd))
}
if c.HostConfig.CapAdd[0] != "ALL" {
return fmt.Errorf("Container has wrong CapAdd setting: %v", c.HostConfig.CapAdd[0])
}
if len(c.HostConfig.CapDrop) != 1 {
return fmt.Errorf("Container does not have the correct number of Capabilities in Drop: %d", len(c.HostConfig.CapDrop))
}
if c.HostConfig.CapDrop[0] != "SYS_ADMIN" {
return fmt.Errorf("Container has wrong CapDrop setting: %v", c.HostConfig.CapDrop[0])
}
if c.HostConfig.CPUShares != 32 {
return fmt.Errorf("Container has wrong cpu shares setting: %d", c.HostConfig.CPUShares)
}
@ -311,6 +327,12 @@ resource "docker_container" "foo" {
memory = 512
memory_swap = 2048
cpu_shares = 32
capabilities {
add= ["ALL"]
drop = ["SYS_ADMIN"]
}
dns = ["8.8.8.8"]
dns_opts = ["rotate"]
dns_search = ["example.com"]

View File

@ -0,0 +1,59 @@
package google
import (
"fmt"
"log"
"time"
"github.com/hashicorp/terraform/helper/resource"
"google.golang.org/api/container/v1"
)
// ContainerOperationWaiter bundles everything needed to poll a zonal
// Container Engine (GKE) operation until it reaches a terminal state.
type ContainerOperationWaiter struct {
	Service *container.Service   // authenticated container API client
	Op      *container.Operation // the operation being waited on
	Project string               // project that owns the operation
	Zone    string               // zone in which the operation runs
}
// Conf builds a StateChangeConf that treats PENDING/RUNNING as transient
// states and DONE as the target for the wrapped operation. Timeout and
// MinTimeout are left for the caller to fill in.
func (w *ContainerOperationWaiter) Conf() *resource.StateChangeConf {
	conf := &resource.StateChangeConf{
		Pending: []string{"PENDING", "RUNNING"},
		Target:  []string{"DONE"},
		Refresh: w.RefreshFunc(),
	}
	return conf
}
// RefreshFunc returns a StateRefreshFunc that fetches the operation's current
// status from the container API and reports it as the state string.
func (w *ContainerOperationWaiter) RefreshFunc() resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		resp, err := w.Service.Projects.Zones.Operations.Get(w.Project, w.Zone, w.Op.Name).Do()
		if err != nil {
			return nil, "", err
		}

		log.Printf("[DEBUG] Progress of operation %q: %q", w.Op.Name, resp.Status)

		return resp, resp.Status, nil
	}
}
// containerOperationWait blocks until the given Container Engine operation
// reaches the DONE state or the timeout elapses. activity is a human-readable
// description of the work in progress and is interpolated into the returned
// error on failure.
func containerOperationWait(config *Config, op *container.Operation, project, zone, activity string, timeoutMinutes, minTimeoutSeconds int) error {
	waiter := &ContainerOperationWaiter{
		Service: config.clientContainer,
		Op:      op,
		Project: project,
		Zone:    zone,
	}

	conf := waiter.Conf()
	conf.Timeout = time.Duration(timeoutMinutes) * time.Minute
	conf.MinTimeout = time.Duration(minTimeoutSeconds) * time.Second

	if _, err := conf.WaitForState(); err != nil {
		return fmt.Errorf("Error waiting for %s: %s", activity, err)
	}
	return nil
}

View File

@ -91,6 +91,7 @@ func Provider() terraform.ResourceProvider {
"google_compute_vpn_gateway": resourceComputeVpnGateway(),
"google_compute_vpn_tunnel": resourceComputeVpnTunnel(),
"google_container_cluster": resourceContainerCluster(),
"google_container_node_pool": resourceContainerNodePool(),
"google_dns_managed_zone": resourceDnsManagedZone(),
"google_dns_record_set": resourceDnsRecordSet(),
"google_sql_database": resourceSqlDatabase(),

View File

@ -214,7 +214,7 @@ func resourceComputeDiskDelete(d *schema.ResourceData, meta interface{}) error {
}
zone := d.Get("zone").(string)
err = computeOperationWaitZone(config, op, project, zone, "Creating Disk")
err = computeOperationWaitZone(config, op, project, zone, "Deleting Disk")
if err != nil {
return err
}

View File

@ -5,9 +5,7 @@ import (
"log"
"net"
"regexp"
"time"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/container/v1"
"google.golang.org/api/googleapi"
@ -389,23 +387,11 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
}
// Wait until it's created
wait := resource.StateChangeConf{
Pending: []string{"PENDING", "RUNNING"},
Target: []string{"DONE"},
Timeout: 30 * time.Minute,
MinTimeout: 3 * time.Second,
Refresh: func() (interface{}, string, error) {
resp, err := config.clientContainer.Projects.Zones.Operations.Get(
project, zoneName, op.Name).Do()
log.Printf("[DEBUG] Progress of creating GKE cluster %s: %s",
clusterName, resp.Status)
return resp, resp.Status, err
},
}
_, err = wait.WaitForState()
if err != nil {
return err
waitErr := containerOperationWait(config, op, project, zoneName, "creating GKE cluster", 30, 3)
if waitErr != nil {
// The resource didn't actually create
d.SetId("")
return waitErr
}
log.Printf("[INFO] GKE cluster %s has been created", clusterName)
@ -503,24 +489,9 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
}
// Wait until it's updated
wait := resource.StateChangeConf{
Pending: []string{"PENDING", "RUNNING"},
Target: []string{"DONE"},
Timeout: 10 * time.Minute,
MinTimeout: 2 * time.Second,
Refresh: func() (interface{}, string, error) {
log.Printf("[DEBUG] Checking if GKE cluster %s is updated", clusterName)
resp, err := config.clientContainer.Projects.Zones.Operations.Get(
project, zoneName, op.Name).Do()
log.Printf("[DEBUG] Progress of updating GKE cluster %s: %s",
clusterName, resp.Status)
return resp, resp.Status, err
},
}
_, err = wait.WaitForState()
if err != nil {
return err
waitErr := containerOperationWait(config, op, project, zoneName, "updating GKE cluster", 10, 2)
if waitErr != nil {
return waitErr
}
log.Printf("[INFO] GKE cluster %s has been updated to %s", d.Id(),
@ -548,24 +519,9 @@ func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) er
}
// Wait until it's deleted
wait := resource.StateChangeConf{
Pending: []string{"PENDING", "RUNNING"},
Target: []string{"DONE"},
Timeout: 10 * time.Minute,
MinTimeout: 3 * time.Second,
Refresh: func() (interface{}, string, error) {
log.Printf("[DEBUG] Checking if GKE cluster %s is deleted", clusterName)
resp, err := config.clientContainer.Projects.Zones.Operations.Get(
project, zoneName, op.Name).Do()
log.Printf("[DEBUG] Progress of deleting GKE cluster %s: %s",
clusterName, resp.Status)
return resp, resp.Status, err
},
}
_, err = wait.WaitForState()
if err != nil {
return err
waitErr := containerOperationWait(config, op, project, zoneName, "deleting GKE cluster", 10, 3)
if waitErr != nil {
return waitErr
}
log.Printf("[INFO] GKE cluster %s has been deleted", d.Id())

View File

@ -0,0 +1,191 @@
package google
import (
"fmt"
"log"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/container/v1"
"google.golang.org/api/googleapi"
)
// resourceContainerNodePool defines the google_container_node_pool resource.
// Every argument is ForceNew: the resource supports create, read, delete and
// existence checks, but no in-place update.
func resourceContainerNodePool() *schema.Resource {
	return &schema.Resource{
		Create: resourceContainerNodePoolCreate,
		Read:   resourceContainerNodePoolRead,
		Delete: resourceContainerNodePoolDelete,
		Exists: resourceContainerNodePoolExists,

		Schema: map[string]*schema.Schema{
			// Optional; the provider-level default project applies when unset.
			"project": {
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			// Explicit name, mutually exclusive with name_prefix. When neither
			// is supplied a unique ID is generated at create time.
			"name": {
				Type:          schema.TypeString,
				Optional:      true,
				Computed:      true,
				ConflictsWith: []string{"name_prefix"},
				ForceNew:      true,
			},

			"name_prefix": {
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"zone": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"cluster": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"initial_node_count": {
				Type:     schema.TypeInt,
				Required: true,
				ForceNew: true,
			},
		},
	}
}
// resourceContainerNodePoolCreate creates the node pool in the target
// cluster, waits for the server-side operation to finish, then reads the
// resulting state back.
func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	zone := d.Get("zone").(string)
	cluster := d.Get("cluster").(string)

	// Resolve the pool name: explicit "name" wins, otherwise derive one from
	// "name_prefix", otherwise generate a fresh unique ID.
	var name string
	if v, ok := d.GetOk("name"); ok {
		name = v.(string)
	} else if v, ok := d.GetOk("name_prefix"); ok {
		name = resource.PrefixedUniqueId(v.(string))
	} else {
		name = resource.UniqueId()
	}

	req := &container.CreateNodePoolRequest{
		NodePool: &container.NodePool{
			Name:             name,
			InitialNodeCount: int64(d.Get("initial_node_count").(int)),
		},
	}

	op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Create(project, zone, cluster, req).Do()
	if err != nil {
		return fmt.Errorf("Error creating NodePool: %s", err)
	}

	// Block until the API reports the pool as created.
	if waitErr := containerOperationWait(config, op, project, zone, "creating GKE NodePool", 10, 3); waitErr != nil {
		// The resource didn't actually create
		d.SetId("")
		return waitErr
	}

	log.Printf("[INFO] GKE NodePool %s has been created", name)

	d.SetId(name)

	return resourceContainerNodePoolRead(d, meta)
}
// resourceContainerNodePoolRead refreshes name and initial_node_count from
// the live node pool.
func resourceContainerNodePoolRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	zone := d.Get("zone").(string)
	cluster := d.Get("cluster").(string)
	name := d.Get("name").(string)

	nodePool, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get(project, zone, cluster, name).Do()
	if err != nil {
		return fmt.Errorf("Error reading NodePool: %s", err)
	}

	d.Set("name", nodePool.Name)
	d.Set("initial_node_count", nodePool.InitialNodeCount)

	return nil
}
// resourceContainerNodePoolDelete removes the node pool and waits for the
// delete operation to complete before clearing the resource ID.
func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	zone := d.Get("zone").(string)
	cluster := d.Get("cluster").(string)
	name := d.Get("name").(string)

	op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Delete(project, zone, cluster, name).Do()
	if err != nil {
		return fmt.Errorf("Error deleting NodePool: %s", err)
	}

	// Wait until it's deleted
	if waitErr := containerOperationWait(config, op, project, zone, "deleting GKE NodePool", 10, 2); waitErr != nil {
		return waitErr
	}

	log.Printf("[INFO] GKE NodePool %s has been deleted", d.Id())

	d.SetId("")

	return nil
}
// resourceContainerNodePoolExists reports whether the node pool still exists
// on the API side. A 404 means the pool is gone and must be reported as
// (false, nil) so Terraform drops the resource from state; any other error is
// propagated so the refresh fails visibly.
func resourceContainerNodePoolExists(d *schema.ResourceData, meta interface{}) (bool, error) {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return false, err
	}

	zone := d.Get("zone").(string)
	cluster := d.Get("cluster").(string)
	name := d.Get("name").(string)

	_, err = config.clientContainer.Projects.Zones.Clusters.NodePools.Get(project, zone, cluster, name).Do()
	if err != nil {
		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
			log.Printf("[WARN] Removing Container NodePool %q because it's gone", name)
			// BUG FIX: previously the 404 error was returned alongside false,
			// which makes helper/schema abort the refresh with an error
			// instead of removing the vanished resource from state. Exists
			// must return a nil error for "definitely not there".
			return false, nil
		}
		// There was some other error in reading the resource
		return true, err
	}

	return true, nil
}

View File

@ -0,0 +1,101 @@
package google
import (
"fmt"
"strconv"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
// TestAccContainerNodePool_basic creates a GKE cluster plus a node pool and
// verifies the pool's attributes match what GCP reports.
func TestAccContainerNodePool_basic(t *testing.T) {
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckContainerNodePoolDestroy,
		Steps: []resource.TestStep{
			// Element type is implied by the slice literal (gofmt -s).
			{
				Config: testAccContainerNodePool_basic,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckContainerNodePoolMatches("google_container_node_pool.np"),
				),
			},
		},
	})
}
// testAccCheckContainerNodePoolDestroy verifies that no node pool tracked in
// Terraform state survives after the acceptance test tears everything down.
func testAccCheckContainerNodePoolDestroy(s *terraform.State) error {
	config := testAccProvider.Meta().(*Config)

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "google_container_node_pool" {
			continue
		}

		attrs := rs.Primary.Attributes
		// A successful Get means the pool was not destroyed.
		if _, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get(
			config.Project, attrs["zone"], attrs["cluster"], attrs["name"]).Do(); err == nil {
			return fmt.Errorf("NodePool still exists")
		}
	}

	return nil
}
// testAccCheckContainerNodePoolMatches fetches the node pool recorded in
// state at resource address n and asserts that the remote name and initial
// node count agree with the stored attributes.
func testAccCheckContainerNodePoolMatches(n string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		config := testAccProvider.Meta().(*Config)

		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}
		if rs.Primary.ID == "" {
			return fmt.Errorf("No ID is set")
		}

		attrs := rs.Primary.Attributes
		found, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get(
			config.Project, attrs["zone"], attrs["cluster"], attrs["name"]).Do()
		if err != nil {
			return err
		}

		if found.Name != attrs["name"] {
			return fmt.Errorf("NodePool not found")
		}

		wantCount, err := strconv.Atoi(attrs["initial_node_count"])
		if err != nil {
			return err
		}
		if found.InitialNodeCount != int64(wantCount) {
			return fmt.Errorf("Mismatched initialNodeCount. TF State: %s. GCP State: %d",
				attrs["initial_node_count"], found.InitialNodeCount)
		}

		return nil
	}
}
// testAccContainerNodePool_basic is the configuration used by
// TestAccContainerNodePool_basic: a small GKE cluster plus a two-node pool
// attached to it. Random suffixes keep concurrent test runs from colliding.
var testAccContainerNodePool_basic = fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
	name = "tf-cluster-nodepool-test-%s"
	zone = "us-central1-a"
	initial_node_count = 3
	master_auth {
		username = "mr.yoda"
		password = "adoy.rm"
	}
}
resource "google_container_node_pool" "np" {
	name = "tf-nodepool-test-%s"
	zone = "us-central1-a"
	cluster = "${google_container_cluster.cluster.name}"
	initial_node_count = 2
}`, acctest.RandString(10), acctest.RandString(10))

View File

@ -17,7 +17,7 @@ import (
func Provider() terraform.ResourceProvider {
return &schema.Provider{
ResourcesMap: map[string]*schema.Resource{
DataSourcesMap: map[string]*schema.Resource{
"ignition_config": resourceConfig(),
"ignition_disk": resourceDisk(),
"ignition_raid": resourceRaid(),
@ -28,6 +28,44 @@ func Provider() terraform.ResourceProvider {
"ignition_user": resourceUser(),
"ignition_group": resourceGroup(),
},
ResourcesMap: map[string]*schema.Resource{
"ignition_config": schema.DataSourceResourceShim(
"ignition_config",
resourceConfig(),
),
"ignition_disk": schema.DataSourceResourceShim(
"ignition_disk",
resourceDisk(),
),
"ignition_raid": schema.DataSourceResourceShim(
"ignition_raid",
resourceRaid(),
),
"ignition_filesystem": schema.DataSourceResourceShim(
"ignition_filesystem",
resourceFilesystem(),
),
"ignition_file": schema.DataSourceResourceShim(
"ignition_file",
resourceFile(),
),
"ignition_systemd_unit": schema.DataSourceResourceShim(
"ignition_systemd_unit",
resourceSystemdUnit(),
),
"ignition_networkd_unit": schema.DataSourceResourceShim(
"ignition_networkd_unit",
resourceNetworkdUnit(),
),
"ignition_user": schema.DataSourceResourceShim(
"ignition_user",
resourceUser(),
),
"ignition_group": schema.DataSourceResourceShim(
"ignition_group",
resourceGroup(),
),
},
ConfigureFunc: func(*schema.ResourceData) (interface{}, error) {
return &cache{
disks: make(map[string]*types.Disk, 0),

View File

@ -26,9 +26,6 @@ var configReferenceResource = &schema.Resource{
func resourceConfig() *schema.Resource {
return &schema.Resource{
Create: resourceIgnitionFileCreate,
Update: resourceIgnitionFileCreate,
Delete: resourceIgnitionFileDelete,
Exists: resourceIgnitionFileExists,
Read: resourceIgnitionFileRead,
Schema: map[string]*schema.Schema{
@ -93,7 +90,7 @@ func resourceConfig() *schema.Resource {
}
}
func resourceIgnitionFileCreate(d *schema.ResourceData, meta interface{}) error {
func resourceIgnitionFileRead(d *schema.ResourceData, meta interface{}) error {
rendered, err := renderConfig(d, meta.(*cache))
if err != nil {
return err
@ -107,11 +104,6 @@ func resourceIgnitionFileCreate(d *schema.ResourceData, meta interface{}) error
return nil
}
func resourceIgnitionFileDelete(d *schema.ResourceData, meta interface{}) error {
d.SetId("")
return nil
}
func resourceIgnitionFileExists(d *schema.ResourceData, meta interface{}) (bool, error) {
rendered, err := renderConfig(d, meta.(*cache))
if err != nil {
@ -121,10 +113,6 @@ func resourceIgnitionFileExists(d *schema.ResourceData, meta interface{}) (bool,
return hash(rendered) == d.Id(), nil
}
func resourceIgnitionFileRead(d *schema.ResourceData, meta interface{}) error {
return nil
}
func renderConfig(d *schema.ResourceData, c *cache) (string, error) {
i, err := buildConfig(d, c)
if err != nil {

View File

@ -12,7 +12,7 @@ import (
func TestIngnitionFileReplace(t *testing.T) {
testIgnition(t, `
resource "ignition_config" "test" {
data "ignition_config" "test" {
replace {
source = "foo"
verification = "sha512-0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
@ -38,7 +38,7 @@ func TestIngnitionFileReplace(t *testing.T) {
func TestIngnitionFileAppend(t *testing.T) {
testIgnition(t, `
resource "ignition_config" "test" {
data "ignition_config" "test" {
append {
source = "foo"
verification = "sha512-0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
@ -95,7 +95,7 @@ var testTemplate = `
%s
output "rendered" {
value = "${ignition_config.test.rendered}"
value = "${data.ignition_config.test.rendered}"
}
`

View File

@ -7,8 +7,6 @@ import (
func resourceDisk() *schema.Resource {
return &schema.Resource{
Create: resourceDiskCreate,
Delete: resourceDiskDelete,
Exists: resourceDiskExists,
Read: resourceDiskRead,
Schema: map[string]*schema.Schema{
@ -60,7 +58,7 @@ func resourceDisk() *schema.Resource {
}
}
func resourceDiskCreate(d *schema.ResourceData, meta interface{}) error {
func resourceDiskRead(d *schema.ResourceData, meta interface{}) error {
id, err := buildDisk(d, meta.(*cache))
if err != nil {
return err
@ -70,11 +68,6 @@ func resourceDiskCreate(d *schema.ResourceData, meta interface{}) error {
return nil
}
func resourceDiskDelete(d *schema.ResourceData, meta interface{}) error {
d.SetId("")
return nil
}
func resourceDiskExists(d *schema.ResourceData, meta interface{}) (bool, error) {
id, err := buildDisk(d, meta.(*cache))
if err != nil {
@ -84,10 +77,6 @@ func resourceDiskExists(d *schema.ResourceData, meta interface{}) (bool, error)
return id == d.Id(), nil
}
func resourceDiskRead(d *schema.ResourceData, meta interface{}) error {
return nil
}
func buildDisk(d *schema.ResourceData, c *cache) (string, error) {
var partitions []types.Partition
for _, raw := range d.Get("partition").([]interface{}) {

View File

@ -9,7 +9,7 @@ import (
func TestIngnitionDisk(t *testing.T) {
testIgnition(t, `
resource "ignition_disk" "foo" {
data "ignition_disk" "foo" {
device = "/foo"
partition {
label = "qux"
@ -18,10 +18,10 @@ func TestIngnitionDisk(t *testing.T) {
type_guid = "01234567-89AB-CDEF-EDCB-A98765432101"
}
}
resource "ignition_config" "test" {
data "ignition_config" "test" {
disks = [
"${ignition_disk.foo.id}",
"${data.ignition_disk.foo.id}",
]
}
`, func(c *types.Config) error {
@ -58,3 +58,26 @@ func TestIngnitionDisk(t *testing.T) {
return nil
})
}
func TestIngnitionDiskResource(t *testing.T) {
testIgnition(t, `
resource "ignition_disk" "foo" {
device = "/foo"
partition {
label = "qux"
}
}
data "ignition_config" "test" {
disks = [
"${ignition_disk.foo.id}",
]
}
`, func(c *types.Config) error {
if len(c.Storage.Disks) != 1 {
return fmt.Errorf("disks, found %d", len(c.Storage.Disks))
}
return nil
})
}

View File

@ -10,8 +10,6 @@ import (
func resourceFile() *schema.Resource {
return &schema.Resource{
Create: resourceFileCreate,
Delete: resourceFileDelete,
Exists: resourceFileExists,
Read: resourceFileRead,
Schema: map[string]*schema.Schema{
@ -91,7 +89,7 @@ func resourceFile() *schema.Resource {
}
}
func resourceFileCreate(d *schema.ResourceData, meta interface{}) error {
func resourceFileRead(d *schema.ResourceData, meta interface{}) error {
id, err := buildFile(d, meta.(*cache))
if err != nil {
return err
@ -101,11 +99,6 @@ func resourceFileCreate(d *schema.ResourceData, meta interface{}) error {
return nil
}
func resourceFileDelete(d *schema.ResourceData, meta interface{}) error {
d.SetId("")
return nil
}
func resourceFileExists(d *schema.ResourceData, meta interface{}) (bool, error) {
id, err := buildFile(d, meta.(*cache))
if err != nil {
@ -115,10 +108,6 @@ func resourceFileExists(d *schema.ResourceData, meta interface{}) (bool, error)
return id == d.Id(), nil
}
func resourceFileRead(d *schema.ResourceData, metacontent interface{}) error {
return nil
}
func buildFile(d *schema.ResourceData, c *cache) (string, error) {
_, hasContent := d.GetOk("content")
_, hasSource := d.GetOk("source")

View File

@ -9,7 +9,7 @@ import (
func TestIngnitionFile(t *testing.T) {
testIgnition(t, `
resource "ignition_file" "foo" {
data "ignition_file" "foo" {
filesystem = "foo"
path = "/foo"
content {
@ -19,8 +19,8 @@ func TestIngnitionFile(t *testing.T) {
uid = 42
gid = 84
}
resource "ignition_file" "qux" {
data "ignition_file" "qux" {
filesystem = "qux"
path = "/qux"
source {
@ -30,10 +30,10 @@ func TestIngnitionFile(t *testing.T) {
}
}
resource "ignition_config" "test" {
data "ignition_config" "test" {
files = [
"${ignition_file.foo.id}",
"${ignition_file.qux.id}",
"${data.ignition_file.foo.id}",
"${data.ignition_file.qux.id}",
]
}
`, func(c *types.Config) error {

View File

@ -9,8 +9,6 @@ import (
func resourceFilesystem() *schema.Resource {
return &schema.Resource{
Create: resourceFilesystemCreate,
Delete: resourceFilesystemDelete,
Exists: resourceFilesystemExists,
Read: resourceFilesystemRead,
Schema: map[string]*schema.Schema{
@ -59,7 +57,7 @@ func resourceFilesystem() *schema.Resource {
}
}
func resourceFilesystemCreate(d *schema.ResourceData, meta interface{}) error {
func resourceFilesystemRead(d *schema.ResourceData, meta interface{}) error {
id, err := buildFilesystem(d, meta.(*cache))
if err != nil {
return err
@ -69,11 +67,6 @@ func resourceFilesystemCreate(d *schema.ResourceData, meta interface{}) error {
return nil
}
func resourceFilesystemDelete(d *schema.ResourceData, meta interface{}) error {
d.SetId("")
return nil
}
func resourceFilesystemExists(d *schema.ResourceData, meta interface{}) (bool, error) {
id, err := buildFilesystem(d, meta.(*cache))
if err != nil {
@ -83,10 +76,6 @@ func resourceFilesystemExists(d *schema.ResourceData, meta interface{}) (bool, e
return id == d.Id(), nil
}
func resourceFilesystemRead(d *schema.ResourceData, meta interface{}) error {
return nil
}
func buildFilesystem(d *schema.ResourceData, c *cache) (string, error) {
var mount *types.FilesystemMount
if _, ok := d.GetOk("mount"); ok {

View File

@ -9,12 +9,12 @@ import (
func TestIngnitionFilesystem(t *testing.T) {
testIgnition(t, `
resource "ignition_filesystem" "foo" {
data "ignition_filesystem" "foo" {
name = "foo"
path = "/foo"
}
resource "ignition_filesystem" "qux" {
data "ignition_filesystem" "qux" {
name = "qux"
mount {
device = "/qux"
@ -22,7 +22,7 @@ func TestIngnitionFilesystem(t *testing.T) {
}
}
resource "ignition_filesystem" "bar" {
data "ignition_filesystem" "bar" {
name = "bar"
mount {
device = "/bar"
@ -32,11 +32,11 @@ func TestIngnitionFilesystem(t *testing.T) {
}
}
resource "ignition_config" "test" {
data "ignition_config" "test" {
filesystems = [
"${ignition_filesystem.foo.id}",
"${ignition_filesystem.qux.id}",
"${ignition_filesystem.bar.id}",
"${data.ignition_filesystem.foo.id}",
"${data.ignition_filesystem.qux.id}",
"${data.ignition_filesystem.bar.id}",
]
}
`, func(c *types.Config) error {

View File

@ -7,8 +7,6 @@ import (
func resourceGroup() *schema.Resource {
return &schema.Resource{
Create: resourceGroupCreate,
Delete: resourceGroupDelete,
Exists: resourceGroupExists,
Read: resourceGroupRead,
Schema: map[string]*schema.Schema{
@ -31,7 +29,7 @@ func resourceGroup() *schema.Resource {
}
}
func resourceGroupCreate(d *schema.ResourceData, meta interface{}) error {
func resourceGroupRead(d *schema.ResourceData, meta interface{}) error {
id, err := buildGroup(d, meta.(*cache))
if err != nil {
return err
@ -41,11 +39,6 @@ func resourceGroupCreate(d *schema.ResourceData, meta interface{}) error {
return nil
}
func resourceGroupDelete(d *schema.ResourceData, meta interface{}) error {
d.SetId("")
return nil
}
func resourceGroupExists(d *schema.ResourceData, meta interface{}) (bool, error) {
id, err := buildGroup(d, meta.(*cache))
if err != nil {
@ -55,10 +48,6 @@ func resourceGroupExists(d *schema.ResourceData, meta interface{}) (bool, error)
return id == d.Id(), nil
}
func resourceGroupRead(d *schema.ResourceData, meta interface{}) error {
return nil
}
func buildGroup(d *schema.ResourceData, c *cache) (string, error) {
return c.addGroup(&types.Group{
Name: d.Get("name").(string),

View File

@ -9,20 +9,20 @@ import (
func TestIngnitionGroup(t *testing.T) {
testIgnition(t, `
resource "ignition_group" "foo" {
data "ignition_group" "foo" {
name = "foo"
password_hash = "password"
gid = 42
}
resource "ignition_group" "qux" {
data "ignition_group" "qux" {
name = "qux"
}
resource "ignition_config" "test" {
}
data "ignition_config" "test" {
groups = [
"${ignition_group.foo.id}",
"${ignition_group.qux.id}",
"${data.ignition_group.foo.id}",
"${data.ignition_group.qux.id}",
]
}
`, func(c *types.Config) error {

View File

@ -7,8 +7,6 @@ import (
func resourceNetworkdUnit() *schema.Resource {
return &schema.Resource{
Create: resourceNetworkdUnitCreate,
Delete: resourceNetworkdUnitDelete,
Exists: resourceNetworkdUnitExists,
Read: resourceNetworkdUnitRead,
Schema: map[string]*schema.Schema{
@ -26,7 +24,7 @@ func resourceNetworkdUnit() *schema.Resource {
}
}
func resourceNetworkdUnitCreate(d *schema.ResourceData, meta interface{}) error {
func resourceNetworkdUnitRead(d *schema.ResourceData, meta interface{}) error {
id, err := buildNetworkdUnit(d, meta.(*cache))
if err != nil {
return err
@ -50,10 +48,6 @@ func resourceNetworkdUnitExists(d *schema.ResourceData, meta interface{}) (bool,
return id == d.Id(), nil
}
func resourceNetworkdUnitRead(d *schema.ResourceData, meta interface{}) error {
return nil
}
func buildNetworkdUnit(d *schema.ResourceData, c *cache) (string, error) {
if err := validateUnitContent(d.Get("content").(string)); err != nil {
return "", err

View File

@ -9,14 +9,14 @@ import (
func TestIngnitionNetworkdUnit(t *testing.T) {
testIgnition(t, `
resource "ignition_networkd_unit" "foo" {
data "ignition_networkd_unit" "foo" {
name = "foo.link"
content = "[Match]\nName=eth0\n\n[Network]\nAddress=10.0.1.7\n"
}
resource "ignition_config" "test" {
data "ignition_config" "test" {
networkd = [
"${ignition_networkd_unit.foo.id}",
"${data.ignition_networkd_unit.foo.id}",
]
}
`, func(c *types.Config) error {

View File

@ -7,8 +7,6 @@ import (
func resourceRaid() *schema.Resource {
return &schema.Resource{
Create: resourceRaidCreate,
Delete: resourceRaidDelete,
Exists: resourceRaidExists,
Read: resourceRaidRead,
Schema: map[string]*schema.Schema{
@ -37,7 +35,7 @@ func resourceRaid() *schema.Resource {
}
}
func resourceRaidCreate(d *schema.ResourceData, meta interface{}) error {
func resourceRaidRead(d *schema.ResourceData, meta interface{}) error {
id, err := buildRaid(d, meta.(*cache))
if err != nil {
return err
@ -47,11 +45,6 @@ func resourceRaidCreate(d *schema.ResourceData, meta interface{}) error {
return nil
}
func resourceRaidDelete(d *schema.ResourceData, meta interface{}) error {
d.SetId("")
return nil
}
func resourceRaidExists(d *schema.ResourceData, meta interface{}) (bool, error) {
id, err := buildRaid(d, meta.(*cache))
if err != nil {
@ -61,10 +54,6 @@ func resourceRaidExists(d *schema.ResourceData, meta interface{}) (bool, error)
return id == d.Id(), nil
}
func resourceRaidRead(d *schema.ResourceData, meta interface{}) error {
return nil
}
func buildRaid(d *schema.ResourceData, c *cache) (string, error) {
var devices []types.Path
for _, value := range d.Get("devices").([]interface{}) {

View File

@ -9,16 +9,16 @@ import (
func TestIngnitionRaid(t *testing.T) {
testIgnition(t, `
resource "ignition_raid" "foo" {
data "ignition_raid" "foo" {
name = "foo"
level = "raid10"
devices = ["/foo"]
spares = 42
}
resource "ignition_config" "test" {
data "ignition_config" "test" {
arrays = [
"${ignition_raid.foo.id}",
"${data.ignition_raid.foo.id}",
]
}
`, func(c *types.Config) error {

View File

@ -7,8 +7,6 @@ import (
func resourceSystemdUnit() *schema.Resource {
return &schema.Resource{
Create: resourceSystemdUnitCreate,
Delete: resourceSystemdUnitDelete,
Exists: resourceSystemdUnitExists,
Read: resourceSystemdUnitRead,
Schema: map[string]*schema.Schema{
@ -56,7 +54,7 @@ func resourceSystemdUnit() *schema.Resource {
}
}
func resourceSystemdUnitCreate(d *schema.ResourceData, meta interface{}) error {
func resourceSystemdUnitRead(d *schema.ResourceData, meta interface{}) error {
id, err := buildSystemdUnit(d, meta.(*cache))
if err != nil {
return err
@ -66,11 +64,6 @@ func resourceSystemdUnitCreate(d *schema.ResourceData, meta interface{}) error {
return nil
}
func resourceSystemdUnitDelete(d *schema.ResourceData, meta interface{}) error {
d.SetId("")
return nil
}
func resourceSystemdUnitExists(d *schema.ResourceData, meta interface{}) (bool, error) {
id, err := buildSystemdUnit(d, meta.(*cache))
if err != nil {
@ -80,10 +73,6 @@ func resourceSystemdUnitExists(d *schema.ResourceData, meta interface{}) (bool,
return id == d.Id(), nil
}
func resourceSystemdUnitRead(d *schema.ResourceData, meta interface{}) error {
return nil
}
func buildSystemdUnit(d *schema.ResourceData, c *cache) (string, error) {
var dropins []types.SystemdUnitDropIn
for _, raw := range d.Get("dropin").([]interface{}) {

View File

@ -9,7 +9,7 @@ import (
func TestIngnitionSystemdUnit(t *testing.T) {
testIgnition(t, `
resource "ignition_systemd_unit" "foo" {
data "ignition_systemd_unit" "foo" {
name = "foo.service"
content = "[Match]\nName=eth0\n\n[Network]\nAddress=10.0.1.7\n"
enable = false
@ -21,9 +21,9 @@ func TestIngnitionSystemdUnit(t *testing.T) {
}
}
resource "ignition_config" "test" {
data "ignition_config" "test" {
systemd = [
"${ignition_systemd_unit.foo.id}",
"${data.ignition_systemd_unit.foo.id}",
]
}
`, func(c *types.Config) error {
@ -59,7 +59,7 @@ func TestIngnitionSystemdUnit(t *testing.T) {
func TestIngnitionSystemdUnitEmptyContentWithDropIn(t *testing.T) {
testIgnition(t, `
resource "ignition_systemd_unit" "foo" {
data "ignition_systemd_unit" "foo" {
name = "foo.service"
dropin {
name = "foo.conf"
@ -67,9 +67,9 @@ func TestIngnitionSystemdUnitEmptyContentWithDropIn(t *testing.T) {
}
}
resource "ignition_config" "test" {
data "ignition_config" "test" {
systemd = [
"${ignition_systemd_unit.foo.id}",
"${data.ignition_systemd_unit.foo.id}",
]
}
`, func(c *types.Config) error {
@ -98,14 +98,14 @@ func TestIngnitionSystemdUnitEmptyContentWithDropIn(t *testing.T) {
// #11325
func TestIgnitionSystemdUnit_emptyContent(t *testing.T) {
testIgnition(t, `
resource "ignition_systemd_unit" "foo" {
data "ignition_systemd_unit" "foo" {
name = "foo.service"
enable = true
enable = true
}
resource "ignition_config" "test" {
data "ignition_config" "test" {
systemd = [
"${ignition_systemd_unit.foo.id}",
"${data.ignition_systemd_unit.foo.id}",
]
}
`, func(c *types.Config) error {

View File

@ -9,8 +9,6 @@ import (
func resourceUser() *schema.Resource {
return &schema.Resource{
Create: resourceUserCreate,
Delete: resourceUserDelete,
Exists: resourceUserExists,
Read: resourceUserRead,
Schema: map[string]*schema.Schema{
@ -80,7 +78,7 @@ func resourceUser() *schema.Resource {
}
}
func resourceUserCreate(d *schema.ResourceData, meta interface{}) error {
func resourceUserRead(d *schema.ResourceData, meta interface{}) error {
id, err := buildUser(d, meta.(*cache))
if err != nil {
return err
@ -90,11 +88,6 @@ func resourceUserCreate(d *schema.ResourceData, meta interface{}) error {
return nil
}
func resourceUserDelete(d *schema.ResourceData, meta interface{}) error {
d.SetId("")
return nil
}
func resourceUserExists(d *schema.ResourceData, meta interface{}) (bool, error) {
id, err := buildUser(d, meta.(*cache))
if err != nil {
@ -104,10 +97,6 @@ func resourceUserExists(d *schema.ResourceData, meta interface{}) (bool, error)
return id == d.Id(), nil
}
func resourceUserRead(d *schema.ResourceData, meta interface{}) error {
return nil
}
func buildUser(d *schema.ResourceData, c *cache) (string, error) {
uc := types.UserCreate{
Uid: getUInt(d, "uid"),

View File

@ -9,7 +9,7 @@ import (
func TestIngnitionUser(t *testing.T) {
testIgnition(t, `
resource "ignition_user" "foo" {
data "ignition_user" "foo" {
name = "foo"
password_hash = "password"
ssh_authorized_keys = ["keys"]
@ -24,14 +24,14 @@ func TestIngnitionUser(t *testing.T) {
shell = "shell"
}
resource "ignition_user" "qux" {
data "ignition_user" "qux" {
name = "qux"
}
resource "ignition_config" "test" {
data "ignition_config" "test" {
users = [
"${ignition_user.foo.id}",
"${ignition_user.qux.id}",
"${data.ignition_user.foo.id}",
"${data.ignition_user.qux.id}",
]
}
`, func(c *types.Config) error {

View File

@ -0,0 +1,112 @@
package openstack
import (
"fmt"
"log"
"strconv"
"github.com/hashicorp/terraform/helper/schema"
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack/networking/v2/networks"
"github.com/gophercloud/gophercloud/openstack/networking/v2/subnets"
)
// dataSourceNetworkingNetworkV2 returns the schema for the
// openstack_networking_network_v2 data source, which looks up an existing
// Neutron network by name and/or by a subnet CIDR it contains.
func dataSourceNetworkingNetworkV2() *schema.Resource {
	return &schema.Resource{
		Read: dataSourceNetworkingNetworkV2Read,

		// Element type &schema.Schema is implied by the map literal
		// (gofmt -s), matching the style used elsewhere in the codebase.
		Schema: map[string]*schema.Schema{
			"name": {
				Type:     schema.TypeString,
				Optional: true,
			},
			"matching_subnet_cidr": {
				Type:     schema.TypeString,
				Optional: true,
			},
			"region": {
				Type:        schema.TypeString,
				Required:    true,
				DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""),
			},
			"tenant_id": {
				Type:     schema.TypeString,
				Optional: true,
				DefaultFunc: schema.MultiEnvDefaultFunc([]string{
					"OS_TENANT_ID",
					"OS_PROJECT_ID",
				}, ""),
				Description: descriptions["tenant_id"],
			},
			// The remaining attributes are read back from the matched network.
			"admin_state_up": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"shared": {
				Type:     schema.TypeString,
				Computed: true,
			},
		},
	}
}
// dataSourceNetworkingNetworkV2Read finds exactly one network matching the
// configured name/tenant (and, optionally, a subnet CIDR) and stores its
// attributes in state. Zero or multiple matches are errors.
func dataSourceNetworkingNetworkV2Read(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	networkingClient, err := config.networkingV2Client(GetRegion(d))
	if err != nil {
		// Previously unchecked: a bad region/auth config would have caused a
		// nil-client panic further down instead of a clean error.
		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
	}

	listOpts := networks.ListOpts{
		Name:     d.Get("name").(string),
		TenantID: d.Get("tenant_id").(string),
		Status:   "ACTIVE",
	}

	pages, err := networks.List(networkingClient, listOpts).AllPages()
	if err != nil {
		// Previously this error was silently overwritten by ExtractNetworks.
		return fmt.Errorf("Unable to retrieve networks: %s", err)
	}

	allNetworks, err := networks.ExtractNetworks(pages)
	if err != nil {
		return fmt.Errorf("Unable to retrieve networks: %s", err)
	}

	var refinedNetworks []networks.Network
	if cidr := d.Get("matching_subnet_cidr").(string); cidr != "" {
		// Keep only networks that own a subnet with the requested CIDR.
		for _, n := range allNetworks {
			for _, s := range n.Subnets {
				subnet, err := subnets.Get(networkingClient, s).Extract()
				if err != nil {
					if _, ok := err.(gophercloud.ErrDefault404); ok {
						// Subnet disappeared between list and get; skip it.
						continue
					}
					return fmt.Errorf("Unable to retrieve network subnet: %s", err)
				}
				if cidr == subnet.CIDR {
					refinedNetworks = append(refinedNetworks, n)
				}
			}
		}
	} else {
		refinedNetworks = allNetworks
	}

	// A data source must resolve to exactly one object.
	if len(refinedNetworks) < 1 {
		return fmt.Errorf("Your query returned no results. " +
			"Please change your search criteria and try again.")
	}
	if len(refinedNetworks) > 1 {
		return fmt.Errorf("Your query returned more than one result." +
			" Please try a more specific search criteria")
	}

	network := refinedNetworks[0]

	log.Printf("[DEBUG] Retrieved Network %s: %+v", network.ID, network)
	d.SetId(network.ID)

	d.Set("name", network.Name)
	d.Set("admin_state_up", strconv.FormatBool(network.AdminStateUp))
	d.Set("shared", strconv.FormatBool(network.Shared))
	d.Set("tenant_id", network.TenantID)
	d.Set("region", GetRegion(d))

	return nil
}

View File

@ -0,0 +1,98 @@
package openstack
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
// TestAccOpenStackNetworkingNetworkV2DataSource_basic creates a network,
// then looks it up by name via the data source and checks the returned
// attributes.
func TestAccOpenStackNetworkingNetworkV2DataSource_basic(t *testing.T) {
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			// Element type is implied by the slice literal (gofmt -s).
			{
				Config: testAccOpenStackNetworkingNetworkV2DataSource_network,
			},
			{
				Config: testAccOpenStackNetworkingNetworkV2DataSource_basic,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckNetworkingNetworkV2DataSourceID("data.openstack_networking_network_v2.net"),
					resource.TestCheckResourceAttr(
						"data.openstack_networking_network_v2.net", "name", "tf_test_network"),
					resource.TestCheckResourceAttr(
						"data.openstack_networking_network_v2.net", "admin_state_up", "true"),
				),
			},
		},
	})
}
// TestAccOpenStackNetworkingNetworkV2DataSource_subnet creates a network with
// a subnet, then looks the network up by the subnet's CIDR via the data
// source and checks the returned attributes.
func TestAccOpenStackNetworkingNetworkV2DataSource_subnet(t *testing.T) {
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			// Element type is implied by the slice literal (gofmt -s).
			{
				Config: testAccOpenStackNetworkingNetworkV2DataSource_network,
			},
			{
				Config: testAccOpenStackNetworkingNetworkV2DataSource_subnet,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckNetworkingNetworkV2DataSourceID("data.openstack_networking_network_v2.net"),
					resource.TestCheckResourceAttr(
						"data.openstack_networking_network_v2.net", "name", "tf_test_network"),
					resource.TestCheckResourceAttr(
						"data.openstack_networking_network_v2.net", "admin_state_up", "true"),
				),
			},
		},
	})
}
// testAccCheckNetworkingNetworkV2DataSourceID asserts that the data source at
// state address n exists and has a non-empty ID.
func testAccCheckNetworkingNetworkV2DataSourceID(n string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, found := s.RootModule().Resources[n]
		if !found {
			return fmt.Errorf("Can't find network data source: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("Network data source ID not set")
		}

		return nil
	}
}
// testAccOpenStackNetworkingNetworkV2DataSource_network provisions the
// network (and a subnet on it) that the data-source test steps look up.
const testAccOpenStackNetworkingNetworkV2DataSource_network = `
resource "openstack_networking_network_v2" "net" {
        name = "tf_test_network"
        admin_state_up = "true"
}
resource "openstack_networking_subnet_v2" "subnet" {
        name = "tf_test_subnet"
        cidr = "192.168.199.0/24"
        no_gateway = true
        network_id = "${openstack_networking_network_v2.net.id}"
}
`
// testAccOpenStackNetworkingNetworkV2DataSource_basic looks the test network
// up by its name.
var testAccOpenStackNetworkingNetworkV2DataSource_basic = fmt.Sprintf(`
%s
data "openstack_networking_network_v2" "net" {
	name = "${openstack_networking_network_v2.net.name}"
}
`, testAccOpenStackNetworkingNetworkV2DataSource_network)
// testAccOpenStackNetworkingNetworkV2DataSource_subnet looks the test network
// up by the CIDR of the subnet attached to it.
var testAccOpenStackNetworkingNetworkV2DataSource_subnet = fmt.Sprintf(`
%s
data "openstack_networking_network_v2" "net" {
	matching_subnet_cidr = "${openstack_networking_subnet_v2.subnet.cidr}"
}
`, testAccOpenStackNetworkingNetworkV2DataSource_network)

View File

@ -135,7 +135,8 @@ func Provider() terraform.ResourceProvider {
},
DataSourcesMap: map[string]*schema.Resource{
"openstack_images_image_v2": dataSourceImagesImageV2(),
"openstack_images_image_v2": dataSourceImagesImageV2(),
"openstack_networking_network_v2": dataSourceNetworkingNetworkV2(),
},
ResourcesMap: map[string]*schema.Resource{

View File

@ -61,52 +61,55 @@ func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig)
}
if raw, ok := c.Config["timeout"]; ok {
configTimeouts := raw.([]map[string]interface{})
for _, timeoutValues := range configTimeouts {
			// loop through each Timeout given in the configuration and validate it
			// against the Timeouts defined for the resource
for timeKey, timeValue := range timeoutValues {
// validate that we're dealing with the normal CRUD actions
var found bool
for _, key := range timeoutKeys() {
if timeKey == key {
found = true
break
if configTimeouts, ok := raw.([]map[string]interface{}); ok {
for _, timeoutValues := range configTimeouts {
				// loop through each Timeout given in the configuration and validate it
				// against the Timeouts defined for the resource
for timeKey, timeValue := range timeoutValues {
// validate that we're dealing with the normal CRUD actions
var found bool
for _, key := range timeoutKeys() {
if timeKey == key {
found = true
break
}
}
}
if !found {
return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey)
}
if !found {
return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey)
}
// Get timeout
rt, err := time.ParseDuration(timeValue.(string))
if err != nil {
return fmt.Errorf("Error parsing Timeout for (%s): %s", timeKey, err)
}
// Get timeout
rt, err := time.ParseDuration(timeValue.(string))
if err != nil {
return fmt.Errorf("Error parsing Timeout for (%s): %s", timeKey, err)
}
var timeout *time.Duration
switch timeKey {
case TimeoutCreate:
timeout = t.Create
case TimeoutUpdate:
timeout = t.Update
case TimeoutRead:
timeout = t.Read
case TimeoutDelete:
timeout = t.Delete
case TimeoutDefault:
timeout = t.Default
}
var timeout *time.Duration
switch timeKey {
case TimeoutCreate:
timeout = t.Create
case TimeoutUpdate:
timeout = t.Update
case TimeoutRead:
timeout = t.Read
case TimeoutDelete:
timeout = t.Delete
case TimeoutDefault:
timeout = t.Default
}
			// If the resource has not declared this in the definition, then error
// with an unsupported message
if timeout == nil {
return unsupportedTimeoutKeyError(timeKey)
}
				// If the resource has not declared this in the definition, then error
// with an unsupported message
if timeout == nil {
return unsupportedTimeoutKeyError(timeKey)
}
*timeout = rt
*timeout = rt
}
}
} else {
log.Printf("[WARN] Invalid Timeout structure found, skipping timeouts")
}
}

View File

@ -1,8 +1,31 @@
Release v1.7.5 (2017-03-08)
===
Service Client Updates
---
* `aws/endpoints`: Updated Regions and Endpoints metadata.
* `service/rds`: Updates service API and documentation
* Add support to using encrypted clusters as cross-region replication masters. Update CopyDBClusterSnapshot API to support encrypted cross region copy of Aurora cluster snapshots.
Release v1.7.4 (2017-03-06)
===
Service Client Updates
---
* `service/budgets`: Updates service API and paginators
* When creating or editing a budget via the AWS Budgets API you can define notifications that are sent to subscribers when the actual or forecasted value for cost or usage exceeds the notificationThreshold associated with the budget notification object. Starting today, the maximum allowed value for the notificationThreshold was raised from 100 to 300. This change was made to give you more flexibility when setting budget notifications.
* `service/cloudtrail`: Updates service documentation and paginators
* Doc-only update for AWSCloudTrail: Updated links/descriptions
* `aws/endpoints`: Updated Regions and Endpoints metadata.
* `service/opsworkscm`: Updates service API, documentation, and paginators
* OpsWorks for Chef Automate has added a new field "AssociatePublicIpAddress" to the CreateServer request, "CloudFormationStackArn" to the Server model and "TERMINATED" server state.
Release v1.7.3 (2017-02-28)
===
Service Client Updates
---
* `service/mturk`: Renaming service
* service/mechanicalturkrequesterservice was renamed to service/mturk. Be sure to change any references of the old client to the new.

View File

@ -104,8 +104,10 @@ const (
MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace.
MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics.
MonitoringServiceID = "monitoring" // Monitoring.
MturkRequesterServiceID = "mturk-requester" // MturkRequester.
OpsworksServiceID = "opsworks" // Opsworks.
OpsworksCmServiceID = "opsworks-cm" // OpsworksCm.
OrganizationsServiceID = "organizations" // Organizations.
PinpointServiceID = "pinpoint" // Pinpoint.
PollyServiceID = "polly" // Polly.
RdsServiceID = "rds" // Rds.
@ -958,6 +960,7 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
@ -1075,10 +1078,13 @@ var awsPartition = partition{
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
"us-west-2": endpoint{},
},
@ -1110,6 +1116,16 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
"mturk-requester": service{
IsRegionalized: boxedFalse,
Endpoints: endpoints{
"sandbox": endpoint{
Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com",
},
"us-east-1": endpoint{},
},
},
"opsworks": service{
Endpoints: endpoints{
@ -1136,6 +1152,19 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
"organizations": service{
PartitionEndpoint: "aws-global",
IsRegionalized: boxedFalse,
Endpoints: endpoints{
"aws-global": endpoint{
Hostname: "organizations.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
},
},
},
"pinpoint": service{
Defaults: endpoint{
CredentialScope: credentialScope{
@ -1946,6 +1975,12 @@ var awsusgovPartition = partition{
},
},
},
"kinesis": service{
Endpoints: endpoints{
"us-gov-west-1": endpoint{},
},
},
"kms": service{
Endpoints: endpoints{

View File

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.7.3"
const SDKVersion = "1.7.5"

View File

@ -80,7 +80,6 @@ func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix stri
continue
}
if protocol.CanSetIdempotencyToken(value.Field(i), field) {
token := protocol.GetIdempotencyToken()
elemValue = reflect.ValueOf(token)
@ -124,7 +123,11 @@ func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string
// check for unflattened list member
if !q.isEC2 && tag.Get("flattened") == "" {
prefix += ".member"
if listName := tag.Get("locationNameList"); listName == "" {
prefix += ".member"
} else {
prefix += "." + listName
}
}
for i := 0; i < value.Len(); i++ {

View File

@ -476,7 +476,8 @@ func (c *CloudTrail) GetEventSelectorsRequest(input *GetEventSelectorsInput) (re
// * If your event selector includes read-only events, write-only events,
// or all.
//
// For more information, see Configuring Event Selectors for Trails (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/create-event-selectors-for-a-trail.html)
// For more information, see Logging Data and Management Events for Trails
// (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html)
// in the AWS CloudTrail User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@ -846,6 +847,8 @@ func (c *CloudTrail) LookupEventsRequest(input *LookupEventsInput) (req *request
//
// * Event name
//
// * Event source
//
// * Resource name
//
// * Resource type
@ -962,11 +965,11 @@ func (c *CloudTrail) PutEventSelectorsRequest(input *PutEventSelectorsInput) (re
// PutEventSelectors API operation for AWS CloudTrail.
//
// Configures an event selector for your trail. Use event selectors to specify
// the type of events that you want your trail to log. When an event occurs
// in your account, CloudTrail evaluates the event selectors in all trails.
// For each trail, if the event matches any event selector, the trail processes
// and logs the event. If the event doesn't match any event selector, the trail
// doesn't log the event.
// whether you want your trail to log management and/or data events. When an
// event occurs in your account, CloudTrail evaluates the event selectors in
// all trails. For each trail, if the event matches any event selector, the
// trail processes and logs the event. If the event doesn't match any event
// selector, the trail doesn't log the event.
//
// Example
//
@ -987,7 +990,7 @@ func (c *CloudTrail) PutEventSelectorsRequest(input *PutEventSelectorsInput) (re
// trail was created; otherwise, an InvalidHomeRegionException is thrown.
//
// You can configure up to five event selectors for each trail. For more information,
// see Configuring Event Selectors for Trails (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/create-event-selectors-for-a-trail.html)
// see Logging Data and Management Events for Trails (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html)
// in the AWS CloudTrail User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@ -1028,7 +1031,7 @@ func (c *CloudTrail) PutEventSelectorsRequest(input *PutEventSelectorsInput) (re
//
// * Specify a valid number of event selectors (1 to 5) for a trail.
//
// * Specify a valid number of data resources (1 to 50) for an event selector.
// * Specify a valid number of data resources (1 to 250) for an event selector.
//
// * Specify a valid value for a parameter. For example, specifying the ReadWriteType
// parameter with a value of read-only is invalid.
@ -1590,8 +1593,8 @@ type CreateTrailInput struct {
IsMultiRegionTrail *bool `type:"boolean"`
// Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail.
// The value can be a an alias name prefixed by "alias/", a fully specified
// ARN to an alias, a fully specified ARN to a key, or a globally unique identifier.
// The value can be an alias name prefixed by "alias/", a fully specified ARN
// to an alias, a fully specified ARN to a key, or a globally unique identifier.
//
// Examples:
//
@ -1865,9 +1868,9 @@ func (s *CreateTrailOutput) SetTrailARN(v string) *CreateTrailOutput {
}
// The Amazon S3 objects that you specify in your event selectors for your trail
// to log data events. Data events are object level API operations that access
// to log data events. Data events are object-level API operations that access
// S3 objects, such as GetObject, DeleteObject, and PutObject. You can specify
// up to 50 S3 buckets and object prefixes for an event selector.
// up to 250 S3 buckets and object prefixes for a trail.
//
// Example
//
@ -2144,11 +2147,11 @@ func (s *Event) SetUsername(v string) *Event {
return s
}
// Use event selectors to specify the types of events that you want your trail
// to log. When an event occurs in your account, CloudTrail evaluates the event
// selector for all trails. For each trail, if the event matches any event selector,
// the trail processes and logs the event. If the event doesn't match any event
// selector, the trail doesn't log the event.
// Use event selectors to specify whether you want your trail to log management
// and/or data events. When an event occurs in your account, CloudTrail evaluates
// the event selector for all trails. For each trail, if the event matches any
// event selector, the trail processes and logs the event. If the event doesn't
// match any event selector, the trail doesn't log the event.
//
// You can configure up to five event selectors for a trail.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudtrail-2013-11-01/EventSelector
@ -2156,16 +2159,16 @@ type EventSelector struct {
_ struct{} `type:"structure"`
// CloudTrail supports logging only data events for S3 objects. You can specify
// up to 50 S3 buckets and object prefixes for an event selector.
// up to 250 S3 buckets and object prefixes for a trail.
//
// For more information, see Data Events (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/create-event-selectors-for-a-trail.html#data-events-resources)
// For more information, see Data Events (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html#logging-data-events)
// in the AWS CloudTrail User Guide.
DataResources []*DataResource `type:"list"`
// Specify if you want your event selector to include management events for
// your trail.
//
// For more information, see Management Events (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/create-event-selectors-for-a-trail.html#event-selector-for-management-events)
// For more information, see Management Events (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html#logging-management-events)
// in the AWS CloudTrail User Guide.
//
// By default, the value is true.
@ -3541,8 +3544,8 @@ type UpdateTrailInput struct {
IsMultiRegionTrail *bool `type:"boolean"`
// Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail.
// The value can be a an alias name prefixed by "alias/", a fully specified
// ARN to an alias, a fully specified ARN to a key, or a globally unique identifier.
// The value can be an alias name prefixed by "alias/", a fully specified ARN
// to an alias, a fully specified ARN to a key, or a globally unique identifier.
//
// Examples:
//

View File

@ -59,7 +59,7 @@ const (
//
// * Specify a valid number of event selectors (1 to 5) for a trail.
//
// * Specify a valid number of data resources (1 to 50) for an event selector.
// * Specify a valid number of data resources (1 to 250) for an event selector.
//
// * Specify a valid value for a parameter. For example, specifying the ReadWriteType
// parameter with a value of read-only is invalid.

View File

@ -509,8 +509,67 @@ func (c *RDS) CopyDBClusterSnapshotRequest(input *CopyDBClusterSnapshotInput) (r
// CopyDBClusterSnapshot API operation for Amazon Relational Database Service.
//
// Creates a snapshot of a DB cluster. For more information on Amazon Aurora,
// see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html)
// Copies a snapshot of a DB cluster.
//
// To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier
// must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.
//
// You can copy an encrypted DB cluster snapshot from another AWS region. In
// that case, the region where you call the CopyDBClusterSnapshot action is
// the destination region for the encrypted DB cluster snapshot to be copied
// to. To copy an encrypted DB cluster snapshot from another region, you must
// provide the following values:
//
// * KmsKeyId - The AWS Key Management System (KMS) key identifier for the
// key to use to encrypt the copy of the DB cluster snapshot in the destination
// region.
//
// * PreSignedUrl - A URL that contains a Signature Version 4 signed request
// for the CopyDBClusterSnapshot action to be called in the source region
// where the DB cluster snapshot will be copied from. The pre-signed URL
// must be a valid request for the CopyDBClusterSnapshot API action that
// can be executed in the source region that contains the encrypted DB cluster
// snapshot to be copied.
//
// The pre-signed URL request must contain the following parameter values:
//
// KmsKeyId - The KMS key identifier for the key to use to encrypt the copy
// of the DB cluster snapshot in the destination region. This is the same
// identifier for both the CopyDBClusterSnapshot action that is called in
// the destination region, and the action contained in the pre-signed URL.
//
// DestinationRegion - The name of the region that the DB cluster snapshot will
// be created in.
//
// SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for
// the encrypted DB cluster snapshot to be copied. This identifier must be
// in the Amazon Resource Name (ARN) format for the source region. For example,
// if you are copying an encrypted DB cluster snapshot from the us-west-2
// region, then your SourceDBClusterSnapshotIdentifier looks like the following
// example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115.
//
// To learn how to generate a Signature Version 4 signed request, see Authenticating
// Requests: Using Query Parameters (AWS Signature Version 4) (http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html)
// and Signature Version 4 Signing Process (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
//
// * TargetDBClusterSnapshotIdentifier - The identifier for the new copy
// of the DB cluster snapshot in the destination region.
//
// * SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier
// for the encrypted DB cluster snapshot to be copied. This identifier must
// be in the ARN format for the source region and is the same value as the
// SourceDBClusterSnapshotIdentifier in the pre-signed URL.
//
// To cancel the copy operation once it is in progress, delete the target DB
// cluster snapshot identified by TargetDBClusterSnapshotIdentifier while that
// DB cluster snapshot is in "copying" status.
//
// For more information on copying encrypted DB cluster snapshots from one region
// to another, see Copying a DB Cluster Snapshot in the Same Account, Either
// in the Same Region or Across Regions (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_CopySnapshot.html#USER_CopyDBClusterSnapshot.CrossRegion)
// in the Amazon RDS User Guide.
//
// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html)
// in the Amazon RDS User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@ -873,6 +932,8 @@ func (c *RDS) CreateDBClusterRequest(input *CreateDBClusterInput) (req *request.
//
// You can use the ReplicationSourceIdentifier parameter to create the DB cluster
// as a Read Replica of another DB cluster or Amazon RDS MySQL DB instance.
// For cross-region replication where the DB cluster identified by ReplicationSourceIdentifier
// is encrypted, you must also specify the PreSignedUrl parameter.
//
// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html)
// in the Amazon RDS User Guide.
@ -8316,6 +8377,65 @@ func (s *CopyDBClusterParameterGroupOutput) SetDBClusterParameterGroup(v *DBClus
type CopyDBClusterSnapshotInput struct {
_ struct{} `type:"structure"`
CopyTags *bool `type:"boolean"`
// DestinationRegion is used for presigning the request to a given region.
DestinationRegion *string `type:"string"`
// The AWS KMS key ID for an encrypted DB cluster snapshot. The KMS key ID is
// the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias
// for the KMS encryption key.
//
// If you copy an unencrypted DB cluster snapshot and specify a value for the
// KmsKeyId parameter, Amazon RDS encrypts the target DB cluster snapshot using
// the specified KMS encryption key.
//
// If you copy an encrypted DB cluster snapshot from your AWS account, you can
// specify a value for KmsKeyId to encrypt the copy with a new KMS encryption
// key. If you don't specify a value for KmsKeyId, then the copy of the DB cluster
// snapshot is encrypted with the same KMS key as the source DB cluster snapshot.
//
// If you copy an encrypted DB cluster snapshot that is shared from another
// AWS account, then you must specify a value for KmsKeyId.
//
// To copy an encrypted DB cluster snapshot to another region, you must set
// KmsKeyId to the KMS key ID you want to use to encrypt the copy of the DB
// cluster snapshot in the destination region. KMS encryption keys are specific
// to the region that they are created in, and you cannot use encryption keys
// from one region in another region.
KmsKeyId *string `type:"string"`
// The URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot
// API action in the AWS region that contains the source DB cluster snapshot
// to copy. The PreSignedUrl parameter must be used when copying an encrypted
// DB cluster snapshot from another AWS region.
//
	// The pre-signed URL must be a valid request for the CopyDBClusterSnapshot
// API action that can be executed in the source region that contains the encrypted
// DB cluster snapshot to be copied. The pre-signed URL request must contain
// the following parameter values:
//
// * KmsKeyId - The KMS key identifier for the key to use to encrypt the
// copy of the DB cluster snapshot in the destination region. This is the
// same identifier for both the CopyDBClusterSnapshot action that is called
// in the destination region, and the action contained in the pre-signed
// URL.
//
// * DestinationRegion - The name of the region that the DB cluster snapshot
// will be created in.
//
// * SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier
// for the encrypted DB cluster snapshot to be copied. This identifier must
// be in the Amazon Resource Name (ARN) format for the source region. For
// example, if you are copying an encrypted DB cluster snapshot from the
// us-west-2 region, then your SourceDBClusterSnapshotIdentifier looks like
// the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115.
//
// To learn how to generate a Signature Version 4 signed request, see Authenticating
// Requests: Using Query Parameters (AWS Signature Version 4) (http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html)
// and Signature Version 4 Signing Process (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
PreSignedUrl *string `type:"string"`
// The identifier of the DB cluster snapshot to copy. This parameter is not
// case-sensitive.
//
@ -8332,6 +8452,11 @@ type CopyDBClusterSnapshotInput struct {
// SourceDBClusterSnapshotIdentifier is a required field
SourceDBClusterSnapshotIdentifier *string `type:"string" required:"true"`
// SourceRegion is the source region where the resource exists. This is not
// sent over the wire and is only used for presigning. This value should always
// have the same region as the source ARN.
SourceRegion *string `type:"string" ignore:"true"`
// A list of tags.
Tags []*Tag `locationNameList:"Tag" type:"list"`
@ -8378,12 +8503,42 @@ func (s *CopyDBClusterSnapshotInput) Validate() error {
return nil
}
// SetCopyTags sets the CopyTags field's value.
func (s *CopyDBClusterSnapshotInput) SetCopyTags(v bool) *CopyDBClusterSnapshotInput {
s.CopyTags = &v
return s
}
// SetDestinationRegion sets the DestinationRegion field's value.
func (s *CopyDBClusterSnapshotInput) SetDestinationRegion(v string) *CopyDBClusterSnapshotInput {
s.DestinationRegion = &v
return s
}
// SetKmsKeyId sets the KmsKeyId field's value.
func (s *CopyDBClusterSnapshotInput) SetKmsKeyId(v string) *CopyDBClusterSnapshotInput {
s.KmsKeyId = &v
return s
}
// SetPreSignedUrl sets the PreSignedUrl field's value.
func (s *CopyDBClusterSnapshotInput) SetPreSignedUrl(v string) *CopyDBClusterSnapshotInput {
s.PreSignedUrl = &v
return s
}
// SetSourceDBClusterSnapshotIdentifier sets the SourceDBClusterSnapshotIdentifier field's value.
func (s *CopyDBClusterSnapshotInput) SetSourceDBClusterSnapshotIdentifier(v string) *CopyDBClusterSnapshotInput {
s.SourceDBClusterSnapshotIdentifier = &v
return s
}
// SetSourceRegion sets the SourceRegion field's value.
func (s *CopyDBClusterSnapshotInput) SetSourceRegion(v string) *CopyDBClusterSnapshotInput {
s.SourceRegion = &v
return s
}
// SetTags sets the Tags field's value.
func (s *CopyDBClusterSnapshotInput) SetTags(v []*Tag) *CopyDBClusterSnapshotInput {
s.Tags = v
@ -8966,6 +9121,9 @@ type CreateDBClusterInput struct {
// you are creating.
DatabaseName *string `type:"string"`
// DestinationRegion is used for presigning the request to a given region.
DestinationRegion *string `type:"string"`
// The name of the database engine to be used for this DB cluster.
//
// Valid Values: aurora
@ -8985,12 +9143,16 @@ type CreateDBClusterInput struct {
// The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption
// key. If you are creating a DB cluster with the same AWS account that owns
// the KMS encryption key used to encrypt the new DB cluster, then you can use
// the KMS key alias instead of the ARN for the KM encryption key.
// the KMS key alias instead of the ARN for the KMS encryption key.
//
// If the StorageEncrypted parameter is true, and you do not specify a value
// for the KmsKeyId parameter, then Amazon RDS will use your default encryption
// key. AWS KMS creates the default encryption key for your AWS account. Your
// AWS account has a different default encryption key for each AWS region.
//
// If you create a Read Replica of an encrypted DB cluster in another region,
// you must set KmsKeyId to a KMS key ID that is valid in the destination region.
// This key is used to encrypt the Read Replica in that region.
KmsKeyId *string `type:"string"`
// The password for the master database user. This password can contain any
@ -9022,6 +9184,36 @@ type CreateDBClusterInput struct {
// Default: 3306
Port *int64 `type:"integer"`
// A URL that contains a Signature Version 4 signed request for the CreateDBCluster
// action to be called in the source region where the DB cluster will be replicated
// from. You only need to specify PreSignedUrl when you are performing cross-region
// replication from an encrypted DB cluster.
//
// The pre-signed URL must be a valid request for the CreateDBCluster API action
// that can be executed in the source region that contains the encrypted DB
// cluster to be copied.
//
// The pre-signed URL request must contain the following parameter values:
//
// * KmsKeyId - The KMS key identifier for the key to use to encrypt the
// copy of the DB cluster in the destination region. This should refer to
// the same KMS key for both the CreateDBCluster action that is called in
// the destination region, and the action contained in the pre-signed URL.
//
// * DestinationRegion - The name of the region that Aurora Read Replica
// will be created in.
//
// * ReplicationSourceIdentifier - The DB cluster identifier for the encrypted
// DB cluster to be copied. This identifier must be in the Amazon Resource
// Name (ARN) format for the source region. For example, if you are copying
// an encrypted DB cluster from the us-west-2 region, then your ReplicationSourceIdentifier
// would look like Example: arn:aws:rds:us-west-2:123456789012:cluster:aurora-cluster1.
//
// To learn how to generate a Signature Version 4 signed request, see Authenticating
// Requests: Using Query Parameters (AWS Signature Version 4) (http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html)
// and Signature Version 4 Signing Process (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
PreSignedUrl *string `type:"string"`
// The daily time range during which automated backups are created if automated
// backups are enabled using the BackupRetentionPeriod parameter.
//
@ -9060,6 +9252,11 @@ type CreateDBClusterInput struct {
// this DB cluster is created as a Read Replica.
ReplicationSourceIdentifier *string `type:"string"`
// SourceRegion is the source region where the resource exists. This is not
// sent over the wire and is only used for presigning. This value should always
// have the same region as the source ARN.
SourceRegion *string `type:"string" ignore:"true"`
// Specifies whether the DB cluster is encrypted.
StorageEncrypted *bool `type:"boolean"`
@ -9138,6 +9335,12 @@ func (s *CreateDBClusterInput) SetDatabaseName(v string) *CreateDBClusterInput {
return s
}
// SetDestinationRegion sets the DestinationRegion field's value.
func (s *CreateDBClusterInput) SetDestinationRegion(v string) *CreateDBClusterInput {
s.DestinationRegion = &v
return s
}
// SetEngine sets the Engine field's value.
func (s *CreateDBClusterInput) SetEngine(v string) *CreateDBClusterInput {
s.Engine = &v
@ -9180,6 +9383,12 @@ func (s *CreateDBClusterInput) SetPort(v int64) *CreateDBClusterInput {
return s
}
// SetPreSignedUrl sets the PreSignedUrl field's value.
func (s *CreateDBClusterInput) SetPreSignedUrl(v string) *CreateDBClusterInput {
s.PreSignedUrl = &v
return s
}
// SetPreferredBackupWindow sets the PreferredBackupWindow field's value.
func (s *CreateDBClusterInput) SetPreferredBackupWindow(v string) *CreateDBClusterInput {
s.PreferredBackupWindow = &v
@ -9198,6 +9407,12 @@ func (s *CreateDBClusterInput) SetReplicationSourceIdentifier(v string) *CreateD
return s
}
// SetSourceRegion sets the SourceRegion field's value.
func (s *CreateDBClusterInput) SetSourceRegion(v string) *CreateDBClusterInput {
s.SourceRegion = &v
return s
}
// SetStorageEncrypted sets the StorageEncrypted field's value.
func (s *CreateDBClusterInput) SetStorageEncrypted(v bool) *CreateDBClusterInput {
s.StorageEncrypted = &v

View File

@ -13,6 +13,8 @@ func init() {
ops := []string{
opCopyDBSnapshot,
opCreateDBInstanceReadReplica,
opCopyDBClusterSnapshot,
opCreateDBCluster,
}
initRequest = func(r *request.Request) {
for _, operation := range ops {

View File

@ -50,7 +50,7 @@ type Profile struct {
Proxy string `mapstructure:"proxy"`
RedHatManagementKey string `mapstructure:"redhat_management_key"`
RedHatManagementServer string `mapstructure:"redhat_management_server"`
Repos []string `mapstructure:"repos"`
Repos string `mapstructure:"repos"`
Server string `mapstructure:"server"`
TemplateFiles string `mapstructure:"template_files"`
TemplateRemoteKickstarts int `mapstructure:"template_remote_kickstarts"`

View File

@ -98,7 +98,7 @@ type Interface struct {
IPv6StaticRoutes []string `mapstructure:"ipv6_static_routes" structs:"ipv6_static_routes"`
IPv6DefaultGateway string `mapstructure:"ipv6_default_gateway structs:"ipv6_default_gateway"`
MACAddress string `mapstructure:"mac_address" structs:"mac_address"`
Management bool `mapstructure:"management" structs:"managment"`
Management bool `mapstructure:"management" structs:"management"`
Netmask string `mapstructure:"netmask" structs:"netmask"`
Static bool `mapstructure:"static" structs:"static"`
StaticRoutes []string `mapstructure:"static_routes" structs:"static_routes"`

650
vendor/vendor.json vendored
View File

@ -488,636 +488,636 @@
"revisionTime": "2017-01-23T00:46:44Z"
},
{
"checksumSHA1": "OM67XTAtQQWCANR9osdxf2e+ij4=",
"checksumSHA1": "iqZtcuXvBhnOSc9oSK706rUQBGg=",
"path": "github.com/aws/aws-sdk-go",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "hw+4wvdRBIceN0V2nXNZPCb93gI=",
"checksumSHA1": "FN20dHo+g6B2zQC/ETGW/J+RNxw=",
"path": "github.com/aws/aws-sdk-go/aws",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=",
"path": "github.com/aws/aws-sdk-go/aws/awserr",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "yyYr41HZ1Aq0hWc3J5ijXwYEcac=",
"path": "github.com/aws/aws-sdk-go/aws/awsutil",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "iThCyNRL/oQFD9CF2SYgBGl+aww=",
"path": "github.com/aws/aws-sdk-go/aws/client",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=",
"path": "github.com/aws/aws-sdk-go/aws/client/metadata",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "Fl8vRSCY0MbM04cmiz/0MID+goA=",
"path": "github.com/aws/aws-sdk-go/aws/corehandlers",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "zu5C95rmCZff6NYZb62lEaT5ibE=",
"path": "github.com/aws/aws-sdk-go/aws/credentials",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "u3GOAJLmdvbuNUeUEcZSEAOeL/0=",
"path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=",
"path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "6cj/zsRmcxkE1TLS+v910GbQYg0=",
"path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "lqh3fG7wCochvB4iHAZJuhhEJW0=",
"path": "github.com/aws/aws-sdk-go/aws/defaults",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "/EXbk/z2TWjWc1Hvb4QYs3Wmhb8=",
"path": "github.com/aws/aws-sdk-go/aws/ec2metadata",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "JTrzEDPXL3pUUH+dMCixz9T9rLY=",
"checksumSHA1": "9LFtC2yggJvQfZ6NKVQTkW2WQJ8=",
"path": "github.com/aws/aws-sdk-go/aws/endpoints",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "M78rTxU55Qagqr3MYj91im2031E=",
"path": "github.com/aws/aws-sdk-go/aws/request",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "u6tKvFGcRQ1xtby1ONjgyUTgcpg=",
"path": "github.com/aws/aws-sdk-go/aws/session",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "0FvPLvkBUpTElfUc/FZtPsJfuV0=",
"path": "github.com/aws/aws-sdk-go/aws/signer/v4",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "wk7EyvDaHwb5qqoOP/4d3cV0708=",
"path": "github.com/aws/aws-sdk-go/private/protocol",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "1QmQ3FqV37w0Zi44qv8pA1GeR0A=",
"path": "github.com/aws/aws-sdk-go/private/protocol/ec2query",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "O6hcK24yI6w7FA+g4Pbr+eQ7pys=",
"path": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "R00RL5jJXRYq1iiK1+PGvMfvXyM=",
"path": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "ZqY5RWavBLWTo6j9xqdyBEaNFRk=",
"path": "github.com/aws/aws-sdk-go/private/protocol/query",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "hqTEmgtchF9SwVTW0IQId2eLUKM=",
"checksumSHA1": "Drt1JfLMa0DQEZLWrnMlTWaIcC8=",
"path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "szZSLm3BlYkL3vqlZhNAlYk8iwM=",
"path": "github.com/aws/aws-sdk-go/private/protocol/rest",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "Rpu8KBtHZgvhkwHxUfaky+qW+G4=",
"path": "github.com/aws/aws-sdk-go/private/protocol/restjson",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "ODo+ko8D6unAxZuN1jGzMcN4QCc=",
"path": "github.com/aws/aws-sdk-go/private/protocol/restxml",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "lZ1z4xAbT8euCzKoAsnEYic60VE=",
"path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "F6mth+G7dXN1GI+nktaGo8Lx8aE=",
"path": "github.com/aws/aws-sdk-go/private/signer/v2",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "Eo9yODN5U99BK0pMzoqnBm7PCrY=",
"path": "github.com/aws/aws-sdk-go/private/waiter",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "9n/Gdm1mNIxB7eXRZR+LP2pLjr8=",
"path": "github.com/aws/aws-sdk-go/service/acm",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "ygS1AtvAaYa1JHsccugtZUlxnxo=",
"path": "github.com/aws/aws-sdk-go/service/apigateway",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "vywzqp8jtu1rUKkb/4LEld2yOgQ=",
"path": "github.com/aws/aws-sdk-go/service/applicationautoscaling",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "0/2niio3ok72EAFl/s3S/E/yabc=",
"path": "github.com/aws/aws-sdk-go/service/autoscaling",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "rKlCBX8p5aFkljRSWug8chDKOsU=",
"path": "github.com/aws/aws-sdk-go/service/cloudformation",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "FKms6qE/E3ZLLV90G877CrXJwpk=",
"path": "github.com/aws/aws-sdk-go/service/cloudfront",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "YCEne7Ho245VJlaDqxpGaaNnNAs=",
"checksumSHA1": "JkCPEbRbVHODZ8hw8fRRB0ow0+s=",
"path": "github.com/aws/aws-sdk-go/service/cloudtrail",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "ZnIZiTYeRgS2393kOcYxNL0qAUQ=",
"path": "github.com/aws/aws-sdk-go/service/cloudwatch",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "wlq1vQbXSJ4NK6fzlVrPDZwyw8A=",
"path": "github.com/aws/aws-sdk-go/service/cloudwatchevents",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "TMRiIJYbg0/5naYSnYk3DQnaDkk=",
"path": "github.com/aws/aws-sdk-go/service/cloudwatchlogs",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "8T0+kiovp+vGclOMZMajizGsG54=",
"path": "github.com/aws/aws-sdk-go/service/codebuild",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "JKGhzZ6hg3myUEnNndjUyamloN4=",
"path": "github.com/aws/aws-sdk-go/service/codecommit",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "Lzj28Igm2Nazp9iY1qt3nJQ8vv4=",
"path": "github.com/aws/aws-sdk-go/service/codedeploy",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "LXjLQyMAadcANG0UURWuw4di2YE=",
"path": "github.com/aws/aws-sdk-go/service/codepipeline",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "NYRd4lqocAcZdkEvLHAZYyXz8Bs=",
"path": "github.com/aws/aws-sdk-go/service/configservice",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "fcYSy6jPQjLB7mtOfxsMqWnjobU=",
"path": "github.com/aws/aws-sdk-go/service/databasemigrationservice",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "efnIi8bx7cQJ46T9mtzg/SFRqLI=",
"path": "github.com/aws/aws-sdk-go/service/directoryservice",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "D5tbr+FKR8BUU0HxxGB9pS9Dlrc=",
"path": "github.com/aws/aws-sdk-go/service/dynamodb",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "ecCVL8+SptmQlojrGtL8mQdaJ6E=",
"path": "github.com/aws/aws-sdk-go/service/ec2",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "B6qHy1+Rrp9lQCBR/JDRT72kuCI=",
"path": "github.com/aws/aws-sdk-go/service/ecr",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "UFpKfwRxhzQk3pCbBrBa2RsPL24=",
"path": "github.com/aws/aws-sdk-go/service/ecs",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "jTTOfudaj/nYDyLCig9SKlDFFHk=",
"path": "github.com/aws/aws-sdk-go/service/efs",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "5ZYWoEnb0SID/9cKRb1oGPrrhsA=",
"path": "github.com/aws/aws-sdk-go/service/elasticache",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "oVV/BlLfwPI+iycKd9PIQ7oLm/4=",
"path": "github.com/aws/aws-sdk-go/service/elasticbeanstalk",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "yvQhmYq5ZKkKooTgkZ+M6032Vr0=",
"path": "github.com/aws/aws-sdk-go/service/elasticsearchservice",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "M1+iJ/A2Ml8bxSJFrBr/jWsv9w0=",
"path": "github.com/aws/aws-sdk-go/service/elastictranscoder",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "BjzlDfZp1UvDoFfFnkwBxJxtylg=",
"path": "github.com/aws/aws-sdk-go/service/elb",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "42TACCjZnJKGuF4ijfLpKUpw4/I=",
"path": "github.com/aws/aws-sdk-go/service/elbv2",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "x+ykEiXwI53Wm6Ypb4XgFf/6HaI=",
"path": "github.com/aws/aws-sdk-go/service/emr",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "1O87s9AddHMbwCu6ooNULcW9iE8=",
"path": "github.com/aws/aws-sdk-go/service/firehose",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "g5xmBO7nAUGV2yT8SAL2tfP8DUU=",
"path": "github.com/aws/aws-sdk-go/service/glacier",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "7JybKGBdRMLcnHP+126VLsnVghM=",
"path": "github.com/aws/aws-sdk-go/service/iam",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "Bk6ExT97T4NMOyXthMr6Avm34mg=",
"path": "github.com/aws/aws-sdk-go/service/inspector",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "lUmFKbtBQn9S4qrD5GOd57PIU1c=",
"path": "github.com/aws/aws-sdk-go/service/kinesis",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "l1NpLkHXS+eDybfk4Al9Afhyf/4=",
"path": "github.com/aws/aws-sdk-go/service/kms",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "8kUY3AExG/gcAJ2I2a5RCSoxx5I=",
"path": "github.com/aws/aws-sdk-go/service/lambda",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "Ab4YFGFLtEBEIpr8kHkLjB7ydGY=",
"path": "github.com/aws/aws-sdk-go/service/lightsail",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "c3N3uwWuXjwio6NNDAlDr0oUUXk=",
"path": "github.com/aws/aws-sdk-go/service/opsworks",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "M5FOT9uK1HSNypXWS8yVyt2tTWc=",
"checksumSHA1": "jlUKUEyZw9qh+qLaPaRzWS5bxEk=",
"path": "github.com/aws/aws-sdk-go/service/rds",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "09fncNHyk8Tcw9Ailvi0pi9F1Xc=",
"path": "github.com/aws/aws-sdk-go/service/redshift",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "VWVMEqjfDDgB14lgsv0Zq3dQclU=",
"path": "github.com/aws/aws-sdk-go/service/route53",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "eEWM4wKzVbRqAwIy3MdMCDUGs2s=",
"path": "github.com/aws/aws-sdk-go/service/s3",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "4NNi2Ab0iPu/MRGo/kn20mTNxg4=",
"path": "github.com/aws/aws-sdk-go/service/ses",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "KpqdFUB/0gBsouCqZmflQ4YPXB0=",
"path": "github.com/aws/aws-sdk-go/service/sfn",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "cRGam+7Yt9Ys4WQH6TNYg+Fjf20=",
"path": "github.com/aws/aws-sdk-go/service/simpledb",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "3wN8qn+1be7xe/0zXrOM502s+8M=",
"path": "github.com/aws/aws-sdk-go/service/sns",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "pMyhp8ffTMnHDoF+Wu0rcvhVoNE=",
"path": "github.com/aws/aws-sdk-go/service/sqs",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "UEVVPCLpzuLRBIZI7X1A8mIpSuA=",
"path": "github.com/aws/aws-sdk-go/service/ssm",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "Knj17ZMPWkGYTm2hZxEgnuboMM4=",
"path": "github.com/aws/aws-sdk-go/service/sts",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "C99KOCRh6qMcFwKFZy3r8we9NNE=",
"path": "github.com/aws/aws-sdk-go/service/waf",
"revision": "6669bce73b4e3bc922ff5ea3a3983ede26e02b39",
"revisionTime": "2017-02-28T02:59:22Z",
"version": "v1.7.3",
"versionExact": "v1.7.3"
"revision": "fa1a4bc634fffa6ac468d8fb217e05475e063440",
"revisionTime": "2017-03-08T00:44:25Z",
"version": "v1.7.5",
"versionExact": "v1.7.5"
},
{
"checksumSHA1": "nqw2Qn5xUklssHTubS5HDvEL9L4=",
@ -2168,11 +2168,11 @@
"revisionTime": "2016-06-16T18:50:15Z"
},
{
"checksumSHA1": "N4M0qtTBHR2XBxTuPRL9ocdkQns=",
"checksumSHA1": "YhQcOsGx8r2S/jkJ0Qt4cZ5BLCU=",
"comment": "v0.3.0-33-g53d1c0a",
"path": "github.com/jtopjian/cobblerclient",
"revision": "53d1c0a0b003aabfa7ecfa848d856606cb481196",
"revisionTime": "2016-04-01T00:38:02Z"
"revision": "3b306e0a196ac0dc04751ad27130601a4533cce9",
"revisionTime": "2017-03-05T20:21:00Z"
},
{
"checksumSHA1": "sKheT5xw89Tbu2Q071FQO27CVmE=",

View File

@ -1,3 +1,3 @@
source "https://rubygems.org"
gem "middleman-hashicorp", "0.3.4"
gem "middleman-hashicorp", "0.3.12"

View File

@ -1,29 +1,28 @@
GEM
remote: https://rubygems.org/
specs:
activesupport (4.2.7.1)
activesupport (4.2.8)
i18n (~> 0.7)
json (~> 1.7, >= 1.7.7)
minitest (~> 5.1)
thread_safe (~> 0.3, >= 0.3.4)
tzinfo (~> 1.1)
autoprefixer-rails (6.5.1.1)
autoprefixer-rails (6.7.6)
execjs
bootstrap-sass (3.3.7)
autoprefixer-rails (>= 5.2.1)
sass (>= 3.3.4)
builder (3.2.2)
builder (3.2.3)
capybara (2.4.4)
mime-types (>= 1.16)
nokogiri (>= 1.3.3)
rack (>= 1.0.0)
rack-test (>= 0.5.4)
xpath (~> 2.0)
chunky_png (1.3.7)
chunky_png (1.3.8)
coffee-script (2.4.1)
coffee-script-source
execjs
coffee-script-source (1.10.0)
coffee-script-source (1.12.2)
compass (1.0.3)
chunky_png (~> 1.2)
compass-core (~> 1.0.2)
@ -40,9 +39,9 @@ GEM
eventmachine (>= 0.12.9)
http_parser.rb (~> 0.6.0)
erubis (2.7.0)
eventmachine (1.2.0.1)
eventmachine (1.2.3)
execjs (2.7.0)
ffi (1.9.14)
ffi (1.9.18)
haml (4.0.7)
tilt
hike (1.2.3)
@ -50,8 +49,8 @@ GEM
uber (~> 0.0.14)
http_parser.rb (0.6.0)
i18n (0.7.0)
json (1.8.3)
kramdown (1.12.0)
json (2.0.3)
kramdown (1.13.2)
listen (3.0.8)
rb-fsevent (~> 0.9, >= 0.9.4)
rb-inotify (~> 0.9, >= 0.9.7)
@ -78,13 +77,14 @@ GEM
rack (>= 1.4.5, < 2.0)
thor (>= 0.15.2, < 2.0)
tilt (~> 1.4.1, < 2.0)
middleman-hashicorp (0.3.4)
middleman-hashicorp (0.3.12)
bootstrap-sass (~> 3.3)
builder (~> 3.2)
middleman (~> 3.4)
middleman-livereload (~> 3.4)
middleman-syntax (~> 3.0)
redcarpet (~> 3.3)
turbolinks (~> 5.0)
middleman-livereload (3.4.6)
em-websocket (~> 0.5.1)
middleman-core (>= 3.3)
@ -101,9 +101,9 @@ GEM
mime-types-data (~> 3.2015)
mime-types-data (3.2016.0521)
mini_portile2 (2.1.0)
minitest (5.9.1)
minitest (5.10.1)
multi_json (1.12.1)
nokogiri (1.6.8.1)
nokogiri (1.7.0.1)
mini_portile2 (~> 2.1.0)
padrino-helpers (0.12.8.1)
i18n (~> 0.6, >= 0.6.7)
@ -111,17 +111,17 @@ GEM
tilt (~> 1.4.1)
padrino-support (0.12.8.1)
activesupport (>= 3.1)
rack (1.6.4)
rack (1.6.5)
rack-livereload (0.3.16)
rack
rack-test (0.6.3)
rack (>= 1.0)
rb-fsevent (0.9.8)
rb-inotify (0.9.7)
rb-inotify (0.9.8)
ffi (>= 0.5.0)
redcarpet (3.3.4)
rouge (2.0.6)
sass (3.4.22)
redcarpet (3.4.0)
rouge (2.0.7)
sass (3.4.23)
sprockets (2.12.4)
hike (~> 1.2)
multi_json (~> 1.0)
@ -132,9 +132,12 @@ GEM
sprockets-sass (1.3.1)
sprockets (~> 2.0)
tilt (~> 1.1)
thor (0.19.1)
thread_safe (0.3.5)
thor (0.19.4)
thread_safe (0.3.6)
tilt (1.4.1)
turbolinks (5.0.1)
turbolinks-source (~> 5)
turbolinks-source (5.0.0)
tzinfo (1.2.2)
thread_safe (~> 0.1)
uber (0.0.15)
@ -148,7 +151,7 @@ PLATFORMS
ruby
DEPENDENCIES
middleman-hashicorp (= 0.3.4)
middleman-hashicorp (= 0.3.12)
BUNDLED WITH
1.13.6
1.14.6

View File

@ -3,8 +3,8 @@
This license is temporary while a more official one is drafted. However,
this should make it clear:
* The text contents of this website are MPL 2.0 licensed.
The text contents of this website are MPL 2.0 licensed.
* The design contents of this website are proprietary and may not be reproduced
or reused in any way other than to run the Terraform website locally. The license
for the design is owned solely by HashiCorp, Inc.
The design contents of this website are proprietary and may not be reproduced
or reused in any way other than to run the website locally. The license for
the design is owned solely by HashiCorp, Inc.

View File

@ -1,4 +1,4 @@
VERSION?="0.3.4"
VERSION?="0.3.12"
website:
@echo "==> Starting website in Docker..."

View File

@ -1,30 +1,21 @@
# Terraform Website
This subdirectory contains the entire source for the [Terraform Website](https://www.terraform.io/).
This is a [Middleman](http://middlemanapp.com) project, which builds a static
site from these source files.
This subdirectory contains the entire source for the [Terraform
Website][terraform]. This is a [Middleman][middleman] project, which builds a
static site from these source files.
## Contributions Welcome!
If you find a typo or you feel like you can improve the HTML, CSS, or
JavaScript, we welcome contributions. Feel free to open issues or pull
requests like any normal GitHub project, and we'll merge it in.
JavaScript, we welcome contributions. Feel free to open issues or pull requests
like any normal GitHub project, and we'll merge it in.
## Running the Site Locally
To run the site locally, clone this repository and run:
Running the site locally is simple. Clone this repo and run `make website`.
```shell
$ make website
```
Then open up `http://localhost:4567`. Note that some URLs you may need to append
".html" to make them work (in the navigation).
You must have Docker installed for this to work.
Alternatively, you can manually run the website like this:
```shell
$ bundle
$ bundle exec middleman server
```
Then open up `http://localhost:4567`.
[middleman]: https://www.middlemanapp.com
[terraform]: https://www.terraform.io

View File

@ -1,38 +0,0 @@
require "rack"
require "rack/contrib/not_found"
require "rack/contrib/response_headers"
require "rack/contrib/static_cache"
require "rack/contrib/try_static"
require "rack/protection"
# Protect against various bad things
use Rack::Protection::JsonCsrf
use Rack::Protection::RemoteReferrer
use Rack::Protection::HttpOrigin
use Rack::Protection::EscapedParams
use Rack::Protection::XSSHeader
use Rack::Protection::FrameOptions
use Rack::Protection::PathTraversal
use Rack::Protection::IPSpoofing
# Properly compress the output if the client can handle it.
use Rack::Deflater
# Set the "forever expire" cache headers for these static assets. Since
# we hash the contents of the assets to determine filenames, this is safe
# to do.
use Rack::StaticCache,
:root => "build",
:urls => ["/images", "/javascripts", "/stylesheets"],
:duration => 2,
:versioning => false
# Try to find a static file that matches our request, since Middleman
# statically generates everything.
use Rack::TryStatic,
:root => "build",
:urls => ["/"],
:try => [".html", "index.html", "/index.html"]
# 404 if we reached this point. Sad times.
run Rack::NotFound.new(File.expand_path("../build/404.html", __FILE__))

View File

@ -8,7 +8,7 @@
"builders": [
{
"type": "docker",
"image": "hashicorp/middleman-hashicorp:0.3.4",
"image": "hashicorp/middleman-hashicorp:0.3.12",
"discard": "true",
"run_command": ["-d", "-i", "-t", "{{ .Image }}", "/bin/sh"]
}

View File

@ -77,3 +77,7 @@ The following attribute is additionally exported:
* `instance_tenancy` - The allowed tenancy of instances launched into the
selected VPC. May be any of `"default"`, `"dedicated"`, or `"host"`.
* `ipv6_association_id` - The association ID for the IPv6 CIDR block.
* `ipv6_cidr_block` - The IPv6 CIDR block.

View File

@ -66,7 +66,7 @@ resource "aws_iam_policy_attachment" "codebuild_policy_attachment" {
resource "aws_codebuild_project" "foo" {
name = "test-project"
description = "test_codebuild_project"
timeout = "5"
build_timeout = "5"
service_role = "${aws_iam_role.codebuild_role.arn}"
artifacts {
@ -108,7 +108,7 @@ The following arguments are supported:
* `description` - (Optional) A short description of the project.
* `encryption_key` - (Optional) The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build project's build output artifacts.
* `service_role` - (Optional) The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.
* `timeout` - (Optional) How long in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait until timing out any related build that does not get marked as completed. The default is 60 minutes.
* `build_timeout` - (Optional) How long in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait until timing out any related build that does not get marked as completed. The default is 60 minutes.
* `tags` - (Optional) A mapping of tags to assign to the resource.
* `artifacts` - (Required) Information about the project's build output artifacts. Artifact blocks are documented below.
* `environment` - (Required) Information about the project's build environment. Environment blocks are documented below.

View File

@ -54,6 +54,7 @@ The following attributes are exported:
* `arn` - The Amazon Resource Name (ARN) specifying the role.
* `create_date` - The creation date of the IAM role.
* `unique_id` - The stable and unique string identifying the role.
* `name` - The name of the role.
## Example of Using Data Source for Assume Role Policy

View File

@ -102,7 +102,8 @@ The `root_block_device` mapping supports the following:
* `volume_size` - (Optional) The size of the volume in gigabytes.
* `iops` - (Optional) The amount of provisioned
[IOPS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-io-characteristics.html).
This must be set with a `volume_type` of `"io1"`.
This is only valid for `volume_type` of `"io1"`, and must be specified if
    using that type.
* `delete_on_termination` - (Optional) Whether the volume should be destroyed
on instance termination (Default: `true`).

View File

@ -44,6 +44,9 @@ The following arguments are supported:
* `platform_update_domain_count` - (Optional) Specifies the number of update domains that are used. Defaults to 5.
* `platform_fault_domain_count` - (Optional) Specifies the number of fault domains that are used. Defaults to 3.
* `managed` - (Optional) Specifies whether the availability set is managed or not. Possible values are `true` (to specify aligned) or `false` (to specify classic). Default is `false`.
* `tags` - (Optional) A mapping of tags to assign to the resource.
## Attributes Reference

View File

@ -3,13 +3,13 @@ layout: "consul"
page_title: "Consul: consul_catalog_entry"
sidebar_current: "docs-consul-resource-catalog-entry"
description: |-
Provides access to Catalog data in Consul. This can be used to define a node or a service. Currently, defining health checks is not supported.
Registers a node or service with the Consul Catalog. Currently, defining health checks is not supported.
---
# consul\_catalog\_entry
Provides access to Catalog data in Consul. This can be used to define a
node or a service. Currently, defining health checks is not supported.
Registers a node or service with the [Consul Catalog](https://www.consul.io/docs/agent/http/catalog.html#catalog_register).
Currently, defining health checks is not supported.
## Example Usage
@ -41,6 +41,11 @@ The following arguments are supported:
* `service` - (Optional) A service to optionally associated with
the node. Supported values are documented below.
* `datacenter` - (Optional) The datacenter to use. This overrides the
datacenter in the provider setup and the agent's default datacenter.
* `token` - (Optional) ACL token.
The `service` block supports the following:
* `address` - (Optional) The address of the service. Defaults to the

View File

@ -62,6 +62,7 @@ The following arguments are supported:
* `must_run` - (Optional, bool) If true, then the Docker container will be
kept running. If false, then as long as the container exists, Terraform
assumes it is successful.
* `capabilities` - (Optional, block) See [Capabilities](#capabilities) below for details.
* `ports` - (Optional, block) See [Ports](#ports) below for details.
* `host` - (Optional, block) See [Extra Hosts](#extra_hosts) below for
details.
@ -82,6 +83,27 @@ The following arguments are supported:
* `destroy_grace_seconds` - (Optional, int) If defined will attempt to stop the container before destroying. Container will be destroyed after `n` seconds or on successful stop.
* `upload` - (Optional, block) See [File Upload](#upload) below for details.
<a id="capabilities"></a>
### Capabilities
`capabilities` is a block within the configuration that allows you to add or drop linux capabilities. For more information about what capabilities you can add and drop please visit the docker run documentation.
* `add` - (Optional, set of strings) list of linux capabilities to add.
* `drop` - (Optional, set of strings) list of linux capabilities to drop.
Example:
```
resource "docker_container" "ubuntu" {
name = "foo"
image = "${docker_image.ubuntu.latest}"
capabilities {
add = ["ALL"]
drop = ["SYS_ADMIN"]
}
}
```
<a id="ports"></a>
### Ports

View File

@ -10,7 +10,7 @@ description: |-
Manages a Global Forwarding Rule within GCE. This binds an ip and port to a target HTTP(s) proxy. For more
information see [the official
documentation](https://cloud.google.com/compute/docs/load-balancing/network/forwarding-rules) and
documentation](https://cloud.google.com/compute/docs/load-balancing/http/global-forwarding-rules) and
[API](https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules).
## Example Usage

View File

@ -1,7 +1,7 @@
---
layout: "ignition"
page_title: "Ignition: ignition_config"
sidebar_current: "docs-ignition-resource-config"
sidebar_current: "docs-ignition-datasource-config"
description: |-
Renders an ignition configuration as JSON
---
@ -13,7 +13,7 @@ Renders an ignition configuration as JSON. It contains all the disks, partition
## Example Usage
```
resource "ignition_config" "example" {
data "ignition_config" "example" {
systemd = [
"${ignition_systemd_unit.example.id}",
]
@ -46,7 +46,7 @@ The following arguments are supported:
The `append` and `replace` blocks supports:
* `source` - (Required) The URL of the config. Supported schemes are http. Note: When using http, it is advisable to use the verification option to ensure the contents haven't been modified.
* `verification` - (Optional) The hash of the config, in the form _\<type\>-\<value\>_ where type is sha512.

View File

@ -1,7 +1,7 @@
---
layout: "ignition"
page_title: "Ignition: ignition_disk"
sidebar_current: "docs-ignition-resource-disk"
sidebar_current: "docs-ignition-datasource-disk"
description: |-
Describes the desired state of a system's disk.
---
@ -13,7 +13,7 @@ Describes the desired state of a systems disk.
## Example Usage
```
resource "ignition_disk" "foo" {
data "ignition_disk" "foo" {
device = "/dev/sda"
partition {
start = 2048

View File

@ -1,7 +1,7 @@
---
layout: "ignition"
page_title: "Ignition: ignition_file"
sidebar_current: "docs-ignition-resource-file"
sidebar_current: "docs-ignition-datasource-file"
description: |-
Describes a file to be written in a particular filesystem.
---
@ -16,7 +16,7 @@ File with inline content:
```
resource "ignition_file" "hello" {
data "ignition_file" "hello" {
filesystem = "foo"
path = "/hello.txt"
content {
@ -28,7 +28,7 @@ resource "ignition_file" "hello" {
File with remote content:
```
resource "ignition_file" "hello" {
data "ignition_file" "hello" {
filesystem = "qux"
path = "/hello.txt"
source {

View File

@ -1,19 +1,19 @@
---
layout: "ignition"
page_title: "Ignition: ignition_filesystem"
sidebar_current: "docs-ignition-resource-filesystem"
sidebar_current: "docs-ignition-datasource-filesystem"
description: |-
Describes the desired state of a system's filesystem.
---
# ignition\_filesystem
Describes the desired state of a the systems filesystems to be configured and/or used with the _ignition\_file_ resource.
Describes the desired state of the system's filesystems to be configured and/or used with the _ignition\_file_ resource.
## Example Usage
```
resource "ignition_filesystem" "foo" {
data "ignition_filesystem" "foo" {
name = "root"
mount {
device = "/dev/disk/by-label/ROOT"

View File

@ -1,7 +1,7 @@
---
layout: "ignition"
page_title: "Ignition: ignition_group"
sidebar_current: "docs-ignition-resource-group"
sidebar_current: "docs-ignition-datasource-group"
description: |-
Describes the desired group additions to the passwd database.
---
@ -13,7 +13,7 @@ Describes the desired group additions to the passwd database.
## Example Usage
```
resource "ignition_group" "foo" {
data "ignition_group" "foo" {
name = "foo"
}
```

Some files were not shown because too many files have changed in this diff Show More