Merge branch 'master' into cloud_router

Paddy committed 2017-05-03 16:00:13 -07:00 (committed by GitHub)
commit 8e69d75936
193 changed files with 13165 additions and 1212 deletions


@@ -2,7 +2,7 @@ dist: trusty
sudo: false
language: go
go:
-- 1.8
+- 1.8.x
# add TF_CONSUL_TEST=1 to run consul tests
# they were causing timeouts in travis
@@ -25,7 +25,7 @@ install:
- bash scripts/gogetcookie.sh
- go get github.com/kardianos/govendor
script:
-- make vet vendor-status test
+- make vendor-status test vet
- GOOS=windows go build
branches:
only:
@@ -39,4 +39,4 @@ notifications:
matrix:
fast_finish: true
allow_failures:
- go: tip


@@ -3,16 +3,57 @@
BACKWARDS INCOMPATIBILITIES / NOTES:
* provider/aws: Users of aws_cloudfront_distribution with custom_origins were broken by a change in the AWS API that requires `OriginReadTimeout` to be set on updates. This has been fixed and will show as a change in terraform plan / apply. [GH-13367]
* provider/aws: Users of China and Gov clouds cannot use the new tagging of volumes created as part of aws_instances [GH-14055]
FEATURES:
* **New Provider:** `gitlab` [GH-13898]
* **New Resource:** `aws_emr_security_configuration` [GH-14080]
* **New Resource:** `azurerm_sql_elasticpool` [GH-14099]
* **New Resource:** `google_compute_backend_bucket` [GH-14015]
* **New Resource:** `google_compute_snapshot` [GH-12482]
* **New Resource:** `heroku_app_feature` [GH-14035]
* **New Resource:** `heroku_pipeline` [GH-14078]
* **New Resource:** `heroku_pipeline_coupling` [GH-14078]
* **New Resource:** `vault_auth_backend` [GH-10988]
* **New Data Source:** `aws_efs_file_system` [GH-14041]
IMPROVEMENTS:
* core: `sha512` and `base64sha512` interpolation functions, similar to their `sha256` equivalents (a usage sketch follows these notes). [GH-14100]
* provider/aws: Add support for CustomOrigin timeouts to aws_cloudfront_distribution [GH-13367]
* provider/aws: Add support for IAMDatabaseAuthenticationEnabled [GH-14092]
* provider/aws: aws_dynamodb_table Add support for TimeToLive [GH-14104]
* provider/aws: Add `security_configuration` support to `aws_emr_cluster` [GH-14133]
* provider/aws: Add support for the tenancy placement option in `aws_spot_fleet_request` [GH-14163]
* provider/azurerm: `azurerm_template_deployment` now supports String/Int/Boolean outputs [GH-13670]
* provider/azurerm: Expose the Private IP Address for a Load Balancer, if available [GH-13965]
* provider/dnsimple: Add support for import for dnsimple_records [GH-9130]
* provider/google: Add support for networkIP in compute instance templates [GH-13515]
* provider/google: google_dns_managed_zone is now importable [GH-13824]
* provider/nomad: Add TLS options [GH-13956]
* provider/triton: Add support for reading provider configuration from `TRITON_*` environment variables in addition to `SDC_*` [GH-14000]
* provider/triton: Add `cloud_config` argument to `triton_machine` resources for Linux containers [GH-12840]
* provider/triton: Add `insecure_skip_tls_verify` [GH-14077]
BUG FIXES:
* core: `module` blocks without names are now caught in validation, along with various other block types [GH-14162]
* provider/aws: Update aws_ebs_volume when attached [GH-14005]
* provider/aws: Set aws_instance volume_tags to be Computed [GH-14007]
* provider/aws: Fix issue getting partition for federated users [GH-13992]
* provider/aws: aws_spot_instance_request is no longer ForceNew on volume_tags [GH-14046]
* provider/aws: Exclude aws_instance volume tagging for China and Gov Clouds [GH-14055]
* provider/aws: Fix source_dest_check with network_interface [GH-14079]
* provider/aws: Fix the bug where the SNS delivery policy was always recreated [GH-14064]
* provider/digitalocean: Prevent diffs when using IDs of images instead of slugs [GH-13879]
* provider/fastly: Changes setting conditionals to optional [GH-14103]
* provider/google: Ignore certain project services that can't be enabled directly via the api [GH-13730]
* provider/google: Ability to add more than 25 project services [GH-13758]
* provider/google: Fix compute instance panic with bad disk config [GH-14169]
* providers/heroku: Configure buildpacks correctly for both Org Apps and non-org Apps [GH-13990]
* provider/postgresql: Grant role when creating database [GH-11452]
* provisioner/remote-exec: Fix panic from remote_exec provisioner [GH-14134]
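
A minimal sketch of the new `sha512`/`base64sha512` interpolation functions noted above (the output names and file path are hypothetical):

# Hypothetical usage; these mirror the existing sha256/base64sha256 helpers
# but produce SHA-512 digests of the given string.
output "bundle_sha512" {
  value = "${sha512(file("bundle.tar.gz"))}"          # hex-encoded digest
}

output "bundle_sha512_b64" {
  value = "${base64sha512(file("bundle.tar.gz"))}"    # base64-encoded digest
}
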
## 0.9.4 (26th April 2017)


@@ -75,8 +75,8 @@ cover:
# vet runs the Go source code static analysis tool `vet` to find
# any common errors.
vet:
-	@echo "go vet ."
-	@go vet $$(go list ./... | grep -v vendor/) ; if [ $$? -eq 1 ]; then \
+	@echo 'go vet $$(go list ./... | grep -v /terraform/vendor/)'
+	@go vet $$(go list ./... | grep -v /terraform/vendor/) ; if [ $$? -eq 1 ]; then \
echo ""; \
echo "Vet found suspicious constructs. Please check the reported constructs"; \
echo "and fix them if necessary before submitting the code for review."; \

Vagrantfile vendored

@@ -5,7 +5,7 @@
VAGRANTFILE_API_VERSION = "2"
# Software version variables
-GOVERSION = "1.8"
+GOVERSION = "1.8.1"
UBUNTUVERSION = "16.04"
# CPU and RAM can be adjusted depending on your system


@@ -54,7 +54,7 @@ func GetAccountInfo(iamconn *iam.IAM, stsconn *sts.STS, authProviderName string)
awsErr, ok := err.(awserr.Error)
// AccessDenied and ValidationError can be raised
// if credentials belong to federated profile, so we ignore these
-if !ok || (awsErr.Code() != "AccessDenied" && awsErr.Code() != "ValidationError") {
+if !ok || (awsErr.Code() != "AccessDenied" && awsErr.Code() != "ValidationError" && awsErr.Code() != "InvalidClientTokenId") {
return "", "", fmt.Errorf("Failed getting account ID via 'iam:GetUser': %s", err)
}
log.Printf("[DEBUG] Getting account ID via iam:GetUser failed: %s", err)


@@ -171,6 +171,20 @@ func (c *AWSClient) DynamoDB() *dynamodb.DynamoDB {
return c.dynamodbconn
}
func (c *AWSClient) IsGovCloud() bool {
if c.region == "us-gov-west-1" {
return true
}
return false
}
func (c *AWSClient) IsChinaCloud() bool {
if c.region == "cn-north-1" {
return true
}
return false
}
// Client configures and returns a fully initialized AWSClient
func (c *Config) Client() (interface{}, error) {
// Get the auth and region. This can fail if keys/regions were not


@@ -0,0 +1,113 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/efs"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAwsEfsFileSystem() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsEfsFileSystemRead,
Schema: map[string]*schema.Schema{
"creation_token": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
ValidateFunc: validateMaxLength(64),
},
"file_system_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
},
"performance_mode": {
Type: schema.TypeString,
Computed: true,
},
"tags": tagsSchemaComputed(),
},
}
}
func dataSourceAwsEfsFileSystemRead(d *schema.ResourceData, meta interface{}) error {
efsconn := meta.(*AWSClient).efsconn
describeEfsOpts := &efs.DescribeFileSystemsInput{}
if v, ok := d.GetOk("creation_token"); ok {
describeEfsOpts.CreationToken = aws.String(v.(string))
}
if v, ok := d.GetOk("file_system_id"); ok {
describeEfsOpts.FileSystemId = aws.String(v.(string))
}
describeResp, err := efsconn.DescribeFileSystems(describeEfsOpts)
if err != nil {
return errwrap.Wrapf("Error retrieving EFS: {{err}}", err)
}
if len(describeResp.FileSystems) != 1 {
return fmt.Errorf("Search returned %d results, please revise so only one is returned", len(describeResp.FileSystems))
}
d.SetId(*describeResp.FileSystems[0].FileSystemId)
tags := make([]*efs.Tag, 0)
var marker string
for {
params := &efs.DescribeTagsInput{
FileSystemId: aws.String(d.Id()),
}
if marker != "" {
params.Marker = aws.String(marker)
}
tagsResp, err := efsconn.DescribeTags(params)
if err != nil {
return fmt.Errorf("Error retrieving EC2 tags for EFS file system (%q): %s",
d.Id(), err.Error())
}
for _, tag := range tagsResp.Tags {
tags = append(tags, tag)
}
if tagsResp.NextMarker != nil {
marker = *tagsResp.NextMarker
} else {
break
}
}
err = d.Set("tags", tagsToMapEFS(tags))
if err != nil {
return err
}
var fs *efs.FileSystemDescription
for _, f := range describeResp.FileSystems {
if d.Id() == *f.FileSystemId {
fs = f
break
}
}
if fs == nil {
log.Printf("[WARN] EFS (%s) not found, removing from state", d.Id())
d.SetId("")
return nil
}
d.Set("creation_token", fs.CreationToken)
d.Set("performance_mode", fs.PerformanceMode)
d.Set("file_system_id", fs.FileSystemId)
return nil
}


@@ -0,0 +1,71 @@
package aws
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccDataSourceAwsEfsFileSystem(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
{
Config: testAccDataSourceAwsEfsFileSystemConfig,
Check: resource.ComposeTestCheckFunc(
testAccDataSourceAwsEfsFileSystemCheck("data.aws_efs_file_system.by_creation_token"),
testAccDataSourceAwsEfsFileSystemCheck("data.aws_efs_file_system.by_id"),
),
},
},
})
}
func testAccDataSourceAwsEfsFileSystemCheck(name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[name]
if !ok {
return fmt.Errorf("root module has no resource called %s", name)
}
efsRs, ok := s.RootModule().Resources["aws_efs_file_system.test"]
if !ok {
return fmt.Errorf("can't find aws_efs_file_system.test in state")
}
attr := rs.Primary.Attributes
if attr["creation_token"] != efsRs.Primary.Attributes["creation_token"] {
return fmt.Errorf(
"creation_token is %s; want %s",
attr["creation_token"],
efsRs.Primary.Attributes["creation_token"],
)
}
if attr["id"] != efsRs.Primary.Attributes["id"] {
return fmt.Errorf(
"file_system_id is %s; want %s",
attr["id"],
efsRs.Primary.Attributes["id"],
)
}
return nil
}
}
const testAccDataSourceAwsEfsFileSystemConfig = `
resource "aws_efs_file_system" "test" {}
data "aws_efs_file_system" "by_creation_token" {
creation_token = "${aws_efs_file_system.test.creation_token}"
}
data "aws_efs_file_system" "by_id" {
file_system_id = "${aws_efs_file_system.test.id}"
}
`


@@ -0,0 +1,28 @@
package aws
import (
"testing"
"github.com/hashicorp/terraform/helper/resource"
)
func TestAccAWSEmrSecurityConfiguration_importBasic(t *testing.T) {
resourceName := "aws_emr_security_configuration.foo"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckEmrSecurityConfigurationDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccEmrSecurityConfigurationConfig,
},
resource.TestStep{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
},
},
})
}


@@ -178,6 +178,7 @@ func Provider() terraform.ResourceProvider {
"aws_ecs_cluster": dataSourceAwsEcsCluster(),
"aws_ecs_container_definition": dataSourceAwsEcsContainerDefinition(),
"aws_ecs_task_definition": dataSourceAwsEcsTaskDefinition(),
"aws_efs_file_system": dataSourceAwsEfsFileSystem(),
"aws_eip": dataSourceAwsEip(),
"aws_elb_hosted_zone_id": dataSourceAwsElbHostedZoneId(),
"aws_elb_service_account": dataSourceAwsElbServiceAccount(),
@@ -313,6 +314,7 @@ func Provider() terraform.ResourceProvider {
"aws_elb_attachment": resourceAwsElbAttachment(),
"aws_emr_cluster": resourceAwsEMRCluster(),
"aws_emr_instance_group": resourceAwsEMRInstanceGroup(),
"aws_emr_security_configuration": resourceAwsEMRSecurityConfiguration(),
"aws_flow_log": resourceAwsFlowLog(),
"aws_glacier_vault": resourceAwsGlacierVault(),
"aws_iam_access_key": resourceAwsIamAccessKey(),


@@ -335,6 +335,11 @@ func resourceAwsDbInstance() *schema.Resource {
ForceNew: true,
},
"iam_database_authentication_enabled": {
Type: schema.TypeBool,
Optional: true,
},
"tags": tagsSchema(),
},
}
@@ -634,6 +639,10 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
opts.KmsKeyId = aws.String(attr.(string))
}
if attr, ok := d.GetOk("iam_database_authentication_enabled"); ok {
opts.EnableIAMDatabaseAuthentication = aws.Bool(attr.(bool))
}
log.Printf("[DEBUG] DB Instance create configuration: %#v", opts)
var err error
err = resource.Retry(5*time.Minute, func() *resource.RetryError {
@@ -710,6 +719,7 @@ func resourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error {
d.Set("multi_az", v.MultiAZ)
d.Set("kms_key_id", v.KmsKeyId)
d.Set("port", v.DbInstancePort)
d.Set("iam_database_authentication_enabled", v.IAMDatabaseAuthenticationEnabled)
if v.DBSubnetGroup != nil {
d.Set("db_subnet_group_name", v.DBSubnetGroup.DBSubnetGroupName)
}
@@ -994,6 +1004,11 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error
requestUpdate = true
}
if d.HasChange("iam_database_authentication_enabled") {
req.EnableIAMDatabaseAuthentication = aws.Bool(d.Get("iam_database_authentication_enabled").(bool))
requestUpdate = true
}
log.Printf("[DEBUG] Send DB Instance Modification request: %t", requestUpdate)
if requestUpdate {
log.Printf("[DEBUG] DB Instance Modification request: %s", req)


@@ -170,6 +170,27 @@ func TestAccAWSDBInstance_optionGroup(t *testing.T) {
})
}
func TestAccAWSDBInstance_iamAuth(t *testing.T) {
var v rds.DBInstance
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSDBInstanceDestroy,
Steps: []resource.TestStep{
{
Config: testAccCheckAWSDBIAMAuth(acctest.RandInt()),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSDBInstanceExists("aws_db_instance.bar", &v),
testAccCheckAWSDBInstanceAttributes(&v),
resource.TestCheckResourceAttr(
"aws_db_instance.bar", "iam_database_authentication_enabled", "true"),
),
},
},
})
}
func TestAccAWSDBInstanceReplica(t *testing.T) {
var s, r rds.DBInstance
@@ -773,6 +794,24 @@ resource "aws_db_instance" "bar" {
}`, rName, acctest.RandInt())
}
func testAccCheckAWSDBIAMAuth(n int) string {
return fmt.Sprintf(`
resource "aws_db_instance" "bar" {
identifier = "foobarbaz-test-terraform-%d"
allocated_storage = 10
engine = "mysql"
engine_version = "5.6.34"
instance_class = "db.t2.micro"
name = "baz"
password = "barbarbarbar"
username = "foo"
backup_retention_period = 0
skip_final_snapshot = true
parameter_group_name = "default.mysql5.6"
iam_database_authentication_enabled = true
}`, n)
}
func testAccReplicaInstanceConfig(val int) string {
return fmt.Sprintf(`
resource "aws_db_instance" "bar" {


@@ -92,6 +92,23 @@ func resourceAwsDynamoDbTable() *schema.Resource {
return hashcode.String(buf.String())
},
},
"ttl": {
Type: schema.TypeSet,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"attribute_name": {
Type: schema.TypeString,
Required: true,
},
"enabled": {
Type: schema.TypeBool,
Required: true,
},
},
},
},
"local_secondary_index": {
Type: schema.TypeSet,
Optional: true,
@@ -296,6 +313,7 @@ func resourceAwsDynamoDbTableCreate(d *schema.ResourceData, meta interface{}) er
log.Printf("[DEBUG] Adding StreamSpecifications to the table")
}
_, timeToLiveOk := d.GetOk("ttl")
_, tagsOk := d.GetOk("tags")
attemptCount := 1
@@ -326,12 +344,28 @@ func resourceAwsDynamoDbTableCreate(d *schema.ResourceData, meta interface{}) er
if err := d.Set("arn", tableArn); err != nil {
return err
}
// Wait till the table is active before initiating any TimeToLive changes
if err := waitForTableToBeActive(d.Id(), meta); err != nil {
log.Printf("[DEBUG] Error waiting for table to be active: %s", err)
return err
}
log.Printf("[DEBUG] Setting DynamoDB TimeToLive on arn: %s", tableArn)
if timeToLiveOk {
if err := updateTimeToLive(d, meta); err != nil {
log.Printf("[DEBUG] Error updating table TimeToLive: %s", err)
return err
}
}
if tagsOk {
log.Printf("[DEBUG] Setting DynamoDB Tags on arn: %s", tableArn)
if err := createTableTags(d, meta); err != nil {
return err
}
}
return resourceAwsDynamoDbTableRead(d, meta)
}
}
@@ -587,6 +621,13 @@ func resourceAwsDynamoDbTableUpdate(d *schema.ResourceData, meta interface{}) er
}
if d.HasChange("ttl") {
if err := updateTimeToLive(d, meta); err != nil {
log.Printf("[DEBUG] Error updating table TimeToLive: %s", err)
return err
}
}
// Update tags
if err := setTagsDynamoDb(dynamodbconn, d); err != nil {
return err
@@ -595,6 +636,46 @@ func resourceAwsDynamoDbTableUpdate(d *schema.ResourceData, meta interface{}) er
return resourceAwsDynamoDbTableRead(d, meta)
}
func updateTimeToLive(d *schema.ResourceData, meta interface{}) error {
dynamodbconn := meta.(*AWSClient).dynamodbconn
if ttl, ok := d.GetOk("ttl"); ok {
timeToLiveSet := ttl.(*schema.Set)
spec := &dynamodb.TimeToLiveSpecification{}
timeToLive := timeToLiveSet.List()[0].(map[string]interface{})
spec.AttributeName = aws.String(timeToLive["attribute_name"].(string))
spec.Enabled = aws.Bool(timeToLive["enabled"].(bool))
req := &dynamodb.UpdateTimeToLiveInput{
TableName: aws.String(d.Id()),
TimeToLiveSpecification: spec,
}
_, err := dynamodbconn.UpdateTimeToLive(req)
if err != nil {
// If ttl was not previously set in the .tf file and is now being added as
// disabled, AWS rejects the no-op update with "TimeToLive is already disabled",
// so treat that as success and continue
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ValidationException" && awsErr.Message() == "TimeToLive is already disabled" {
return nil
}
log.Printf("[DEBUG] Error updating TimeToLive on table: %s", err)
return err
}
log.Printf("[DEBUG] Updated TimeToLive on table")
if err := waitForTimeToLiveUpdateToBeCompleted(d.Id(), timeToLive["enabled"].(bool), meta); err != nil {
return errwrap.Wrapf("Error waiting for Dynamo DB TimeToLive to be updated: {{err}}", err)
}
}
return nil
}
func resourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) error {
dynamodbconn := meta.(*AWSClient).dynamodbconn
log.Printf("[DEBUG] Loading data for DynamoDB table '%s'", d.Id())
@@ -711,6 +792,23 @@ func resourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) erro
d.Set("arn", table.TableArn)
timeToLiveReq := &dynamodb.DescribeTimeToLiveInput{
TableName: aws.String(d.Id()),
}
timeToLiveOutput, err := dynamodbconn.DescribeTimeToLive(timeToLiveReq)
if err != nil {
return err
}
timeToLive := []interface{}{}
attribute := map[string]*string{
"name": timeToLiveOutput.TimeToLiveDescription.AttributeName,
"type": timeToLiveOutput.TimeToLiveDescription.TimeToLiveStatus,
}
timeToLive = append(timeToLive, attribute)
d.Set("timeToLive", timeToLive)
log.Printf("[DEBUG] Loaded TimeToLive data for DynamoDB table '%s'", d.Id())
tags, err := readTableTags(d, meta)
if err != nil {
return err
@@ -910,6 +1008,39 @@ func waitForTableToBeActive(tableName string, meta interface{}) error {
}
func waitForTimeToLiveUpdateToBeCompleted(tableName string, enabled bool, meta interface{}) error {
dynamodbconn := meta.(*AWSClient).dynamodbconn
req := &dynamodb.DescribeTimeToLiveInput{
TableName: aws.String(tableName),
}
stateMatched := false
for stateMatched == false {
result, err := dynamodbconn.DescribeTimeToLive(req)
if err != nil {
return err
}
if enabled {
stateMatched = *result.TimeToLiveDescription.TimeToLiveStatus == dynamodb.TimeToLiveStatusEnabled
} else {
stateMatched = *result.TimeToLiveDescription.TimeToLiveStatus == dynamodb.TimeToLiveStatusDisabled
}
// Wait for a few seconds, this may take a long time...
if !stateMatched {
log.Printf("[DEBUG] Sleeping for 5 seconds before checking TimeToLive state again")
time.Sleep(5 * time.Second)
}
}
log.Printf("[DEBUG] TimeToLive update complete")
return nil
}
func createTableTags(d *schema.ResourceData, meta interface{}) error {
// DynamoDB Table has to be in the ACTIVE state in order to tag the resource
if err := waitForTableToBeActive(d.Id(), meta); err != nil {


@@ -110,6 +110,71 @@ func TestAccAWSDynamoDbTable_gsiUpdate(t *testing.T) {
})
}
func TestAccAWSDynamoDbTable_ttl(t *testing.T) {
var conf dynamodb.DescribeTableOutput
rName := acctest.RandomWithPrefix("TerraformTestTable-")
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSDynamoDbTableDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSDynamoDbConfigInitialState(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.basic-dynamodb-table", &conf),
),
},
{
Config: testAccAWSDynamoDbConfigAddTimeToLive(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckDynamoDbTableTimeToLiveWasUpdated("aws_dynamodb_table.basic-dynamodb-table"),
),
},
},
})
}
func testAccCheckDynamoDbTableTimeToLiveWasUpdated(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
log.Printf("[DEBUG] Trying to create initial table state!")
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No DynamoDB table name specified!")
}
conn := testAccProvider.Meta().(*AWSClient).dynamodbconn
params := &dynamodb.DescribeTimeToLiveInput{
TableName: aws.String(rs.Primary.ID),
}
resp, err := conn.DescribeTimeToLive(params)
if err != nil {
return fmt.Errorf("[ERROR] Problem describing time to live for table '%s': %s", rs.Primary.ID, err)
}
ttlDescription := resp.TimeToLiveDescription
log.Printf("[DEBUG] Checking on table %s", rs.Primary.ID)
if *ttlDescription.TimeToLiveStatus != dynamodb.TimeToLiveStatusEnabled {
return fmt.Errorf("TimeToLiveStatus %s, not ENABLED!", *ttlDescription.TimeToLiveStatus)
}
if *ttlDescription.AttributeName != "TestTTL" {
return fmt.Errorf("AttributeName was %s, not TestTTL!", *ttlDescription.AttributeName)
}
return nil
}
}
func TestResourceAWSDynamoDbTableStreamViewType_validation(t *testing.T) {
cases := []struct {
Value string
@@ -678,3 +743,55 @@ resource "aws_dynamodb_table" "test" {
}
`, name)
}
func testAccAWSDynamoDbConfigAddTimeToLive(rName string) string {
return fmt.Sprintf(`
resource "aws_dynamodb_table" "basic-dynamodb-table" {
name = "%s"
read_capacity = 10
write_capacity = 20
hash_key = "TestTableHashKey"
range_key = "TestTableRangeKey"
attribute {
name = "TestTableHashKey"
type = "S"
}
attribute {
name = "TestTableRangeKey"
type = "S"
}
attribute {
name = "TestLSIRangeKey"
type = "N"
}
attribute {
name = "TestGSIRangeKey"
type = "S"
}
local_secondary_index {
name = "TestTableLSI"
range_key = "TestLSIRangeKey"
projection_type = "ALL"
}
ttl {
attribute_name = "TestTTL"
enabled = true
}
global_secondary_index {
name = "InitialTestTableGSI"
hash_key = "TestTableHashKey"
range_key = "TestGSIRangeKey"
write_capacity = 10
read_capacity = 10
projection_type = "KEYS_ONLY"
}
}
`, rName)
}


@@ -179,7 +179,7 @@ func resourceAWSEbsVolumeUpdate(d *schema.ResourceData, meta interface{}) error
stateConf := &resource.StateChangeConf{
Pending: []string{"creating", "modifying"},
-Target: []string{"available"},
+Target: []string{"available", "in-use"},
Refresh: volumeStateRefreshFunc(conn, *result.VolumeModification.VolumeId),
Timeout: 5 * time.Minute,
Delay: 10 * time.Second,


@@ -30,6 +30,31 @@ func TestAccAWSEBSVolume_basic(t *testing.T) {
})
}
func TestAccAWSEBSVolume_updateAttachedEbsVolume(t *testing.T) {
var v ec2.Volume
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
IDRefreshName: "aws_ebs_volume.test",
Providers: testAccProviders,
Steps: []resource.TestStep{
{
Config: testAccAwsEbsAttachedVolumeConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckVolumeExists("aws_ebs_volume.test", &v),
resource.TestCheckResourceAttr("aws_ebs_volume.test", "size", "10"),
),
},
{
Config: testAccAwsEbsAttachedVolumeConfigUpdateSize,
Check: resource.ComposeTestCheckFunc(
testAccCheckVolumeExists("aws_ebs_volume.test", &v),
resource.TestCheckResourceAttr("aws_ebs_volume.test", "size", "20"),
),
},
},
})
}
func TestAccAWSEBSVolume_updateSize(t *testing.T) {
var v ec2.Volume
resource.Test(t, resource.TestCase{
@@ -200,6 +225,124 @@ resource "aws_ebs_volume" "test" {
}
`
const testAccAwsEbsAttachedVolumeConfig = `
data "aws_ami" "debian_jessie_latest" {
most_recent = true
filter {
name = "name"
values = ["debian-jessie-*"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
filter {
name = "architecture"
values = ["x86_64"]
}
filter {
name = "root-device-type"
values = ["ebs"]
}
owners = ["379101102735"] # Debian
}
resource "aws_instance" "test" {
ami = "${data.aws_ami.debian_jessie_latest.id}"
associate_public_ip_address = true
count = 1
instance_type = "t2.medium"
root_block_device {
volume_size = "10"
volume_type = "standard"
delete_on_termination = true
}
tags {
Name = "test-terraform"
}
}
resource "aws_ebs_volume" "test" {
depends_on = ["aws_instance.test"]
availability_zone = "${aws_instance.test.availability_zone}"
type = "gp2"
size = "10"
}
resource "aws_volume_attachment" "test" {
depends_on = ["aws_ebs_volume.test"]
device_name = "/dev/xvdg"
volume_id = "${aws_ebs_volume.test.id}"
instance_id = "${aws_instance.test.id}"
}
`
const testAccAwsEbsAttachedVolumeConfigUpdateSize = `
data "aws_ami" "debian_jessie_latest" {
most_recent = true
filter {
name = "name"
values = ["debian-jessie-*"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
filter {
name = "architecture"
values = ["x86_64"]
}
filter {
name = "root-device-type"
values = ["ebs"]
}
owners = ["379101102735"] # Debian
}
resource "aws_instance" "test" {
ami = "${data.aws_ami.debian_jessie_latest.id}"
associate_public_ip_address = true
count = 1
instance_type = "t2.medium"
root_block_device {
volume_size = "10"
volume_type = "standard"
delete_on_termination = true
}
tags {
Name = "test-terraform"
}
}
resource "aws_ebs_volume" "test" {
depends_on = ["aws_instance.test"]
availability_zone = "${aws_instance.test.availability_zone}"
type = "gp2"
size = "20"
}
resource "aws_volume_attachment" "test" {
depends_on = ["aws_ebs_volume.test"]
device_name = "/dev/xvdg"
volume_id = "${aws_ebs_volume.test.id}"
instance_id = "${aws_instance.test.id}"
}
`
const testAccAwsEbsVolumeConfigUpdateSize = `
resource "aws_ebs_volume" "test" {
availability_zone = "us-west-2a"


@@ -157,6 +157,11 @@ func resourceAwsEMRCluster() *schema.Resource {
ForceNew: true,
Required: true,
},
"security_configuration": {
Type: schema.TypeString,
ForceNew: true,
Optional: true,
},
"autoscaling_role": &schema.Schema{
Type: schema.TypeString,
ForceNew: true,
@@ -268,6 +273,10 @@ func resourceAwsEMRClusterCreate(d *schema.ResourceData, meta interface{}) error
params.AutoScalingRole = aws.String(v.(string))
}
if v, ok := d.GetOk("security_configuration"); ok {
params.SecurityConfiguration = aws.String(v.(string))
}
if instanceProfile != "" {
params.JobFlowRole = aws.String(instanceProfile)
}
@@ -361,6 +370,7 @@ func resourceAwsEMRClusterRead(d *schema.ResourceData, meta interface{}) error {
d.Set("name", cluster.Name)
d.Set("service_role", cluster.ServiceRole)
d.Set("security_configuration", cluster.SecurityConfiguration)
d.Set("autoscaling_role", cluster.AutoScalingRole)
d.Set("release_label", cluster.ReleaseLabel)
d.Set("log_uri", cluster.LogUri)


@@ -30,6 +30,22 @@ func TestAccAWSEMRCluster_basic(t *testing.T) {
})
}
func TestAccAWSEMRCluster_security_config(t *testing.T) {
var cluster emr.Cluster
r := acctest.RandInt()
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSEmrDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSEmrClusterConfig_SecurityConfiguration(r),
Check: testAccCheckAWSEmrClusterExists("aws_emr_cluster.tf-test-cluster", &cluster),
},
},
})
}
func TestAccAWSEMRCluster_bootstrap_ordering(t *testing.T) {
var cluster emr.Cluster
rName := acctest.RandomWithPrefix("tf-emr-bootstrap")
@@ -881,6 +897,356 @@ resource "aws_iam_role_policy_attachment" "emr-autoscaling-role" {
`, r, r, r, r, r, r, r, r, r, r)
}
func testAccAWSEmrClusterConfig_SecurityConfiguration(r int) string {
return fmt.Sprintf(`
provider "aws" {
region = "us-west-2"
}
resource "aws_emr_cluster" "tf-test-cluster" {
name = "emr-test-%d"
release_label = "emr-5.5.0"
applications = ["Spark"]
ec2_attributes {
subnet_id = "${aws_subnet.main.id}"
emr_managed_master_security_group = "${aws_security_group.allow_all.id}"
emr_managed_slave_security_group = "${aws_security_group.allow_all.id}"
instance_profile = "${aws_iam_instance_profile.emr_profile.arn}"
}
master_instance_type = "m3.xlarge"
core_instance_type = "m3.xlarge"
core_instance_count = 1
security_configuration = "${aws_emr_security_configuration.foo.name}"
tags {
role = "rolename"
dns_zone = "env_zone"
env = "env"
name = "name-env"
}
keep_job_flow_alive_when_no_steps = true
termination_protection = false
bootstrap_action {
path = "s3://elasticmapreduce/bootstrap-actions/run-if"
name = "runif"
args = ["instance.isMaster=true", "echo running on master node"]
}
configurations = "test-fixtures/emr_configurations.json"
depends_on = ["aws_main_route_table_association.a"]
service_role = "${aws_iam_role.iam_emr_default_role.arn}"
autoscaling_role = "${aws_iam_role.emr-autoscaling-role.arn}"
}
resource "aws_security_group" "allow_all" {
name = "allow_all_%d"
description = "Allow all inbound traffic"
vpc_id = "${aws_vpc.main.id}"
ingress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
depends_on = ["aws_subnet.main"]
lifecycle {
ignore_changes = ["ingress", "egress"]
}
tags {
name = "emr_test"
}
}
resource "aws_vpc" "main" {
cidr_block = "168.31.0.0/16"
enable_dns_hostnames = true
tags {
name = "emr_test_%d"
}
}
resource "aws_subnet" "main" {
vpc_id = "${aws_vpc.main.id}"
cidr_block = "168.31.0.0/20"
tags {
name = "emr_test_%d"
}
}
resource "aws_internet_gateway" "gw" {
vpc_id = "${aws_vpc.main.id}"
}
resource "aws_route_table" "r" {
vpc_id = "${aws_vpc.main.id}"
route {
cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.gw.id}"
}
}
resource "aws_main_route_table_association" "a" {
vpc_id = "${aws_vpc.main.id}"
route_table_id = "${aws_route_table.r.id}"
}
###
# IAM things
###
# IAM role for EMR Service
resource "aws_iam_role" "iam_emr_default_role" {
name = "iam_emr_default_role_%d"
assume_role_policy = <<EOT
{
"Version": "2008-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "elasticmapreduce.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
EOT
}
resource "aws_iam_role_policy_attachment" "service-attach" {
role = "${aws_iam_role.iam_emr_default_role.id}"
policy_arn = "${aws_iam_policy.iam_emr_default_policy.arn}"
}
resource "aws_iam_policy" "iam_emr_default_policy" {
name = "iam_emr_default_policy_%d"
policy = <<EOT
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Resource": "*",
"Action": [
"ec2:AuthorizeSecurityGroupEgress",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CancelSpotInstanceRequests",
"ec2:CreateNetworkInterface",
"ec2:CreateSecurityGroup",
"ec2:CreateTags",
"ec2:DeleteNetworkInterface",
"ec2:DeleteSecurityGroup",
"ec2:DeleteTags",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeAccountAttributes",
"ec2:DescribeDhcpOptions",
"ec2:DescribeInstanceStatus",
"ec2:DescribeInstances",
"ec2:DescribeKeyPairs",
"ec2:DescribeNetworkAcls",
"ec2:DescribeNetworkInterfaces",
"ec2:DescribePrefixLists",
"ec2:DescribeRouteTables",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSpotInstanceRequests",
"ec2:DescribeSpotPriceHistory",
"ec2:DescribeSubnets",
"ec2:DescribeVpcAttribute",
"ec2:DescribeVpcEndpoints",
"ec2:DescribeVpcEndpointServices",
"ec2:DescribeVpcs",
"ec2:DetachNetworkInterface",
"ec2:ModifyImageAttribute",
"ec2:ModifyInstanceAttribute",
"ec2:RequestSpotInstances",
"ec2:RevokeSecurityGroupEgress",
"ec2:RunInstances",
"ec2:TerminateInstances",
"ec2:DeleteVolume",
"ec2:DescribeVolumeStatus",
"ec2:DescribeVolumes",
"ec2:DetachVolume",
"iam:GetRole",
"iam:GetRolePolicy",
"iam:ListInstanceProfiles",
"iam:ListRolePolicies",
"iam:PassRole",
"s3:CreateBucket",
"s3:Get*",
"s3:List*",
"sdb:BatchPutAttributes",
"sdb:Select",
"sqs:CreateQueue",
"sqs:Delete*",
"sqs:GetQueue*",
"sqs:PurgeQueue",
"sqs:ReceiveMessage"
]
}]
}
EOT
}
# IAM Role for EC2 Instance Profile
resource "aws_iam_role" "iam_emr_profile_role" {
name = "iam_emr_profile_role_%d"
assume_role_policy = <<EOT
{
"Version": "2008-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
EOT
}
resource "aws_iam_instance_profile" "emr_profile" {
name = "emr_profile_%d"
roles = ["${aws_iam_role.iam_emr_profile_role.name}"]
}
resource "aws_iam_role_policy_attachment" "profile-attach" {
role = "${aws_iam_role.iam_emr_profile_role.id}"
policy_arn = "${aws_iam_policy.iam_emr_profile_policy.arn}"
}
resource "aws_iam_policy" "iam_emr_profile_policy" {
name = "iam_emr_profile_policy_%d"
policy = <<EOT
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Resource": "*",
"Action": [
"cloudwatch:*",
"dynamodb:*",
"ec2:Describe*",
"elasticmapreduce:Describe*",
"elasticmapreduce:ListBootstrapActions",
"elasticmapreduce:ListClusters",
"elasticmapreduce:ListInstanceGroups",
"elasticmapreduce:ListInstances",
"elasticmapreduce:ListSteps",
"kinesis:CreateStream",
"kinesis:DeleteStream",
"kinesis:DescribeStream",
"kinesis:GetRecords",
"kinesis:GetShardIterator",
"kinesis:MergeShards",
"kinesis:PutRecord",
"kinesis:SplitShard",
"rds:Describe*",
"s3:*",
"sdb:*",
"sns:*",
"sqs:*"
]
}]
}
EOT
}
# IAM Role for autoscaling
resource "aws_iam_role" "emr-autoscaling-role" {
name = "EMR_AutoScaling_DefaultRole_%d"
assume_role_policy = "${data.aws_iam_policy_document.emr-autoscaling-role-policy.json}"
}
data "aws_iam_policy_document" "emr-autoscaling-role-policy" {
statement {
effect = "Allow"
actions = ["sts:AssumeRole"]
principals = {
type = "Service"
identifiers = ["elasticmapreduce.amazonaws.com","application-autoscaling.amazonaws.com"]
}
}
}
resource "aws_iam_role_policy_attachment" "emr-autoscaling-role" {
role = "${aws_iam_role.emr-autoscaling-role.name}"
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceforAutoScalingRole"
}
resource "aws_emr_security_configuration" "foo" {
configuration = <<EOF
{
"EncryptionConfiguration": {
"AtRestEncryptionConfiguration": {
"S3EncryptionConfiguration": {
"EncryptionMode": "SSE-S3"
},
"LocalDiskEncryptionConfiguration": {
"EncryptionKeyProviderType": "AwsKms",
"AwsKmsKey": "${aws_kms_key.foo.arn}"
}
},
"EnableInTransitEncryption": false,
"EnableAtRestEncryption": true
}
}
EOF
}
resource "aws_kms_key" "foo" {
description = "Terraform acc test %d"
deletion_window_in_days = 7
policy = <<POLICY
{
"Version": "2012-10-17",
"Id": "kms-tf-1",
"Statement": [
{
"Sid": "Enable IAM User Permissions",
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": "kms:*",
"Resource": "*"
}
]
}
POLICY
}
`, r, r, r, r, r, r, r, r, r, r, r)
}
func testAccAWSEmrClusterConfigTerminationPolicyUpdated(r int) string {
return fmt.Sprintf(`
provider "aws" {


@@ -0,0 +1,132 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/emr"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsEMRSecurityConfiguration() *schema.Resource {
return &schema.Resource{
Create: resourceAwsEmrSecurityConfigurationCreate,
Read: resourceAwsEmrSecurityConfigurationRead,
Delete: resourceAwsEmrSecurityConfigurationDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
ConflictsWith: []string{"name_prefix"},
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if len(value) > 10280 {
errors = append(errors, fmt.Errorf(
"%q cannot be longer than 10280 characters", k))
}
return
},
},
"name_prefix": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if len(value) > 10000 {
errors = append(errors, fmt.Errorf(
"%q cannot be longer than 10000 characters, name is limited to 10280", k))
}
return
},
},
"configuration": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateJsonString,
},
"creation_date": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func resourceAwsEmrSecurityConfigurationCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).emrconn
var emrSCName string
if v, ok := d.GetOk("name"); ok {
emrSCName = v.(string)
} else {
if v, ok := d.GetOk("name_prefix"); ok {
emrSCName = resource.PrefixedUniqueId(v.(string))
} else {
emrSCName = resource.PrefixedUniqueId("tf-emr-sc-")
}
}
resp, err := conn.CreateSecurityConfiguration(&emr.CreateSecurityConfigurationInput{
Name: aws.String(emrSCName),
SecurityConfiguration: aws.String(d.Get("configuration").(string)),
})
if err != nil {
return err
}
d.SetId(*resp.Name)
return resourceAwsEmrSecurityConfigurationRead(d, meta)
}
func resourceAwsEmrSecurityConfigurationRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).emrconn
resp, err := conn.DescribeSecurityConfiguration(&emr.DescribeSecurityConfigurationInput{
Name: aws.String(d.Id()),
})
if err != nil {
if isAWSErr(err, "InvalidRequestException", "does not exist") {
log.Printf("[WARN] EMR Security Configuraiton (%s) not found, removing from state", d.Id())
d.SetId("")
return nil
}
return err
}
d.Set("creation_date", resp.CreationDateTime)
d.Set("name", resp.Name)
d.Set("configuration", resp.SecurityConfiguration)
return nil
}
func resourceAwsEmrSecurityConfigurationDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).emrconn
_, err := conn.DeleteSecurityConfiguration(&emr.DeleteSecurityConfigurationInput{
Name: aws.String(d.Id()),
})
if err != nil {
if isAWSErr(err, "InvalidRequestException", "does not exist") {
d.SetId("")
return nil
}
return err
}
d.SetId("")
return nil
}
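
A minimal usage sketch for this new resource, assuming a hypothetical local security_configuration.json file: with only name_prefix set, the create function above generates a unique name, and with neither name nor name_prefix it falls back to the "tf-emr-sc-" prefix.

resource "aws_emr_security_configuration" "example" {
  # name is omitted, so a unique name is generated from this prefix
  name_prefix   = "tf-emr-sc-"

  # hypothetical local file holding the EncryptionConfiguration JSON
  configuration = "${file("security_configuration.json")}"
}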


@@ -0,0 +1,111 @@
package aws
import (
"fmt"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/emr"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccAWSEmrSecurityConfiguration_basic(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckEmrSecurityConfigurationDestroy,
Steps: []resource.TestStep{
{
Config: testAccEmrSecurityConfigurationConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckEmrSecurityConfigurationExists("aws_emr_security_configuration.foo"),
),
},
},
})
}
func testAccCheckEmrSecurityConfigurationDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).emrconn
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_emr_security_configuration" {
continue
}
// Try to find the Security Configuration
resp, err := conn.DescribeSecurityConfiguration(&emr.DescribeSecurityConfigurationInput{
Name: aws.String(rs.Primary.ID),
})
if err == nil {
if resp.Name != nil && *resp.Name == rs.Primary.ID {
// assume this means the resource still exists
return fmt.Errorf("Error: EMR Security Configuration still exists: %s", *resp.Name)
}
return nil
}
// Verify the error is what we want
if err != nil {
if isAWSErr(err, "InvalidRequestException", "does not exist") {
return nil
}
return err
}
}
return nil
}
func testAccCheckEmrSecurityConfigurationExists(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No EMR Security Configuration ID is set")
}
conn := testAccProvider.Meta().(*AWSClient).emrconn
resp, err := conn.DescribeSecurityConfiguration(&emr.DescribeSecurityConfigurationInput{
Name: aws.String(rs.Primary.ID),
})
if err != nil {
return err
}
if resp.Name == nil {
return fmt.Errorf("EMR Security Configuration had nil name which shouldn't happen")
}
if *resp.Name != rs.Primary.ID {
return fmt.Errorf("EMR Security Configuration name mismatch, got (%s), expected (%s)", *resp.Name, rs.Primary.ID)
}
return nil
}
}
const testAccEmrSecurityConfigurationConfig = `
resource "aws_emr_security_configuration" "foo" {
configuration = <<EOF
{
"EncryptionConfiguration": {
"AtRestEncryptionConfiguration": {
"S3EncryptionConfiguration": {
"EncryptionMode": "SSE-S3"
},
"LocalDiskEncryptionConfiguration": {
"EncryptionKeyProviderType": "AwsKms",
"AwsKmsKey": "arn:aws:kms:us-west-2:187416307283:alias/tf_emr_test_key"
}
},
"EnableInTransitEncryption": false,
"EnableAtRestEncryption": true
}
}
EOF
}
`


@@ -90,6 +90,11 @@ func resourceAwsInstance() *schema.Resource {
Type: schema.TypeBool,
Optional: true,
Default: true,
DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool {
// Suppress diff if network_interface is set
_, ok := d.GetOk("network_interface")
return ok
},
},
"user_data": {
@@ -234,7 +239,7 @@ func resourceAwsInstance() *schema.Resource {
"tags": tagsSchema(),
"volume_tags": tagsSchema(),
"volume_tags": tagsSchemaComputed(),
"block_device": {
Type: schema.TypeMap,
@@ -432,32 +437,35 @@ func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error {
runOpts.Ipv6Addresses = ipv6Addresses
}
-tagsSpec := make([]*ec2.TagSpecification, 0)
-
-if v, ok := d.GetOk("tags"); ok {
-	tags := tagsFromMap(v.(map[string]interface{}))
-
-	spec := &ec2.TagSpecification{
-		ResourceType: aws.String("instance"),
-		Tags:         tags,
-	}
-
-	tagsSpec = append(tagsSpec, spec)
-}
-
-if v, ok := d.GetOk("volume_tags"); ok {
-	tags := tagsFromMap(v.(map[string]interface{}))
-
-	spec := &ec2.TagSpecification{
-		ResourceType: aws.String("volume"),
-		Tags:         tags,
-	}
-
-	tagsSpec = append(tagsSpec, spec)
-}
-
-if len(tagsSpec) > 0 {
-	runOpts.TagSpecifications = tagsSpec
-}
+restricted := meta.(*AWSClient).IsGovCloud() || meta.(*AWSClient).IsChinaCloud()
+if !restricted {
+	tagsSpec := make([]*ec2.TagSpecification, 0)
+
+	if v, ok := d.GetOk("tags"); ok {
+		tags := tagsFromMap(v.(map[string]interface{}))
+
+		spec := &ec2.TagSpecification{
+			ResourceType: aws.String("instance"),
+			Tags:         tags,
+		}
+
+		tagsSpec = append(tagsSpec, spec)
+	}
+
+	if v, ok := d.GetOk("volume_tags"); ok {
+		tags := tagsFromMap(v.(map[string]interface{}))
+
+		spec := &ec2.TagSpecification{
+			ResourceType: aws.String("volume"),
+			Tags:         tags,
+		}
+
+		tagsSpec = append(tagsSpec, spec)
+	}
+
+	if len(tagsSpec) > 0 {
+		runOpts.TagSpecifications = tagsSpec
+	}
+}
// Create the instance
@@ -639,6 +647,7 @@ func resourceAwsInstanceRead(d *schema.ResourceData, meta interface{}) error {
d.Set("primary_network_interface_id", primaryNetworkInterface.NetworkInterfaceId)
d.Set("associate_public_ip_address", primaryNetworkInterface.Association != nil)
d.Set("ipv6_address_count", len(primaryNetworkInterface.Ipv6Addresses))
d.Set("source_dest_check", *primaryNetworkInterface.SourceDestCheck)
for _, address := range primaryNetworkInterface.Ipv6Addresses {
ipv6Addresses = append(ipv6Addresses, *address.Ipv6Address)
@@ -713,19 +722,24 @@ func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
d.Partial(true)
if d.HasChange("tags") && !d.IsNewResource() {
if err := setTags(conn, d); err != nil {
return err
} else {
d.SetPartial("tags")
restricted := meta.(*AWSClient).IsGovCloud() || meta.(*AWSClient).IsChinaCloud()
if d.HasChange("tags") {
if !d.IsNewResource() || !restricted {
if err := setTags(conn, d); err != nil {
return err
} else {
d.SetPartial("tags")
}
}
}
if d.HasChange("volume_tags") && !d.IsNewResource() {
if err := setVolumeTags(conn, d); err != nil {
return err
} else {
d.SetPartial("volume_tags")
if d.HasChange("volume_tags") {
if !d.IsNewResource() || !restricted {
if err := setVolumeTags(conn, d); err != nil {
return err
} else {
d.SetPartial("volume_tags")
}
}
}


@@ -678,6 +678,25 @@ func TestAccAWSInstance_volumeTags(t *testing.T) {
})
}
func TestAccAWSInstance_volumeTagsComputed(t *testing.T) {
var v ec2.Instance
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckInstanceDestroy,
Steps: []resource.TestStep{
{
Config: testAccCheckInstanceConfigWithAttachedVolume,
Check: resource.ComposeTestCheckFunc(
testAccCheckInstanceExists("aws_instance.foo", &v),
),
ExpectNonEmptyPlan: false,
},
},
})
}
func TestAccAWSInstance_instanceProfileChange(t *testing.T) {
var v ec2.Instance
rName := acctest.RandString(5)
@@ -947,6 +966,27 @@ func TestAccAWSInstance_primaryNetworkInterface(t *testing.T) {
})
}
func TestAccAWSInstance_primaryNetworkInterfaceSourceDestCheck(t *testing.T) {
var instance ec2.Instance
var ini ec2.NetworkInterface
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckInstanceDestroy,
Steps: []resource.TestStep{
{
Config: testAccInstanceConfigPrimaryNetworkInterfaceSourceDestCheck,
Check: resource.ComposeTestCheckFunc(
testAccCheckInstanceExists("aws_instance.foo", &instance),
testAccCheckAWSENIExists("aws_network_interface.bar", &ini),
resource.TestCheckResourceAttr("aws_instance.foo", "source_dest_check", "false"),
),
},
},
})
}
func TestAccAWSInstance_addSecondaryInterface(t *testing.T) {
var before ec2.Instance
var after ec2.Instance
@@ -1382,6 +1422,69 @@ resource "aws_instance" "foo" {
}
`
const testAccCheckInstanceConfigWithAttachedVolume = `
data "aws_ami" "debian_jessie_latest" {
most_recent = true
filter {
name = "name"
values = ["debian-jessie-*"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
filter {
name = "architecture"
values = ["x86_64"]
}
filter {
name = "root-device-type"
values = ["ebs"]
}
owners = ["379101102735"] # Debian
}
resource "aws_instance" "foo" {
ami = "${data.aws_ami.debian_jessie_latest.id}"
associate_public_ip_address = true
count = 1
instance_type = "t2.medium"
root_block_device {
volume_size = "10"
volume_type = "standard"
delete_on_termination = true
}
tags {
Name = "test-terraform"
}
}
resource "aws_ebs_volume" "test" {
depends_on = ["aws_instance.foo"]
availability_zone = "${aws_instance.foo.availability_zone}"
type = "gp2"
size = "10"
tags {
Name = "test-terraform"
}
}
resource "aws_volume_attachment" "test" {
depends_on = ["aws_ebs_volume.test"]
device_name = "/dev/xvdg"
volume_id = "${aws_ebs_volume.test.id}"
instance_id = "${aws_instance.foo.id}"
}
`
const testAccCheckInstanceConfigNoVolumeTags = `
resource "aws_instance" "foo" {
ami = "ami-55a7ea65"
@@ -1784,6 +1887,42 @@ resource "aws_instance" "foo" {
}
`
const testAccInstanceConfigPrimaryNetworkInterfaceSourceDestCheck = `
resource "aws_vpc" "foo" {
cidr_block = "172.16.0.0/16"
tags {
Name = "tf-instance-test"
}
}
resource "aws_subnet" "foo" {
vpc_id = "${aws_vpc.foo.id}"
cidr_block = "172.16.10.0/24"
availability_zone = "us-west-2a"
tags {
Name = "tf-instance-test"
}
}
resource "aws_network_interface" "bar" {
subnet_id = "${aws_subnet.foo.id}"
private_ips = ["172.16.10.100"]
source_dest_check = false
tags {
Name = "primary_network_interface"
}
}
resource "aws_instance" "foo" {
ami = "ami-22b9a343"
instance_type = "t2.micro"
network_interface {
network_interface_id = "${aws_network_interface.bar.id}"
device_index = 0
}
}
`
const testAccInstanceConfigAddSecondaryNetworkInterfaceBefore = `
resource "aws_vpc" "foo" {
cidr_block = "172.16.0.0/16"


@@ -320,19 +320,33 @@ func updateKmsKeyStatus(conn *kms.KMS, id string, shouldBeEnabled bool) error {
}
func updateKmsKeyRotationStatus(conn *kms.KMS, d *schema.ResourceData) error {
-var err error
shouldEnableRotation := d.Get("enable_key_rotation").(bool)
-if shouldEnableRotation {
-	log.Printf("[DEBUG] Enabling key rotation for KMS key %q", d.Id())
-	_, err = conn.EnableKeyRotation(&kms.EnableKeyRotationInput{
-		KeyId: aws.String(d.Id()),
-	})
-} else {
-	log.Printf("[DEBUG] Disabling key rotation for KMS key %q", d.Id())
-	_, err = conn.DisableKeyRotation(&kms.DisableKeyRotationInput{
-		KeyId: aws.String(d.Id()),
-	})
-}
+err := resource.Retry(5*time.Minute, func() *resource.RetryError {
+	var err error
+	if shouldEnableRotation {
+		log.Printf("[DEBUG] Enabling key rotation for KMS key %q", d.Id())
+		_, err = conn.EnableKeyRotation(&kms.EnableKeyRotationInput{
+			KeyId: aws.String(d.Id()),
+		})
+	} else {
+		log.Printf("[DEBUG] Disabling key rotation for KMS key %q", d.Id())
+		_, err = conn.DisableKeyRotation(&kms.DisableKeyRotationInput{
+			KeyId: aws.String(d.Id()),
+		})
+	}
+
+	if err != nil {
+		awsErr, ok := err.(awserr.Error)
+		if ok && awsErr.Code() == "DisabledException" {
+			return resource.RetryableError(err)
+		}
+		return resource.NonRetryableError(err)
+	}
+
+	return nil
+})
if err != nil {
return fmt.Errorf("Failed to set key rotation for %q to %t: %q",


@@ -216,6 +216,11 @@ func resourceAwsRDSCluster() *schema.Resource {
Optional: true,
},
"iam_database_authentication_enabled": {
Type: schema.TypeBool,
Optional: true,
},
"tags": tagsSchema(),
},
}
@@ -428,6 +433,10 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error
createOpts.KmsKeyId = aws.String(attr.(string))
}
if attr, ok := d.GetOk("iam_database_authentication_enabled"); ok {
createOpts.EnableIAMDatabaseAuthentication = aws.Bool(attr.(bool))
}
log.Printf("[DEBUG] RDS Cluster create options: %s", createOpts)
resp, err := conn.CreateDBCluster(createOpts)
if err != nil {
@@ -520,6 +529,7 @@ func resourceAwsRDSClusterRead(d *schema.ResourceData, meta interface{}) error {
d.Set("kms_key_id", dbc.KmsKeyId)
d.Set("reader_endpoint", dbc.ReaderEndpoint)
d.Set("replication_source_identifier", dbc.ReplicationSourceIdentifier)
d.Set("iam_database_authentication_enabled", dbc.IAMDatabaseAuthenticationEnabled)
var vpcg []string
for _, g := range dbc.VpcSecurityGroups {
@@ -594,6 +604,11 @@ func resourceAwsRDSClusterUpdate(d *schema.ResourceData, meta interface{}) error
requestUpdate = true
}
if d.HasChange("iam_database_authentication_enabled") {
req.EnableIAMDatabaseAuthentication = aws.Bool(d.Get("iam_database_authentication_enabled").(bool))
requestUpdate = true
}
if requestUpdate {
_, err := conn.ModifyDBCluster(req)
if err != nil {


@@ -225,6 +225,26 @@ func TestAccAWSRDSCluster_backupsUpdate(t *testing.T) {
})
}
func TestAccAWSRDSCluster_iamAuth(t *testing.T) {
var v rds.DBCluster
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSClusterDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSClusterConfig_iamAuth(acctest.RandInt()),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSClusterExists("aws_rds_cluster.default", &v),
resource.TestCheckResourceAttr(
"aws_rds_cluster.default", "iam_database_authentication_enabled", "true"),
),
},
},
})
}
func testAccCheckAWSClusterDestroy(s *terraform.State) error {
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_rds_cluster" {
@@ -550,3 +570,16 @@ resource "aws_rds_cluster" "default" {
skip_final_snapshot = true
}`, n)
}
func testAccAWSClusterConfig_iamAuth(n int) string {
return fmt.Sprintf(`
resource "aws_rds_cluster" "default" {
cluster_identifier = "tf-aurora-cluster-%d"
availability_zones = ["us-west-2a","us-west-2b","us-west-2c"]
database_name = "mydb"
master_username = "foo"
master_password = "mustbeeightcharaters"
iam_database_authentication_enabled = true
skip_final_snapshot = true
}`, n)
}


@@ -484,7 +484,7 @@ func resourceAwsRoute53RecordRead(d *schema.ResourceData, meta interface{}) erro
}
}
err = d.Set("records", flattenResourceRecords(record.ResourceRecords))
err = d.Set("records", flattenResourceRecords(record.ResourceRecords, *record.Type))
if err != nil {
return fmt.Errorf("[DEBUG] Error setting records for: %s, error: %#v", d.Id(), err)
}


@@ -55,9 +55,15 @@ func resourceAwsSnsTopic() *schema.Resource {
},
},
"delivery_policy": &schema.Schema{
-Type:     schema.TypeString,
-Optional: true,
-ForceNew: false,
+Type:             schema.TypeString,
+Optional:         true,
+ForceNew:         false,
+ValidateFunc:     validateJsonString,
+DiffSuppressFunc: suppressEquivalentJsonDiffs,
+StateFunc: func(v interface{}) string {
+	json, _ := normalizeJsonString(v)
+	return json
+},
},
"arn": &schema.Schema{
Type: schema.TypeString,


@@ -67,6 +67,25 @@ func TestAccAWSSNSTopic_withIAMRole(t *testing.T) {
})
}
func TestAccAWSSNSTopic_withDeliveryPolicy(t *testing.T) {
expectedPolicy := `{"http":{"defaultHealthyRetryPolicy": {"minDelayTarget": 20,"maxDelayTarget": 20,"numMaxDelayRetries": 0,"numRetries": 3,"numNoDelayRetries": 0,"numMinDelayRetries": 0,"backoffFunction": "linear"},"disableSubscriptionOverrides": false}}`
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
IDRefreshName: "aws_sns_topic.test_topic",
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSSNSTopicDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSSNSTopicConfig_withDeliveryPolicy,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSSNSTopicExists("aws_sns_topic.test_topic"),
testAccCheckAWSNSTopicHasDeliveryPolicy("aws_sns_topic.test_topic", expectedPolicy),
),
},
},
})
}
func testAccCheckAWSNSTopicHasPolicy(n string, expectedPolicyText string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
@@ -117,6 +136,46 @@ func testAccCheckAWSNSTopicHasPolicy(n string, expectedPolicyText string) resour
}
}
func testAccCheckAWSNSTopicHasDeliveryPolicy(n string, expectedPolicyText string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No Queue URL specified!")
}
conn := testAccProvider.Meta().(*AWSClient).snsconn
params := &sns.GetTopicAttributesInput{
TopicArn: aws.String(rs.Primary.ID),
}
resp, err := conn.GetTopicAttributes(params)
if err != nil {
return err
}
var actualPolicyText string
for k, v := range resp.Attributes {
if k == "DeliveryPolicy" {
actualPolicyText = *v
break
}
}
equivalent := suppressEquivalentJsonDiffs("", actualPolicyText, expectedPolicyText, nil)
if !equivalent {
return fmt.Errorf("Non-equivalent delivery policy error:\n\nexpected: %s\n\n got: %s\n",
expectedPolicyText, actualPolicyText)
}
return nil
}
}
func testAccCheckAWSSNSTopicDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).snsconn
@@ -244,3 +303,26 @@ resource "aws_sns_topic" "test_topic" {
EOF
}
`
// Test for https://github.com/hashicorp/terraform/issues/14024
const testAccAWSSNSTopicConfig_withDeliveryPolicy = `
resource "aws_sns_topic" "test_topic" {
name = "test_delivery_policy"
delivery_policy = <<EOF
{
"http": {
"defaultHealthyRetryPolicy": {
"minDelayTarget": 20,
"maxDelayTarget": 20,
"numRetries": 3,
"numMaxDelayRetries": 0,
"numNoDelayRetries": 0,
"numMinDelayRetries": 0,
"backoffFunction": "linear"
},
"disableSubscriptionOverrides": false
}
}
EOF
}
`


@@ -206,6 +206,11 @@ func resourceAwsSpotFleetRequest() *schema.Resource {
Computed: true,
ForceNew: true,
},
"placement_tenancy": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"spot_price": {
Type: schema.TypeString,
Optional: true,
@@ -304,10 +309,15 @@ func buildSpotFleetLaunchSpecification(d map[string]interface{}, meta interface{
SpotPrice: aws.String(d["spot_price"].(string)),
}
+placement := new(ec2.SpotPlacement)
if v, ok := d["availability_zone"]; ok {
-	opts.Placement = &ec2.SpotPlacement{
-		AvailabilityZone: aws.String(v.(string)),
-	}
+	placement.AvailabilityZone = aws.String(v.(string))
+	opts.Placement = placement
}
+
+if v, ok := d["placement_tenancy"]; ok {
+	placement.Tenancy = aws.String(v.(string))
+	opts.Placement = placement
+}
if v, ok := d["ebs_optimized"]; ok {


@@ -325,6 +325,30 @@ func TestAccAWSSpotFleetRequest_withEBSDisk(t *testing.T) {
})
}
func TestAccAWSSpotFleetRequest_placementTenancy(t *testing.T) {
var sfr ec2.SpotFleetRequestConfig
rName := acctest.RandString(10)
rInt := acctest.RandInt()
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSSpotFleetRequestDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSSpotFleetRequestTenancyConfig(rName, rInt),
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckAWSSpotFleetRequestExists(
"aws_spot_fleet_request.foo", &sfr),
resource.TestCheckResourceAttr(
"aws_spot_fleet_request.foo", "spot_request_state", "active"),
testAccCheckAWSSpotFleetRequest_PlacementAttributes(&sfr),
),
},
},
})
}
func TestAccAWSSpotFleetRequest_CannotUseEmptyKeyName(t *testing.T) {
_, errs := validateSpotFleetRequestKeyName("", "key_name")
if len(errs) == 0 {
@@ -400,6 +424,27 @@ func testAccCheckAWSSpotFleetRequest_EBSAttributes(
}
}
func testAccCheckAWSSpotFleetRequest_PlacementAttributes(
sfr *ec2.SpotFleetRequestConfig) resource.TestCheckFunc {
return func(s *terraform.State) error {
if len(sfr.SpotFleetRequestConfig.LaunchSpecifications) == 0 {
return errors.New("Missing launch specification")
}
spec := *sfr.SpotFleetRequestConfig.LaunchSpecifications[0]
placement := spec.Placement
if placement == nil {
return fmt.Errorf("Expected placement to be set, got nil")
}
if *placement.Tenancy != "dedicated" {
return fmt.Errorf("Expected placement tenancy to be %q, got %q", "dedicated", placement.Tenancy)
}
return nil
}
}
func testAccCheckAWSSpotFleetRequestDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).ec2conn
@ -1251,3 +1296,78 @@ resource "aws_spot_fleet_request" "foo" {
}
`, rInt, rInt, rName)
}
func testAccAWSSpotFleetRequestTenancyConfig(rName string, rInt int) string {
return fmt.Sprintf(`
resource "aws_key_pair" "debugging" {
key_name = "tmp-key-%s"
public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD3F6tyPEFEzV0LX3X8BsXdMsQz1x2cEikKDEY0aIj41qgxMCP/iteneqXSIFZBp5vizPvaoIR3Um9xK7PGoW8giupGn+EPuxIA4cDM4vzOqOkiMPhz5XK0whEjkVzTo4+S0puvDZuwIsdiW9mxhJc7tgBNL0cYlWSYVkz4G/fslNfRPW5mYAM49f4fhtxPb5ok4Q2Lg9dPKVHO/Bgeu5woMc7RY0p1ej6D4CKFE6lymSDJpW0YHX/wqE9+cfEauh7xZcG0q9t2ta6F6fmX0agvpFyZo8aFbXeUBr7osSCJNgvavWbM/06niWrOvYX2xwWdhXmXSrbX8ZbabVohBK41 phodgson@thoughtworks.com"
}
resource "aws_iam_policy" "test-policy" {
name = "test-policy-%d"
path = "/"
description = "Spot Fleet Request ACCTest Policy"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Action": [
"ec2:DescribeImages",
"ec2:DescribeSubnets",
"ec2:RequestSpotInstances",
"ec2:TerminateInstances",
"ec2:DescribeInstanceStatus",
"iam:PassRole"
],
"Resource": ["*"]
}]
}
EOF
}
resource "aws_iam_policy_attachment" "test-attach" {
name = "test-attachment-%d"
roles = ["${aws_iam_role.test-role.name}"]
policy_arn = "${aws_iam_policy.test-policy.arn}"
}
resource "aws_iam_role" "test-role" {
name = "test-role-%s"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": [
"spotfleet.amazonaws.com",
"ec2.amazonaws.com"
]
},
"Action": "sts:AssumeRole"
}
]
}
EOF
}
resource "aws_spot_fleet_request" "foo" {
iam_fleet_role = "${aws_iam_role.test-role.arn}"
spot_price = "0.005"
target_capacity = 2
valid_until = "2019-11-04T20:44:20Z"
terminate_instances_with_expiration = true
launch_specification {
instance_type = "m1.small"
ami = "ami-d06a90b0"
key_name = "${aws_key_pair.debugging.key_name}"
placement_tenancy = "dedicated"
}
depends_on = ["aws_iam_policy_attachment.test-attach"]
}
`, rName, rInt, rInt, rName)
}

View File

@ -25,7 +25,7 @@ func resourceAwsSpotInstanceRequest() *schema.Resource {
	// Everything on a spot instance is ForceNew except tags and volume_tags
	for k, v := range s {
		if k == "tags" || k == "volume_tags" {
continue
}
v.ForceNew = true

View File

@ -814,11 +814,14 @@ func flattenStepAdjustments(adjustments []*autoscaling.StepAdjustment) []map[str
return result
}
func flattenResourceRecords(recs []*route53.ResourceRecord, typeStr string) []string {
strs := make([]string, 0, len(recs))
for _, r := range recs {
if r.Value != nil {
			s := *r.Value
if typeStr == "TXT" || typeStr == "SPF" {
s = strings.Replace(s, "\"", "", 2)
}
strs = append(strs, s)
}
}
@ -829,13 +832,11 @@ func expandResourceRecords(recs []interface{}, typeStr string) []*route53.Resour
records := make([]*route53.ResourceRecord, 0, len(recs))
for _, r := range recs {
s := r.(string)
		if typeStr == "TXT" || typeStr == "SPF" {
			// `flattenResourceRecords` removes quotes. Add them back.
			s = fmt.Sprintf("\"%s\"", s)
		}
		records = append(records, &route53.ResourceRecord{Value: aws.String(s)})
}
return records
}
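The quote handling above now round-trips TXT and SPF values explicitly: flattening strips the outer quotes for state, and expanding adds them back before the value is sent to Route53. A standalone sketch of that round trip (standard library only):

package main

import (
	"fmt"
	"strings"
)

func main() {
	apiValue := `"v=spf1 include:example.com ~all"`

	// Flatten: TXT/SPF values lose their surrounding quotes in state.
	stateValue := strings.Replace(apiValue, "\"", "", 2)

	// Expand: the quotes are added back before the value is sent to Route53.
	roundTripped := fmt.Sprintf("\"%s\"", stateValue)

	fmt.Println(stateValue)               // v=spf1 include:example.com ~all
	fmt.Println(roundTripped == apiValue) // true
}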

View File

@ -819,23 +819,56 @@ func TestFlattenStepAdjustments(t *testing.T) {
}
func TestFlattenResourceRecords(t *testing.T) {
	original := []string{
		`127.0.0.1`,
		`"abc def"`,
	}
dequoted := []string{
`127.0.0.1`,
`abc def`,
}
	var wrapped []*route53.ResourceRecord
	for _, value := range original {
		wrapped = append(wrapped, &route53.ResourceRecord{Value: aws.String(value)})
	}
sub := func(recordType string, expected []string) {
t.Run(recordType, func(t *testing.T) {
checkFlattenResourceRecords(t, recordType, wrapped, expected)
})
}
// These record types should be dequoted.
sub("TXT", dequoted)
sub("SPF", dequoted)
// These record types should not be touched.
sub("CNAME", original)
sub("MX", original)
}
func checkFlattenResourceRecords(
t *testing.T,
recordType string,
expanded []*route53.ResourceRecord,
expected []string) {
result := flattenResourceRecords(expanded, recordType)
	if result == nil {
		t.Fatal("expected result to have value, but got nil")
	}
	if len(result) != len(expected) {
		t.Fatalf("expected %v, got %v", expected, result)
	}
for i, e := range expected {
if result[i] != e {
t.Fatalf("expected %v, got %v", expected, result)
}
}
}

View File

@ -19,6 +19,7 @@ import (
"github.com/Azure/azure-sdk-for-go/arm/resources/resources"
"github.com/Azure/azure-sdk-for-go/arm/scheduler"
"github.com/Azure/azure-sdk-for-go/arm/servicebus"
"github.com/Azure/azure-sdk-for-go/arm/sql"
"github.com/Azure/azure-sdk-for-go/arm/storage"
"github.com/Azure/azure-sdk-for-go/arm/trafficmanager"
mainStorage "github.com/Azure/azure-sdk-for-go/storage"
@ -99,6 +100,8 @@ type ArmClient struct {
serviceBusSubscriptionsClient servicebus.SubscriptionsClient
keyVaultClient keyvault.VaultsClient
sqlElasticPoolsClient sql.ElasticPoolsClient
}
func withRequestLogging() autorest.SendDecorator {
@ -458,6 +461,12 @@ func (c *Config) getArmClient() (*ArmClient, error) {
kvc.Sender = autorest.CreateSender(withRequestLogging())
client.keyVaultClient = kvc
sqlepc := sql.NewElasticPoolsClientWithBaseURI(endpoint, c.SubscriptionID)
setUserAgent(&sqlepc.Client)
sqlepc.Authorizer = spt
sqlepc.Sender = autorest.CreateSender(withRequestLogging())
client.sqlElasticPoolsClient = sqlepc
return &client, nil
}

View File

@ -0,0 +1,32 @@
package azurerm
import (
"fmt"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"testing"
)
func TestAccAzureRMSqlElasticPool_importBasic(t *testing.T) {
resourceName := "azurerm_sql_elasticpool.test"
ri := acctest.RandInt()
config := fmt.Sprintf(testAccAzureRMSqlElasticPool_basic, ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMSqlElasticPoolDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: config,
},
resource.TestStep{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
},
},
})
}

View File

@ -99,6 +99,7 @@ func Provider() terraform.ResourceProvider {
"azurerm_servicebus_namespace": resourceArmServiceBusNamespace(),
"azurerm_servicebus_subscription": resourceArmServiceBusSubscription(),
"azurerm_servicebus_topic": resourceArmServiceBusTopic(),
"azurerm_sql_elasticpool": resourceArmSqlElasticPool(),
"azurerm_storage_account": resourceArmStorageAccount(),
"azurerm_storage_blob": resourceArmStorageBlob(),
"azurerm_storage_container": resourceArmStorageContainer(),

View File

@ -92,6 +92,11 @@ func resourceArmLoadBalancer() *schema.Resource {
},
},
"private_ip_address": {
Type: schema.TypeString,
Computed: true,
},
"tags": tagsSchema(),
},
}
@ -172,7 +177,17 @@ func resourecArmLoadBalancerRead(d *schema.ResourceData, meta interface{}) error
d.Set("resource_group_name", id.ResourceGroup)
if loadBalancer.LoadBalancerPropertiesFormat != nil && loadBalancer.LoadBalancerPropertiesFormat.FrontendIPConfigurations != nil {
d.Set("frontend_ip_configuration", flattenLoadBalancerFrontendIpConfiguration(loadBalancer.LoadBalancerPropertiesFormat.FrontendIPConfigurations))
ipconfigs := loadBalancer.LoadBalancerPropertiesFormat.FrontendIPConfigurations
d.Set("frontend_ip_configuration", flattenLoadBalancerFrontendIpConfiguration(ipconfigs))
for _, config := range *ipconfigs {
if config.FrontendIPConfigurationPropertiesFormat.PrivateIPAddress != nil {
d.Set("private_ip_address", config.FrontendIPConfigurationPropertiesFormat.PrivateIPAddress)
// set the private IP address at most once
break
}
}
}
flattenAndSetTags(d, loadBalancer.Tags)

View File

@ -158,6 +158,10 @@ func resourceArmSqlDatabaseCreate(d *schema.ResourceData, meta interface{}) erro
command.RequestedServiceObjectiveID = azure.String(v.(string))
}
if v, ok := d.GetOk("elastic_pool_name"); ok {
command.ElasticPoolName = azure.String(v.(string))
}
if v, ok := d.GetOk("requested_service_objective_name"); ok {
command.RequestedServiceObjectiveName = azure.String(v.(string))
}
@ -216,6 +220,7 @@ func resourceArmSqlDatabaseRead(d *schema.ResourceData, meta interface{}) error
d.Set("name", resp.Name)
d.Set("creation_date", resp.CreationDate)
d.Set("default_secondary_location", resp.DefaultSecondaryLocation)
d.Set("elastic_pool_name", resp.ElasticPoolName)
flattenAndSetTags(d, resp.Tags)

View File

@ -65,6 +65,26 @@ func TestAccAzureRMSqlDatabase_basic(t *testing.T) {
})
}
func TestAccAzureRMSqlDatabase_elasticPool(t *testing.T) {
ri := acctest.RandInt()
config := fmt.Sprintf(testAccAzureRMSqlDatabase_elasticPool, ri, ri, ri, ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMSqlDatabaseDestroy,
Steps: []resource.TestStep{
{
Config: config,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMSqlDatabaseExists("azurerm_sql_database.test"),
resource.TestCheckResourceAttr("azurerm_sql_database.test", "elastic_pool_name", fmt.Sprintf("acctestep%d", ri)),
),
},
},
})
}
func TestAccAzureRMSqlDatabase_withTags(t *testing.T) {
ri := acctest.RandInt()
preConfig := fmt.Sprintf(testAccAzureRMSqlDatabase_withTags, ri, ri, ri)
@ -163,6 +183,44 @@ func testCheckAzureRMSqlDatabaseDestroy(s *terraform.State) error {
return nil
}
var testAccAzureRMSqlDatabase_elasticPool = `
resource "azurerm_resource_group" "test" {
name = "acctestRG_%d"
location = "West US"
}
resource "azurerm_sql_server" "test" {
name = "acctestsqlserver%d"
resource_group_name = "${azurerm_resource_group.test.name}"
location = "West US"
version = "12.0"
administrator_login = "mradministrator"
administrator_login_password = "thisIsDog11"
}
resource "azurerm_sql_elasticpool" "test" {
name = "acctestep%d"
resource_group_name = "${azurerm_resource_group.test.name}"
location = "West US"
server_name = "${azurerm_sql_server.test.name}"
edition = "Basic"
dtu = 50
pool_size = 5000
}
resource "azurerm_sql_database" "test" {
name = "acctestdb%d"
resource_group_name = "${azurerm_resource_group.test.name}"
server_name = "${azurerm_sql_server.test.name}"
location = "West US"
edition = "${azurerm_sql_elasticpool.test.edition}"
collation = "SQL_Latin1_General_CP1_CI_AS"
max_size_bytes = "1073741824"
elastic_pool_name = "${azurerm_sql_elasticpool.test.name}"
requested_service_objective_name = "ElasticPool"
}
`
var testAccAzureRMSqlDatabase_basic = `
resource "azurerm_resource_group" "test" {
name = "acctestRG_%d"

View File

@ -0,0 +1,220 @@
package azurerm
import (
"fmt"
"github.com/Azure/azure-sdk-for-go/arm/sql"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/validation"
"log"
"net/http"
"time"
)
func resourceArmSqlElasticPool() *schema.Resource {
return &schema.Resource{
Create: resourceArmSqlElasticPoolCreate,
Read: resourceArmSqlElasticPoolRead,
Update: resourceArmSqlElasticPoolCreate,
Delete: resourceArmSqlElasticPoolDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"location": locationSchema(),
"resource_group_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"server_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"edition": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateSqlElasticPoolEdition(),
},
"dtu": {
Type: schema.TypeInt,
Required: true,
},
"db_dtu_min": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"db_dtu_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"pool_size": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"creation_date": {
Type: schema.TypeString,
Computed: true,
},
"tags": tagsSchema(),
},
}
}
func resourceArmSqlElasticPoolCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ArmClient)
elasticPoolsClient := client.sqlElasticPoolsClient
log.Printf("[INFO] preparing arguments for Azure ARM SQL ElasticPool creation.")
name := d.Get("name").(string)
serverName := d.Get("server_name").(string)
location := d.Get("location").(string)
resGroup := d.Get("resource_group_name").(string)
tags := d.Get("tags").(map[string]interface{})
elasticPool := sql.ElasticPool{
Name: &name,
Location: &location,
ElasticPoolProperties: getArmSqlElasticPoolProperties(d),
Tags: expandTags(tags),
}
_, err := elasticPoolsClient.CreateOrUpdate(resGroup, serverName, name, elasticPool, make(chan struct{}))
if err != nil {
return err
}
read, err := elasticPoolsClient.Get(resGroup, serverName, name)
if err != nil {
return err
}
if read.ID == nil {
return fmt.Errorf("Cannot read SQL ElasticPool %s (resource group %s) ID", name, resGroup)
}
d.SetId(*read.ID)
return resourceArmSqlElasticPoolRead(d, meta)
}
func resourceArmSqlElasticPoolRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ArmClient)
elasticPoolsClient := client.sqlElasticPoolsClient
resGroup, serverName, name, err := parseArmSqlElasticPoolId(d.Id())
if err != nil {
return err
}
resp, err := elasticPoolsClient.Get(resGroup, serverName, name)
if err != nil {
if resp.StatusCode == http.StatusNotFound {
d.SetId("")
return nil
}
return fmt.Errorf("Error making Read request on Sql Elastic Pool %s: %s", name, err)
}
d.Set("name", resp.Name)
d.Set("resource_group_name", resGroup)
d.Set("location", azureRMNormalizeLocation(*resp.Location))
d.Set("server_name", serverName)
elasticPool := resp.ElasticPoolProperties
if elasticPool != nil {
d.Set("edition", string(elasticPool.Edition))
d.Set("dtu", int(*elasticPool.Dtu))
d.Set("db_dtu_min", int(*elasticPool.DatabaseDtuMin))
d.Set("db_dtu_max", int(*elasticPool.DatabaseDtuMax))
d.Set("pool_size", int(*elasticPool.StorageMB))
if elasticPool.CreationDate != nil {
d.Set("creation_date", elasticPool.CreationDate.Format(time.RFC3339))
}
}
flattenAndSetTags(d, resp.Tags)
return nil
}
func resourceArmSqlElasticPoolDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ArmClient)
elasticPoolsClient := client.sqlElasticPoolsClient
resGroup, serverName, name, err := parseArmSqlElasticPoolId(d.Id())
if err != nil {
return err
}
_, err = elasticPoolsClient.Delete(resGroup, serverName, name)
return err
}
func getArmSqlElasticPoolProperties(d *schema.ResourceData) *sql.ElasticPoolProperties {
edition := sql.ElasticPoolEditions(d.Get("edition").(string))
dtu := int32(d.Get("dtu").(int))
props := &sql.ElasticPoolProperties{
Edition: edition,
Dtu: &dtu,
}
if databaseDtuMin, ok := d.GetOk("db_dtu_min"); ok {
databaseDtuMin := int32(databaseDtuMin.(int))
props.DatabaseDtuMin = &databaseDtuMin
}
if databaseDtuMax, ok := d.GetOk("db_dtu_max"); ok {
databaseDtuMax := int32(databaseDtuMax.(int))
props.DatabaseDtuMax = &databaseDtuMax
}
if poolSize, ok := d.GetOk("pool_size"); ok {
poolSize := int32(poolSize.(int))
props.StorageMB = &poolSize
}
return props
}
func parseArmSqlElasticPoolId(sqlElasticPoolId string) (string, string, string, error) {
id, err := parseAzureResourceID(sqlElasticPoolId)
if err != nil {
return "", "", "", fmt.Errorf("[ERROR] Unable to parse SQL ElasticPool ID '%s': %+v", sqlElasticPoolId, err)
}
return id.ResourceGroup, id.Path["servers"], id.Path["elasticPools"], nil
}
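// For illustration, an ID such as
//   /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myGroup/providers/Microsoft.Sql/servers/myserver/elasticPools/mypool
// parses into ("myGroup", "myserver", "mypool", nil).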
func validateSqlElasticPoolEdition() schema.SchemaValidateFunc {
return validation.StringInSlice([]string{
string(sql.ElasticPoolEditionsBasic),
string(sql.ElasticPoolEditionsStandard),
string(sql.ElasticPoolEditionsPremium),
}, false)
}

View File

@ -0,0 +1,168 @@
package azurerm
import (
"fmt"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"net/http"
"testing"
)
func TestAccAzureRMSqlElasticPool_basic(t *testing.T) {
ri := acctest.RandInt()
config := fmt.Sprintf(testAccAzureRMSqlElasticPool_basic, ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMSqlElasticPoolDestroy,
Steps: []resource.TestStep{
{
Config: config,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMSqlElasticPoolExists("azurerm_sql_elasticpool.test"),
),
},
},
})
}
func TestAccAzureRMSqlElasticPool_resizeDtu(t *testing.T) {
ri := acctest.RandInt()
preConfig := fmt.Sprintf(testAccAzureRMSqlElasticPool_basic, ri)
postConfig := fmt.Sprintf(testAccAzureRMSqlElasticPool_resizedDtu, ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMSqlElasticPoolDestroy,
Steps: []resource.TestStep{
{
Config: preConfig,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMSqlElasticPoolExists("azurerm_sql_elasticpool.test"),
resource.TestCheckResourceAttr(
"azurerm_sql_elasticpool.test", "dtu", "50"),
resource.TestCheckResourceAttr(
"azurerm_sql_elasticpool.test", "pool_size", "5000"),
),
},
{
Config: postConfig,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMSqlElasticPoolExists("azurerm_sql_elasticpool.test"),
resource.TestCheckResourceAttr(
"azurerm_sql_elasticpool.test", "dtu", "100"),
resource.TestCheckResourceAttr(
"azurerm_sql_elasticpool.test", "pool_size", "10000"),
),
},
},
})
}
func testCheckAzureRMSqlElasticPoolExists(name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[name]
if !ok {
return fmt.Errorf("Not found: %s", name)
}
		resourceGroup, serverName, name, err := parseArmSqlElasticPoolId(rs.Primary.ID)
if err != nil {
return err
}
conn := testAccProvider.Meta().(*ArmClient).sqlElasticPoolsClient
resp, err := conn.Get(resourceGroup, serverName, name)
if err != nil {
return fmt.Errorf("Bad: Get on sqlElasticPoolsClient: %s", err)
}
if resp.StatusCode == http.StatusNotFound {
return fmt.Errorf("Bad: SQL Elastic Pool %q on server: %q (resource group: %q) does not exist", name, serverName, resourceGroup)
}
return nil
}
}
func testCheckAzureRMSqlElasticPoolDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*ArmClient).sqlElasticPoolsClient
for _, rs := range s.RootModule().Resources {
if rs.Type != "azurerm_sql_elasticpool" {
continue
}
name := rs.Primary.Attributes["name"]
serverName := rs.Primary.Attributes["server_name"]
resourceGroup := rs.Primary.Attributes["resource_group_name"]
resp, err := conn.Get(resourceGroup, serverName, name)
if err != nil {
return nil
}
if resp.StatusCode != http.StatusNotFound {
return fmt.Errorf("SQL Elastic Pool still exists:\n%#v", resp.ElasticPoolProperties)
}
}
return nil
}
var testAccAzureRMSqlElasticPool_basic = `
resource "azurerm_resource_group" "test" {
name = "acctest-%[1]d"
location = "West US"
}
resource "azurerm_sql_server" "test" {
name = "acctest%[1]d"
resource_group_name = "${azurerm_resource_group.test.name}"
location = "West US"
version = "12.0"
administrator_login = "4dm1n157r470r"
administrator_login_password = "4-v3ry-53cr37-p455w0rd"
}
resource "azurerm_sql_elasticpool" "test" {
name = "acctest-pool-%[1]d"
resource_group_name = "${azurerm_resource_group.test.name}"
location = "West US"
server_name = "${azurerm_sql_server.test.name}"
edition = "Basic"
dtu = 50
pool_size = 5000
}
`
var testAccAzureRMSqlElasticPool_resizedDtu = `
resource "azurerm_resource_group" "test" {
name = "acctest-%[1]d"
location = "West US"
}
resource "azurerm_sql_server" "test" {
name = "acctest%[1]d"
resource_group_name = "${azurerm_resource_group.test.name}"
location = "West US"
version = "12.0"
administrator_login = "4dm1n157r470r"
administrator_login_password = "4-v3ry-53cr37-p455w0rd"
}
resource "azurerm_sql_elasticpool" "test" {
name = "acctest-pool-%[1]d"
resource_group_name = "${azurerm_resource_group.test.name}"
location = "West US"
server_name = "${azurerm_sql_server.test.name}"
edition = "Basic"
dtu = 100
pool_size = 10000
}
`

View File

@ -5,6 +5,7 @@ import (
"fmt"
"log"
"net/http"
"strconv"
"strings"
"time"
@ -155,20 +156,40 @@ func resourceArmTemplateDeploymentRead(d *schema.ResourceData, meta interface{})
if resp.Properties.Outputs != nil && len(*resp.Properties.Outputs) > 0 {
outputs = make(map[string]string)
for key, output := range *resp.Properties.Outputs {
log.Printf("[DEBUG] Processing deployment output %s", key)
outputMap := output.(map[string]interface{})
outputValue, ok := outputMap["value"]
if !ok {
// No value
log.Printf("[DEBUG] No value - skipping")
continue
}
outputType, ok := outputMap["type"]
if !ok {
log.Printf("[DEBUG] No type - skipping")
continue
}
			var outputValueString string
switch strings.ToLower(outputType.(string)) {
case "bool":
outputValueString = strconv.FormatBool(outputValue.(bool))
case "string":
outputValueString = outputValue.(string)
case "int":
outputValueString = fmt.Sprint(outputValue)
default:
log.Printf("[WARN] Ignoring output %s: Outputs of type %s are not currently supported in azurerm_template_deployment.",
key, outputType)
continue
}
outputs[key] = outputValueString
}
}
d.Set("outputs", outputs)
return nil
return d.Set("outputs", outputs)
}
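A standalone sketch of the output coercion above (the helper name is made up): only bool, string, and int outputs are flattened, matching the switch in the read function.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// coerceOutput renders an ARM deployment output as a string, reporting false
// for output types the resource does not flatten into state.
func coerceOutput(outputType string, value interface{}) (string, bool) {
	switch strings.ToLower(outputType) {
	case "bool":
		return strconv.FormatBool(value.(bool)), true
	case "string":
		return value.(string), true
	case "int":
		return fmt.Sprint(value), true
	default:
		return "", false
	}
}

func main() {
	s, ok := coerceOutput("Int", float64(-123)) // JSON numbers decode as float64
	fmt.Println(s, ok)                          // -123 true
}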
func resourceArmTemplateDeploymentDelete(d *schema.ResourceData, meta interface{}) error {

View File

@ -68,6 +68,29 @@ func TestAccAzureRMTemplateDeployment_withParams(t *testing.T) {
})
}
func TestAccAzureRMTemplateDeployment_withOutputs(t *testing.T) {
ri := acctest.RandInt()
config := fmt.Sprintf(testAccAzureRMTemplateDeployment_withOutputs, ri, ri, ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMTemplateDeploymentDestroy,
Steps: []resource.TestStep{
{
Config: config,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMTemplateDeploymentExists("azurerm_template_deployment.test"),
resource.TestCheckOutput("tfIntOutput", "-123"),
resource.TestCheckOutput("tfStringOutput", "Standard_GRS"),
resource.TestCheckOutput("tfFalseOutput", "false"),
resource.TestCheckOutput("tfTrueOutput", "true"),
resource.TestCheckResourceAttr("azurerm_template_deployment.test", "outputs.stringOutput", "Standard_GRS"),
),
},
},
})
}
func TestAccAzureRMTemplateDeployment_withError(t *testing.T) {
ri := acctest.RandInt()
config := fmt.Sprintf(testAccAzureRMTemplateDeployment_withError, ri, ri)
@ -352,6 +375,126 @@ DEPLOY
`
var testAccAzureRMTemplateDeployment_withOutputs = `
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "West US"
}
output "tfStringOutput" {
value = "${azurerm_template_deployment.test.outputs.stringOutput}"
}
output "tfIntOutput" {
value = "${azurerm_template_deployment.test.outputs.intOutput}"
}
output "tfFalseOutput" {
value = "${azurerm_template_deployment.test.outputs.falseOutput}"
}
output "tfTrueOutput" {
value = "${azurerm_template_deployment.test.outputs.trueOutput}"
}
resource "azurerm_template_deployment" "test" {
name = "acctesttemplate-%d"
resource_group_name = "${azurerm_resource_group.test.name}"
template_body = <<DEPLOY
{
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"storageAccountType": {
"type": "string",
"defaultValue": "Standard_LRS",
"allowedValues": [
"Standard_LRS",
"Standard_GRS",
"Standard_ZRS"
],
"metadata": {
"description": "Storage Account type"
}
},
"dnsLabelPrefix": {
"type": "string",
"metadata": {
"description": "DNS Label for the Public IP. Must be lowercase. It should match with the following regular expression: ^[a-z][a-z0-9-]{1,61}[a-z0-9]$ or it will raise an error."
}
},
"intParameter": {
"type": "int",
"defaultValue": -123
},
"falseParameter": {
"type": "bool",
"defaultValue": false
},
"trueParameter": {
"type": "bool",
"defaultValue": true
}
},
"variables": {
"location": "[resourceGroup().location]",
"storageAccountName": "[concat(uniquestring(resourceGroup().id), 'storage')]",
"publicIPAddressName": "[concat('myPublicIp', uniquestring(resourceGroup().id))]",
"publicIPAddressType": "Dynamic",
"apiVersion": "2015-06-15"
},
"resources": [
{
"type": "Microsoft.Storage/storageAccounts",
"name": "[variables('storageAccountName')]",
"apiVersion": "[variables('apiVersion')]",
"location": "[variables('location')]",
"properties": {
"accountType": "[parameters('storageAccountType')]"
}
},
{
"type": "Microsoft.Network/publicIPAddresses",
"apiVersion": "[variables('apiVersion')]",
"name": "[variables('publicIPAddressName')]",
"location": "[variables('location')]",
"properties": {
"publicIPAllocationMethod": "[variables('publicIPAddressType')]",
"dnsSettings": {
"domainNameLabel": "[parameters('dnsLabelPrefix')]"
}
}
}
],
"outputs": {
"stringOutput": {
"type": "string",
"value": "[parameters('storageAccountType')]"
},
"intOutput": {
"type": "int",
"value": "[parameters('intParameter')]"
},
"falseOutput": {
"type": "bool",
"value": "[parameters('falseParameter')]"
},
"trueOutput": {
"type": "bool",
"value": "[parameters('trueParameter')]"
}
}
}
DEPLOY
parameters {
dnsLabelPrefix = "terraform-test-%d"
storageAccountType = "Standard_GRS"
}
deployment_mode = "Incremental"
}
`
// StorageAccount name is too long, forces error
var testAccAzureRMTemplateDeployment_withError = `
resource "azurerm_resource_group" "test" {

View File

@ -2,9 +2,12 @@ package digitalocean
import (
"log"
"net/http"
"net/http/httputil"
"time"
"github.com/digitalocean/godo"
"github.com/hashicorp/terraform/helper/logging"
"github.com/hashicorp/terraform/helper/resource"
"golang.org/x/oauth2"
)
@ -21,11 +24,31 @@ func (c *Config) Client() (*godo.Client, error) {
client := godo.NewClient(oauth2.NewClient(oauth2.NoContext, tokenSrc))
if logging.IsDebugOrHigher() {
client.OnRequestCompleted(logRequestAndResponse)
}
log.Printf("[INFO] DigitalOcean Client configured for URL: %s", client.BaseURL.String())
return client, nil
}
func logRequestAndResponse(req *http.Request, resp *http.Response) {
reqData, err := httputil.DumpRequest(req, true)
if err == nil {
log.Printf("[DEBUG] "+logReqMsg, string(reqData))
} else {
log.Printf("[ERROR] DigitalOcean API Request error: %#v", err)
}
respData, err := httputil.DumpResponse(resp, true)
if err == nil {
log.Printf("[DEBUG] "+logRespMsg, string(respData))
} else {
log.Printf("[ERROR] DigitalOcean API Response error: %#v", err)
}
}
// waitForAction waits for the action to finish using the resource.StateChangeConf.
func waitForAction(client *godo.Client, action *godo.Action) error {
var (
@ -61,3 +84,13 @@ func waitForAction(client *godo.Client, action *godo.Action) error {
}).WaitForState()
return err
}
const logReqMsg = `DigitalOcean API Request Details:
---[ REQUEST ]---------------------------------------
%s
-----------------------------------------------------`
const logRespMsg = `DigitalOcean API Response Details:
---[ RESPONSE ]--------------------------------------
%s
-----------------------------------------------------`
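For reference, the dump-and-log pattern above in minimal standalone form (standard library only):

package main

import (
	"log"
	"net/http"
	"net/http/httputil"
)

func main() {
	req, err := http.NewRequest("GET", "https://api.digitalocean.com/v2/account", nil)
	if err != nil {
		log.Fatal(err)
	}
	// true also dumps the body, matching logRequestAndResponse above.
	if data, err := httputil.DumpRequest(req, true); err == nil {
		log.Printf("[DEBUG] DigitalOcean API Request Details:\n%s", string(data))
	}
}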

View File

@ -260,10 +260,13 @@ func resourceDigitalOceanDropletRead(d *schema.ResourceData, meta interface{}) e
return fmt.Errorf("Error retrieving droplet: %s", err)
}
if droplet.Image.Slug != "" {
d.Set("image", droplet.Image.Slug)
} else {
_, err = strconv.Atoi(d.Get("image").(string))
if err == nil || droplet.Image.Slug == "" {
// The image field is provided as an ID (number), or
// the image bash no slug. In both cases we store it as an ID.
d.Set("image", droplet.Image.ID)
} else {
d.Set("image", droplet.Image.Slug)
}
d.Set("name", droplet.Name)

View File

@ -41,16 +41,31 @@ func TestAccDigitalOceanDroplet_Basic(t *testing.T) {
resource.TestCheckResourceAttr(
"digitalocean_droplet.foobar", "user_data", "foobar"),
),
Destroy: false,
},
{
Config: testAccCheckDigitalOceanDropletConfig_basic(rInt),
PlanOnly: true,
},
},
})
}
func TestAccDigitalOceanDroplet_WithID(t *testing.T) {
var droplet godo.Droplet
rInt := acctest.RandInt()
	// TODO: don't hardcode this, as image IDs change over time
centosID := 22995941
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDigitalOceanDropletDestroy,
Steps: []resource.TestStep{
{
Config: testAccCheckDigitalOceanDropletConfig_withID(centosID, rInt),
Check: resource.ComposeTestCheckFunc(
testAccCheckDigitalOceanDropletExists("digitalocean_droplet.foobar", &droplet),
),
},
},
})
}
func TestAccDigitalOceanDroplet_withSSH(t *testing.T) {
var droplet godo.Droplet
rInt := acctest.RandInt()
@ -504,6 +519,17 @@ resource "digitalocean_droplet" "foobar" {
}`, rInt)
}
func testAccCheckDigitalOceanDropletConfig_withID(imageID, rInt int) string {
return fmt.Sprintf(`
resource "digitalocean_droplet" "foobar" {
name = "foo-%d"
size = "512mb"
image = "%d"
region = "nyc3"
user_data = "foobar"
}`, rInt, imageID)
}
func testAccCheckDigitalOceanDropletConfig_withSSH(rInt int) string {
return fmt.Sprintf(`
resource "digitalocean_ssh_key" "foobar" {

View File

@ -33,6 +33,9 @@ func resourceDMERecord() *schema.Resource {
"value": &schema.Schema{
Type: schema.TypeString,
Required: true,
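					// Normalize the value to lower case before it is stored in
					// state, so case differences in API responses don't show
					// up as spurious diffs.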
StateFunc: func(value interface{}) string {
return strings.ToLower(value.(string))
},
},
"ttl": &schema.Schema{
Type: schema.TypeInt,

View File

@ -318,17 +318,18 @@ func resourceServiceV1() *schema.Resource {
Required: true,
Description: "A name to refer to this Cache Setting",
},
"cache_condition": {
Type: schema.TypeString,
Required: true,
Description: "Name of a condition to check if this Cache Setting applies",
},
"action": {
Type: schema.TypeString,
Optional: true,
Description: "Action to take",
},
// optional
"cache_condition": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: "Name of a condition to check if this Cache Setting applies",
},
"stale_ttl": {
Type: schema.TypeInt,
Optional: true,
@ -776,12 +777,13 @@ func resourceServiceV1() *schema.Resource {
Required: true,
Description: "Unique name to refer to this Request Setting",
},
// Optional fields
"request_condition": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: "Name of a request condition to apply. If there is no condition this setting will always be applied.",
},
"max_stale_age": {
Type: schema.TypeInt,
Optional: true,

View File

@ -2,18 +2,21 @@ package google
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"testing"
)
func TestAccDataSourceGoogleNetwork(t *testing.T) {
networkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
resource.TestStep{
				Config: testAccDataSourceGoogleNetworkConfig(networkName),
Check: resource.ComposeTestCheckFunc(
testAccDataSourceGoogleNetworkCheck("data.google_compute_network.my_network", "google_compute_network.foobar"),
),
@ -57,12 +60,14 @@ func testAccDataSourceGoogleNetworkCheck(data_source_name string, resource_name
}
}
func testAccDataSourceGoogleNetworkConfig(name string) string {
	return fmt.Sprintf(`
resource "google_compute_network" "foobar" {
	name = "%s"
	description = "my-description"
}
data "google_compute_network" "my_network" {
	name = "${google_compute_network.foobar.name}"
}`, name)
}

View File

@ -0,0 +1,28 @@
package google
import (
"testing"
"github.com/hashicorp/terraform/helper/resource"
)
func TestAccDnsManagedZone_importBasic(t *testing.T) {
resourceName := "google_dns_managed_zone.foobar"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDnsManagedZoneDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDnsManagedZone_basic,
},
resource.TestStep{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
},
},
})
}

View File

@ -62,8 +62,10 @@ func Provider() terraform.ResourceProvider {
"google_bigquery_dataset": resourceBigQueryDataset(),
"google_compute_autoscaler": resourceComputeAutoscaler(),
"google_compute_address": resourceComputeAddress(),
"google_compute_backend_bucket": resourceComputeBackendBucket(),
"google_compute_backend_service": resourceComputeBackendService(),
"google_compute_disk": resourceComputeDisk(),
"google_compute_snapshot": resourceComputeSnapshot(),
"google_compute_firewall": resourceComputeFirewall(),
"google_compute_forwarding_rule": resourceComputeForwardingRule(),
"google_compute_global_address": resourceComputeGlobalAddress(),

View File

@ -333,7 +333,7 @@ func resourceComputeAutoscalerUpdate(d *schema.ResourceData, meta interface{}) e
}
op, err := config.clientCompute.Autoscalers.Patch(
		project, zone, scaler).Do()
if err != nil {
return fmt.Errorf("Error updating Autoscaler: %s", err)
}

View File

@ -0,0 +1,201 @@
package google
import (
"fmt"
"log"
"regexp"
"github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
)
func resourceComputeBackendBucket() *schema.Resource {
return &schema.Resource{
Create: resourceComputeBackendBucketCreate,
Read: resourceComputeBackendBucketRead,
Update: resourceComputeBackendBucketUpdate,
Delete: resourceComputeBackendBucketDelete,
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
re := `^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$`
if !regexp.MustCompile(re).MatchString(value) {
errors = append(errors, fmt.Errorf(
"%q (%q) doesn't match regexp %q", k, value, re))
}
return
},
},
"bucket_name": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"description": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"enable_cdn": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"project": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"self_link": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
},
}
}
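As a standalone illustration of the name validator above: the RFC1035-style regexp accepts lower-case labels of at most 63 characters that start with a letter and end with a letter or digit.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$`)
	for _, name := range []string{"backend-bucket-1", "Backend-1", "1backend", "a"} {
		fmt.Printf("%-16s valid=%t\n", name, re.MatchString(name)) // only lower-case names pass
	}
}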
func resourceComputeBackendBucketCreate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
bucket := compute.BackendBucket{
Name: d.Get("name").(string),
BucketName: d.Get("bucket_name").(string),
}
if v, ok := d.GetOk("description"); ok {
bucket.Description = v.(string)
}
if v, ok := d.GetOk("enable_cdn"); ok {
bucket.EnableCdn = v.(bool)
}
project, err := getProject(d, config)
if err != nil {
return err
}
log.Printf("[DEBUG] Creating new Backend Bucket: %#v", bucket)
op, err := config.clientCompute.BackendBuckets.Insert(
project, &bucket).Do()
if err != nil {
return fmt.Errorf("Error creating backend bucket: %s", err)
}
log.Printf("[DEBUG] Waiting for new backend bucket, operation: %#v", op)
// Store the ID now
d.SetId(bucket.Name)
// Wait for the operation to complete
waitErr := computeOperationWaitGlobal(config, op, project, "Creating Backend Bucket")
if waitErr != nil {
// The resource didn't actually create
d.SetId("")
return waitErr
}
return resourceComputeBackendBucketRead(d, meta)
}
func resourceComputeBackendBucketRead(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
project, err := getProject(d, config)
if err != nil {
return err
}
bucket, err := config.clientCompute.BackendBuckets.Get(
project, d.Id()).Do()
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
// The resource doesn't exist anymore
log.Printf("[WARN] Removing Backend Bucket %q because it's gone", d.Get("name").(string))
d.SetId("")
return nil
}
return fmt.Errorf("Error reading bucket: %s", err)
}
d.Set("bucket_name", bucket.BucketName)
d.Set("description", bucket.Description)
d.Set("enable_cdn", bucket.EnableCdn)
d.Set("self_link", bucket.SelfLink)
return nil
}
func resourceComputeBackendBucketUpdate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
project, err := getProject(d, config)
if err != nil {
return err
}
bucket := compute.BackendBucket{
Name: d.Get("name").(string),
BucketName: d.Get("bucket_name").(string),
}
// Optional things
if v, ok := d.GetOk("description"); ok {
bucket.Description = v.(string)
}
if v, ok := d.GetOk("enable_cdn"); ok {
bucket.EnableCdn = v.(bool)
}
log.Printf("[DEBUG] Updating existing Backend Bucket %q: %#v", d.Id(), bucket)
op, err := config.clientCompute.BackendBuckets.Update(
project, d.Id(), &bucket).Do()
if err != nil {
return fmt.Errorf("Error updating backend bucket: %s", err)
}
d.SetId(bucket.Name)
err = computeOperationWaitGlobal(config, op, project, "Updating Backend Bucket")
if err != nil {
return err
}
return resourceComputeBackendBucketRead(d, meta)
}
func resourceComputeBackendBucketDelete(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
project, err := getProject(d, config)
if err != nil {
return err
}
log.Printf("[DEBUG] Deleting backend bucket %s", d.Id())
op, err := config.clientCompute.BackendBuckets.Delete(
project, d.Id()).Do()
if err != nil {
return fmt.Errorf("Error deleting backend bucket: %s", err)
}
err = computeOperationWaitGlobal(config, op, project, "Deleting Backend Bucket")
if err != nil {
return err
}
d.SetId("")
return nil
}

View File

@ -0,0 +1,191 @@
package google
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"google.golang.org/api/compute/v1"
)
func TestAccComputeBackendBucket_basic(t *testing.T) {
backendName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
storageName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
var svc compute.BackendBucket
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeBackendBucketDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeBackendBucket_basic(backendName, storageName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeBackendBucketExists(
"google_compute_backend_bucket.foobar", &svc),
),
},
},
})
if svc.BucketName != storageName {
t.Errorf("Expected BucketName to be %q, got %q", storageName, svc.BucketName)
}
}
func TestAccComputeBackendBucket_basicModified(t *testing.T) {
backendName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
storageName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
secondStorageName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
var svc compute.BackendBucket
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeBackendBucketDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeBackendBucket_basic(backendName, storageName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeBackendBucketExists(
"google_compute_backend_bucket.foobar", &svc),
),
},
resource.TestStep{
Config: testAccComputeBackendBucket_basicModified(
backendName, storageName, secondStorageName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeBackendBucketExists(
"google_compute_backend_bucket.foobar", &svc),
),
},
},
})
if svc.BucketName != secondStorageName {
t.Errorf("Expected BucketName to be %q, got %q", secondStorageName, svc.BucketName)
}
}
func testAccCheckComputeBackendBucketDestroy(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
for _, rs := range s.RootModule().Resources {
if rs.Type != "google_compute_backend_bucket" {
continue
}
_, err := config.clientCompute.BackendBuckets.Get(
config.Project, rs.Primary.ID).Do()
if err == nil {
return fmt.Errorf("Backend bucket %s still exists", rs.Primary.ID)
}
}
return nil
}
func testAccCheckComputeBackendBucketExists(n string, svc *compute.BackendBucket) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No ID is set")
}
config := testAccProvider.Meta().(*Config)
found, err := config.clientCompute.BackendBuckets.Get(
config.Project, rs.Primary.ID).Do()
if err != nil {
return err
}
if found.Name != rs.Primary.ID {
return fmt.Errorf("Backend bucket %s not found", rs.Primary.ID)
}
*svc = *found
return nil
}
}
func TestAccComputeBackendBucket_withCdnEnabled(t *testing.T) {
backendName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
storageName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
var svc compute.BackendBucket
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeBackendBucketDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeBackendBucket_withCdnEnabled(
backendName, storageName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeBackendBucketExists(
"google_compute_backend_bucket.foobar", &svc),
),
},
},
})
	if !svc.EnableCdn {
t.Errorf("Expected EnableCdn == true, got %t", svc.EnableCdn)
}
}
func testAccComputeBackendBucket_basic(backendName, storageName string) string {
return fmt.Sprintf(`
resource "google_compute_backend_bucket" "foobar" {
name = "%s"
bucket_name = "${google_storage_bucket.bucket_one.name}"
}
resource "google_storage_bucket" "bucket_one" {
name = "%s"
location = "EU"
}
`, backendName, storageName)
}
func testAccComputeBackendBucket_basicModified(backendName, bucketOne, bucketTwo string) string {
return fmt.Sprintf(`
resource "google_compute_backend_bucket" "foobar" {
name = "%s"
bucket_name = "${google_storage_bucket.bucket_two.name}"
}
resource "google_storage_bucket" "bucket_one" {
name = "%s"
location = "EU"
}
resource "google_storage_bucket" "bucket_two" {
name = "%s"
location = "EU"
}
`, backendName, bucketOne, bucketTwo)
}
func testAccComputeBackendBucket_withCdnEnabled(backendName, storageName string) string {
return fmt.Sprintf(`
resource "google_compute_backend_bucket" "foobar" {
name = "%s"
bucket_name = "${google_storage_bucket.bucket.name}"
enable_cdn = true
}
resource "google_storage_bucket" "bucket" {
name = "%s"
location = "EU"
}
`, backendName, storageName)
}

View File

@ -200,11 +200,15 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{
log.Printf("[DEBUG] Waiting for new backend service, operation: %#v", op)
// Store the ID now
d.SetId(service.Name)
err = computeOperationWaitGlobal(config, op, project, "Creating Backend Service")
if err != nil {
return err
// Wait for the operation to complete
waitErr := computeOperationWaitGlobal(config, op, project, "Creating Backend Service")
if waitErr != nil {
// The resource didn't actually create
d.SetId("")
return waitErr
}
return resourceComputeBackendServiceRead(d, meta)

View File

@ -125,7 +125,7 @@ func testAccCheckComputeBackendServiceDestroy(s *terraform.State) error {
_, err := config.clientCompute.BackendServices.Get(
config.Project, rs.Primary.ID).Do()
if err == nil {
return fmt.Errorf("Backend service still exists")
return fmt.Errorf("Backend service %s still exists", rs.Primary.ID)
}
}
@ -152,7 +152,7 @@ func testAccCheckComputeBackendServiceExists(n string, svc *compute.BackendServi
}
if found.Name != rs.Primary.ID {
return fmt.Errorf("Backend service not found")
return fmt.Errorf("Backend service %s not found", rs.Primary.ID)
}
*svc = *found

View File

@ -429,6 +429,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
}
}
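	// Track whether this disk references an existing source disk; the image,
	// type, and size parameters below conflict with a source and are rejected
	// when one is set.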
hasSource := false
	// Load up the existing source disk for this block device, if one was specified
if v, ok := d.GetOk(prefix + ".disk"); ok {
diskName := v.(string)
@ -441,6 +442,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
}
disk.Source = diskData.SelfLink
hasSource = true
} else {
// Create a new disk
disk.InitializeParams = &compute.AttachedDiskInitializeParams{}
@ -453,7 +455,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
}
// Load up the image for this disk if specified
if v, ok := d.GetOk(prefix + ".image"); ok {
if v, ok := d.GetOk(prefix + ".image"); ok && !hasSource {
imageName := v.(string)
imageUrl, err := resolveImage(config, imageName)
@ -464,9 +466,11 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
}
disk.InitializeParams.SourceImage = imageUrl
} else if ok && hasSource {
return fmt.Errorf("Cannot specify disk image when referencing an existing disk")
}
if v, ok := d.GetOk(prefix + ".type"); ok {
if v, ok := d.GetOk(prefix + ".type"); ok && !hasSource {
diskTypeName := v.(string)
diskType, err := readDiskType(config, zone, diskTypeName)
if err != nil {
@ -476,11 +480,15 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
}
disk.InitializeParams.DiskType = diskType.SelfLink
} else if ok && hasSource {
return fmt.Errorf("Cannot specify disk type when referencing an existing disk")
}
if v, ok := d.GetOk(prefix + ".size"); ok {
if v, ok := d.GetOk(prefix + ".size"); ok && !hasSource {
diskSizeGb := v.(int)
disk.InitializeParams.DiskSizeGb = int64(diskSizeGb)
} else if ok && hasSource {
return fmt.Errorf("Cannot specify disk size when referencing an existing disk")
}
if v, ok := d.GetOk(prefix + ".device_name"); ok {

View File

@ -197,6 +197,12 @@ func resourceComputeInstanceTemplate() *schema.Resource {
Computed: true,
},
"network_ip": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"subnetwork": &schema.Schema{
Type: schema.TypeString,
Optional: true,
@ -462,7 +468,9 @@ func buildNetworks(d *schema.ResourceData, meta interface{}) ([]*compute.Network
var iface compute.NetworkInterface
iface.Network = networkLink
iface.Subnetwork = subnetworkLink
if v, ok := d.GetOk(prefix + ".network_ip"); ok {
iface.NetworkIP = v.(string)
}
accessConfigsCount := d.Get(prefix + ".access_config.#").(int)
iface.AccessConfigs = make([]*compute.AccessConfig, accessConfigsCount)
for j := 0; j < accessConfigsCount; j++ {
@ -648,6 +656,9 @@ func flattenNetworkInterfaces(networkInterfaces []*compute.NetworkInterface) ([]
networkUrl := strings.Split(networkInterface.Network, "/")
networkInterfaceMap["network"] = networkUrl[len(networkUrl)-1]
}
if networkInterface.NetworkIP != "" {
networkInterfaceMap["network_ip"] = networkInterface.NetworkIP
}
if networkInterface.Subnetwork != "" {
subnetworkUrl := strings.Split(networkInterface.Subnetwork, "/")
networkInterfaceMap["subnetwork"] = subnetworkUrl[len(subnetworkUrl)-1]

View File

@ -27,7 +27,7 @@ func TestAccComputeInstanceTemplate_basic(t *testing.T) {
"google_compute_instance_template.foobar", &instanceTemplate),
testAccCheckComputeInstanceTemplateTag(&instanceTemplate, "foo"),
testAccCheckComputeInstanceTemplateMetadata(&instanceTemplate, "foo", "bar"),
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20160803", true, true),
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "projects/debian-cloud/global/images/debian-8-jessie-v20160803", true, true),
),
},
},
@ -54,6 +54,29 @@ func TestAccComputeInstanceTemplate_IP(t *testing.T) {
})
}
func TestAccComputeInstanceTemplate_networkIP(t *testing.T) {
var instanceTemplate compute.InstanceTemplate
networkIP := "10.128.0.2"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeInstanceTemplate_networkIP(networkIP),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceTemplateExists(
"google_compute_instance_template.foobar", &instanceTemplate),
testAccCheckComputeInstanceTemplateNetwork(&instanceTemplate),
testAccCheckComputeInstanceTemplateNetworkIP(
"google_compute_instance_template.foobar", networkIP, &instanceTemplate),
),
},
},
})
}
func TestAccComputeInstanceTemplate_disks(t *testing.T) {
var instanceTemplate compute.InstanceTemplate
@ -67,7 +90,7 @@ func TestAccComputeInstanceTemplate_disks(t *testing.T) {
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceTemplateExists(
"google_compute_instance_template.foobar", &instanceTemplate),
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20160803", true, true),
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "projects/debian-cloud/global/images/debian-8-jessie-v20160803", true, true),
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "terraform-test-foobar", false, false),
),
},
@ -335,6 +358,17 @@ func testAccCheckComputeInstanceTemplateStartupScript(instanceTemplate *compute.
}
}
func testAccCheckComputeInstanceTemplateNetworkIP(n, networkIP string, instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc {
return func(s *terraform.State) error {
ip := instanceTemplate.Properties.NetworkInterfaces[0].NetworkIP
err := resource.TestCheckResourceAttr(n, "network_interface.0.network_ip", ip)(s)
if err != nil {
return err
}
return resource.TestCheckResourceAttr(n, "network_interface.0.network_ip", networkIP)(s)
}
}
var testAccComputeInstanceTemplate_basic = fmt.Sprintf(`
resource "google_compute_instance_template" "foobar" {
name = "instancet-test-%s"
@ -392,6 +426,28 @@ resource "google_compute_instance_template" "foobar" {
}
}`, acctest.RandString(10), acctest.RandString(10))
func testAccComputeInstanceTemplate_networkIP(networkIP string) string {
return fmt.Sprintf(`
resource "google_compute_instance_template" "foobar" {
name = "instancet-test-%s"
machine_type = "n1-standard-1"
tags = ["foo", "bar"]
disk {
source_image = "debian-8-jessie-v20160803"
}
network_interface {
network = "default"
network_ip = "%s"
}
metadata {
foo = "bar"
}
}`, acctest.RandString(10), networkIP)
}
var testAccComputeInstanceTemplate_disks = fmt.Sprintf(`
resource "google_compute_disk" "foobar" {
name = "instancet-test-%s"

View File

@ -2,8 +2,10 @@ package google
import (
"fmt"
"os"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"google.golang.org/api/compute/v1"
@ -11,7 +13,16 @@ import (
// Add two key value pairs
func TestAccComputeProjectMetadata_basic(t *testing.T) {
skipIfEnvNotSet(t,
[]string{
"GOOGLE_ORG",
"GOOGLE_BILLING_ACCOUNT",
}...,
)
billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT")
var project compute.Project
projectID := "terrafom-test-" + acctest.RandString(10)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -19,13 +30,13 @@ func TestAccComputeProjectMetadata_basic(t *testing.T) {
CheckDestroy: testAccCheckComputeProjectMetadataDestroy,
Steps: []resource.TestStep{
resource.TestStep{
				Config: testAccComputeProject_basic0_metadata(projectID, pname, org, billingId),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeProjectExists(
"google_compute_project_metadata.fizzbuzz", &project),
testAccCheckComputeProjectMetadataContains(&project, "banana", "orange"),
testAccCheckComputeProjectMetadataContains(&project, "sofa", "darwinism"),
testAccCheckComputeProjectMetadataSize(&project, 2),
"google_compute_project_metadata.fizzbuzz", projectID, &project),
testAccCheckComputeProjectMetadataContains(projectID, "banana", "orange"),
testAccCheckComputeProjectMetadataContains(projectID, "sofa", "darwinism"),
testAccCheckComputeProjectMetadataSize(projectID, 2),
),
},
},
@ -34,7 +45,16 @@ func TestAccComputeProjectMetadata_basic(t *testing.T) {
// Add three key value pairs, then replace one and modify a second
func TestAccComputeProjectMetadata_modify_1(t *testing.T) {
skipIfEnvNotSet(t,
[]string{
"GOOGLE_ORG",
"GOOGLE_BILLING_ACCOUNT",
}...,
)
billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT")
var project compute.Project
projectID := "terrafom-test-" + acctest.RandString(10)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -42,26 +62,26 @@ func TestAccComputeProjectMetadata_modify_1(t *testing.T) {
CheckDestroy: testAccCheckComputeProjectMetadataDestroy,
Steps: []resource.TestStep{
resource.TestStep{
				Config: testAccComputeProject_modify0_metadata(projectID, pname, org, billingId),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeProjectExists(
"google_compute_project_metadata.fizzbuzz", &project),
testAccCheckComputeProjectMetadataContains(&project, "paper", "pen"),
testAccCheckComputeProjectMetadataContains(&project, "genghis_khan", "french bread"),
testAccCheckComputeProjectMetadataContains(&project, "happy", "smiling"),
testAccCheckComputeProjectMetadataSize(&project, 3),
"google_compute_project_metadata.fizzbuzz", projectID, &project),
testAccCheckComputeProjectMetadataContains(projectID, "paper", "pen"),
testAccCheckComputeProjectMetadataContains(projectID, "genghis_khan", "french bread"),
testAccCheckComputeProjectMetadataContains(projectID, "happy", "smiling"),
testAccCheckComputeProjectMetadataSize(projectID, 3),
),
},
resource.TestStep{
				Config: testAccComputeProject_modify1_metadata(projectID, pname, org, billingId),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeProjectExists(
"google_compute_project_metadata.fizzbuzz", &project),
testAccCheckComputeProjectMetadataContains(&project, "paper", "pen"),
testAccCheckComputeProjectMetadataContains(&project, "paris", "french bread"),
testAccCheckComputeProjectMetadataContains(&project, "happy", "laughing"),
testAccCheckComputeProjectMetadataSize(&project, 3),
"google_compute_project_metadata.fizzbuzz", projectID, &project),
testAccCheckComputeProjectMetadataContains(projectID, "paper", "pen"),
testAccCheckComputeProjectMetadataContains(projectID, "paris", "french bread"),
testAccCheckComputeProjectMetadataContains(projectID, "happy", "laughing"),
testAccCheckComputeProjectMetadataSize(projectID, 3),
),
},
},
@ -70,7 +90,16 @@ func TestAccComputeProjectMetadata_modify_1(t *testing.T) {
// Add two key value pairs, and replace both
func TestAccComputeProjectMetadata_modify_2(t *testing.T) {
skipIfEnvNotSet(t,
[]string{
"GOOGLE_ORG",
"GOOGLE_BILLING_ACCOUNT",
}...,
)
billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT")
var project compute.Project
projectID := "terraform-test-" + acctest.RandString(10)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -78,24 +107,24 @@ func TestAccComputeProjectMetadata_modify_2(t *testing.T) {
CheckDestroy: testAccCheckComputeProjectMetadataDestroy,
Steps: []resource.TestStep{
resource.TestStep{
				Config: testAccComputeProject_basic0_metadata(projectID, pname, org, billingId),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeProjectExists(
"google_compute_project_metadata.fizzbuzz", &project),
testAccCheckComputeProjectMetadataContains(&project, "banana", "orange"),
testAccCheckComputeProjectMetadataContains(&project, "sofa", "darwinism"),
testAccCheckComputeProjectMetadataSize(&project, 2),
"google_compute_project_metadata.fizzbuzz", projectID, &project),
testAccCheckComputeProjectMetadataContains(projectID, "banana", "orange"),
testAccCheckComputeProjectMetadataContains(projectID, "sofa", "darwinism"),
testAccCheckComputeProjectMetadataSize(projectID, 2),
),
},
resource.TestStep{
				Config: testAccComputeProject_basic1_metadata(projectID, pname, org, billingId),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeProjectExists(
"google_compute_project_metadata.fizzbuzz", &project),
testAccCheckComputeProjectMetadataContains(&project, "kiwi", "papaya"),
testAccCheckComputeProjectMetadataContains(&project, "finches", "darwinism"),
testAccCheckComputeProjectMetadataSize(&project, 2),
"google_compute_project_metadata.fizzbuzz", projectID, &project),
testAccCheckComputeProjectMetadataContains(projectID, "kiwi", "papaya"),
testAccCheckComputeProjectMetadataContains(projectID, "finches", "darwinism"),
testAccCheckComputeProjectMetadataSize(projectID, 2),
),
},
},
@ -105,15 +134,21 @@ func TestAccComputeProjectMetadata_modify_2(t *testing.T) {
func testAccCheckComputeProjectMetadataDestroy(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
project, err := config.clientCompute.Projects.Get(config.Project).Do()
if err == nil && len(project.CommonInstanceMetadata.Items) > 0 {
return fmt.Errorf("Error, metadata items still exist")
for _, rs := range s.RootModule().Resources {
if rs.Type != "google_compute_project_metadata" {
continue
}
project, err := config.clientCompute.Projects.Get(rs.Primary.ID).Do()
if err == nil && len(project.CommonInstanceMetadata.Items) > 0 {
return fmt.Errorf("Error, metadata items still exist in %s", rs.Primary.ID)
}
}
return nil
}
func testAccCheckComputeProjectExists(n string, project *compute.Project) resource.TestCheckFunc {
func testAccCheckComputeProjectExists(n, projectID string, project *compute.Project) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
@ -126,8 +161,7 @@ func testAccCheckComputeProjectExists(n string, project *compute.Project) resour
config := testAccProvider.Meta().(*Config)
found, err := config.clientCompute.Projects.Get(
config.Project).Do()
found, err := config.clientCompute.Projects.Get(projectID).Do()
if err != nil {
return err
}
@ -142,10 +176,10 @@ func testAccCheckComputeProjectExists(n string, project *compute.Project) resour
}
}
func testAccCheckComputeProjectMetadataContains(project *compute.Project, key string, value string) resource.TestCheckFunc {
func testAccCheckComputeProjectMetadataContains(projectID, key, value string) resource.TestCheckFunc {
return func(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
project, err := config.clientCompute.Projects.Get(config.Project).Do()
project, err := config.clientCompute.Projects.Get(projectID).Do()
if err != nil {
return fmt.Errorf("Error, failed to load project service for %s: %s", config.Project, err)
}
@ -161,14 +195,14 @@ func testAccCheckComputeProjectMetadataContains(project *compute.Project, key st
}
}
return fmt.Errorf("Error, key %s not present", key)
return fmt.Errorf("Error, key %s not present in %s", key, project.SelfLink)
}
}
func testAccCheckComputeProjectMetadataSize(project *compute.Project, size int) resource.TestCheckFunc {
func testAccCheckComputeProjectMetadataSize(projectID string, size int) resource.TestCheckFunc {
return func(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
project, err := config.clientCompute.Projects.Get(config.Project).Do()
project, err := config.clientCompute.Projects.Get(projectID).Do()
if err != nil {
return fmt.Errorf("Error, failed to load project service for %s: %s", config.Project, err)
}
@ -182,36 +216,100 @@ func testAccCheckComputeProjectMetadataSize(project *compute.Project, size int)
}
}
const testAccComputeProject_basic0_metadata = `
resource "google_compute_project_metadata" "fizzbuzz" {
metadata {
banana = "orange"
sofa = "darwinism"
}
}`
func testAccComputeProject_basic0_metadata(projectID, name, org, billing string) string {
return fmt.Sprintf(`
resource "google_project" "project" {
project_id = "%s"
name = "%s"
org_id = "%s"
billing_account = "%s"
}
const testAccComputeProject_basic1_metadata = `
resource "google_compute_project_metadata" "fizzbuzz" {
metadata {
kiwi = "papaya"
finches = "darwinism"
}
}`
resource "google_project_services" "services" {
project = "${google_project.project.project_id}"
services = ["compute-component.googleapis.com"]
}
const testAccComputeProject_modify0_metadata = `
resource "google_compute_project_metadata" "fizzbuzz" {
metadata {
paper = "pen"
genghis_khan = "french bread"
happy = "smiling"
}
}`
project = "${google_project.project.project_id}"
metadata {
banana = "orange"
sofa = "darwinism"
}
depends_on = ["google_project_services.services"]
}`, projectID, name, org, billing)
}
func testAccComputeProject_basic1_metadata(projectID, name, org, billing string) string {
return fmt.Sprintf(`
resource "google_project" "project" {
project_id = "%s"
name = "%s"
org_id = "%s"
billing_account = "%s"
}
resource "google_project_services" "services" {
project = "${google_project.project.project_id}"
services = ["compute-component.googleapis.com"]
}
const testAccComputeProject_modify1_metadata = `
resource "google_compute_project_metadata" "fizzbuzz" {
metadata {
paper = "pen"
paris = "french bread"
happy = "laughing"
}
}`
project = "${google_project.project.project_id}"
metadata {
kiwi = "papaya"
finches = "darwinism"
}
depends_on = ["google_project_services.services"]
}`, projectID, name, org, billing)
}
func testAccComputeProject_modify0_metadata(projectID, name, org, billing string) string {
return fmt.Sprintf(`
resource "google_project" "project" {
project_id = "%s"
name = "%s"
org_id = "%s"
billing_account = "%s"
}
resource "google_project_services" "services" {
project = "${google_project.project.project_id}"
services = ["compute-component.googleapis.com"]
}
resource "google_compute_project_metadata" "fizzbuzz" {
project = "${google_project.project.project_id}"
metadata {
paper = "pen"
genghis_khan = "french bread"
happy = "smiling"
}
depends_on = ["google_project_services.services"]
}`, projectID, name, org, billing)
}
func testAccComputeProject_modify1_metadata(projectID, name, org, billing string) string {
return fmt.Sprintf(`
resource "google_project" "project" {
project_id = "%s"
name = "%s"
org_id = "%s"
billing_account = "%s"
}
resource "google_project_services" "services" {
project = "${google_project.project.project_id}"
services = ["compute-component.googleapis.com"]
}
resource "google_compute_project_metadata" "fizzbuzz" {
project = "${google_project.project.project_id}"
metadata {
paper = "pen"
paris = "french bread"
happy = "laughing"
}
depends_on = ["google_project_services.services"]
}`, projectID, name, org, billing)
}

View File

@ -0,0 +1,210 @@
package google
import (
"fmt"
"log"
"github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
)
func resourceComputeSnapshot() *schema.Resource {
return &schema.Resource{
Create: resourceComputeSnapshotCreate,
Read: resourceComputeSnapshotRead,
Delete: resourceComputeSnapshotDelete,
Exists: resourceComputeSnapshotExists,
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"zone": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"snapshot_encryption_key_raw": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Sensitive: true,
},
"snapshot_encryption_key_sha256": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"source_disk_encryption_key_raw": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Sensitive: true,
},
"source_disk_encryption_key_sha256": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"source_disk": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"source_disk_link": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"project": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"self_link": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
},
}
}
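// A minimal usage sketch for this resource (illustrative values; the disk is
// assumed to already exist in the given zone):
//
//   resource "google_compute_snapshot" "example" {
//     name        = "example-snapshot"
//     source_disk = "example-disk"
//     zone        = "us-central1-a"
//   }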
func resourceComputeSnapshotCreate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
project, err := getProject(d, config)
if err != nil {
return err
}
// Build the snapshot parameter
snapshot := &compute.Snapshot{
Name: d.Get("name").(string),
}
source_disk := d.Get("source_disk").(string)
if v, ok := d.GetOk("snapshot_encryption_key_raw"); ok {
snapshot.SnapshotEncryptionKey = &compute.CustomerEncryptionKey{}
snapshot.SnapshotEncryptionKey.RawKey = v.(string)
}
if v, ok := d.GetOk("source_disk_encryption_key_raw"); ok {
snapshot.SourceDiskEncryptionKey = &compute.CustomerEncryptionKey{}
snapshot.SourceDiskEncryptionKey.RawKey = v.(string)
}
op, err := config.clientCompute.Disks.CreateSnapshot(
project, d.Get("zone").(string), source_disk, snapshot).Do()
if err != nil {
return fmt.Errorf("Error creating snapshot: %s", err)
}
// It probably maybe worked, so store the ID now
d.SetId(snapshot.Name)
err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating Snapshot")
if err != nil {
return err
}
return resourceComputeSnapshotRead(d, meta)
}
func resourceComputeSnapshotRead(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
project, err := getProject(d, config)
if err != nil {
return err
}
snapshot, err := config.clientCompute.Snapshots.Get(
project, d.Id()).Do()
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
log.Printf("[WARN] Removing Snapshot %q because it's gone", d.Get("name").(string))
// The resource doesn't exist anymore
d.SetId("")
return nil
}
return fmt.Errorf("Error reading snapshot: %s", err)
}
d.Set("self_link", snapshot.SelfLink)
d.Set("source_disk_link", snapshot.SourceDisk)
d.Set("name", snapshot.Name)
if snapshot.SnapshotEncryptionKey != nil && snapshot.SnapshotEncryptionKey.Sha256 != "" {
d.Set("snapshot_encryption_key_sha256", snapshot.SnapshotEncryptionKey.Sha256)
}
if snapshot.SourceDiskEncryptionKey != nil && snapshot.SourceDiskEncryptionKey.Sha256 != "" {
d.Set("source_disk_encryption_key_sha256", snapshot.SourceDiskEncryptionKey.Sha256)
}
return nil
}
func resourceComputeSnapshotDelete(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
project, err := getProject(d, config)
if err != nil {
return err
}
// Delete the snapshot
op, err := config.clientCompute.Snapshots.Delete(
project, d.Id()).Do()
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
log.Printf("[WARN] Removing Snapshot %q because it's gone", d.Get("name").(string))
// The resource doesn't exist anymore
d.SetId("")
return nil
}
return fmt.Errorf("Error deleting snapshot: %s", err)
}
err = computeOperationWaitGlobal(config, op, project, "Deleting Snapshot")
if err != nil {
return err
}
d.SetId("")
return nil
}
func resourceComputeSnapshotExists(d *schema.ResourceData, meta interface{}) (bool, error) {
config := meta.(*Config)
project, err := getProject(d, config)
if err != nil {
return false, err
}
_, err = config.clientCompute.Snapshots.Get(
project, d.Id()).Do()
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
log.Printf("[WARN] Removing Snapshot %q because it's gone", d.Get("name").(string))
// The resource doesn't exist anymore
d.SetId("")
return false, nil
}
return true, err
}
return true, nil
}

View File

@ -0,0 +1,183 @@
package google
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
)
func TestAccComputeSnapshot_basic(t *testing.T) {
snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
var snapshot compute.Snapshot
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeSnapshotDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeSnapshot_basic(snapshotName, diskName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeSnapshotExists(
"google_compute_snapshot.foobar", &snapshot),
),
},
},
})
}
func TestAccComputeSnapshot_encryption(t *testing.T) {
snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
var snapshot compute.Snapshot
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeSnapshotDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeSnapshot_encryption(snapshotName, diskName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeSnapshotExists(
"google_compute_snapshot.foobar", &snapshot),
),
},
},
})
}
func testAccCheckComputeSnapshotDestroy(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
for _, rs := range s.RootModule().Resources {
if rs.Type != "google_compute_snapshot" {
continue
}
_, err := config.clientCompute.Snapshots.Get(
config.Project, rs.Primary.ID).Do()
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
return nil
} else if ok {
return fmt.Errorf("Error while requesting Google Cloud Plateform: http code error : %d, http message error: %s", gerr.Code, gerr.Message)
}
return fmt.Errorf("Error while requesting Google Cloud Plateform")
}
return fmt.Errorf("Snapshot still exists")
}
return nil
}
func testAccCheckComputeSnapshotExists(n string, snapshot *compute.Snapshot) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No ID is set")
}
config := testAccProvider.Meta().(*Config)
found, err := config.clientCompute.Snapshots.Get(
config.Project, rs.Primary.ID).Do()
if err != nil {
return err
}
if found.Name != rs.Primary.ID {
return fmt.Errorf("Snapshot %s not found", n)
}
attr := rs.Primary.Attributes["snapshot_encryption_key_sha256"]
if found.SnapshotEncryptionKey != nil && found.SnapshotEncryptionKey.Sha256 != attr {
return fmt.Errorf("Snapshot %s has mismatched encryption key (Sha256).\nTF State: %+v.\nGCP State: %+v",
n, attr, found.SnapshotEncryptionKey.Sha256)
} else if found.SnapshotEncryptionKey == nil && attr != "" {
return fmt.Errorf("Snapshot %s has mismatched encryption key.\nTF State: %+v.\nGCP State: %+v",
n, attr, found.SnapshotEncryptionKey)
}
attr = rs.Primary.Attributes["source_disk_encryption_key_sha256"]
if found.SourceDiskEncryptionKey != nil && found.SourceDiskEncryptionKey.Sha256 != attr {
return fmt.Errorf("Snapshot %s has mismatched source disk encryption key (Sha256).\nTF State: %+v.\nGCP State: %+v",
n, attr, found.SourceDiskEncryptionKey.Sha256)
} else if found.SourceDiskEncryptionKey == nil && attr != "" {
return fmt.Errorf("Snapshot %s has mismatched source disk encryption key.\nTF State: %+v.\nGCP State: %+v",
n, attr, found.SourceDiskEncryptionKey)
}
attr = rs.Primary.Attributes["source_disk_link"]
if found.SourceDisk != attr {
return fmt.Errorf("Snapshot %s has mismatched source disk link.\nTF State: %+v.\nGCP State: %+v",
n, attr, found.SourceDisk)
}
foundDisk, errDisk := config.clientCompute.Disks.Get(
config.Project, rs.Primary.Attributes["zone"], rs.Primary.Attributes["source_disk"]).Do()
if errDisk != nil {
return errDisk
}
if foundDisk.SelfLink != attr {
return fmt.Errorf("Snapshot %s has mismatched source disk\nTF State: %+v.\nGCP State: %+v",
n, attr, foundDisk.SelfLink)
}
attr = rs.Primary.Attributes["self_link"]
if found.SelfLink != attr {
return fmt.Errorf("Snapshot %s has mismatched self link.\nTF State: %+v.\nGCP State: %+v",
n, attr, found.SelfLink)
}
*snapshot = *found
return nil
}
}
func testAccComputeSnapshot_basic(snapshotName string, diskName string) string {
return fmt.Sprintf(`
resource "google_compute_disk" "foobar" {
name = "%s"
image = "debian-8-jessie-v20160921"
size = 10
type = "pd-ssd"
zone = "us-central1-a"
}
resource "google_compute_snapshot" "foobar" {
name = "%s"
source_disk = "${google_compute_disk.foobar.name}"
zone = "us-central1-a"
}`, diskName, snapshotName)
}
func testAccComputeSnapshot_encryption(snapshotName string, diskName string) string {
return fmt.Sprintf(`
resource "google_compute_disk" "foobar" {
name = "%s"
image = "debian-8-jessie-v20160921"
size = 10
type = "pd-ssd"
zone = "us-central1-a"
disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
}
resource "google_compute_snapshot" "foobar" {
name = "%s"
source_disk = "${google_compute_disk.foobar.name}"
zone = "us-central1-a"
source_disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
snapshot_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
}`, diskName, snapshotName)
}

View File

@ -403,7 +403,7 @@ var testAccContainerCluster_withVersion = fmt.Sprintf(`
resource "google_container_cluster" "with_version" {
name = "cluster-test-%s"
zone = "us-central1-a"
node_version = "1.6.0"
node_version = "1.6.1"
initial_node_count = 1
master_auth {

View File

@ -14,7 +14,9 @@ func resourceDnsManagedZone() *schema.Resource {
Create: resourceDnsManagedZoneCreate,
Read: resourceDnsManagedZoneRead,
Delete: resourceDnsManagedZoneDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: map[string]*schema.Schema{
"dns_name": &schema.Schema{
Type: schema.TypeString,
@ -109,6 +111,9 @@ func resourceDnsManagedZoneRead(d *schema.ResourceData, meta interface{}) error
}
d.Set("name_servers", zone.NameServers)
d.Set("name", zone.Name)
d.Set("dns_name", zone.DnsName)
d.Set("description", zone.Description)
return nil
}

View File

@ -31,6 +31,14 @@ func resourceGoogleProjectServices() *schema.Resource {
}
}
// These services can only be enabled as a side-effect of enabling other services,
// so don't bother storing them in the config or using them for diffing.
var ignore = map[string]struct{}{
"containeranalysis.googleapis.com": struct{}{},
"dataproc-control.googleapis.com": struct{}{},
"source.googleapis.com": struct{}{},
}
func resourceGoogleProjectServicesCreate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
pid := d.Get("project").(string)
@ -155,12 +163,19 @@ func getConfigServices(d *schema.ResourceData) (services []string) {
func getApiServices(pid string, config *Config) ([]string, error) {
apiServices := make([]string, 0)
// Get services from the API
svcResp, err := config.clientServiceMan.Services.List().ConsumerId("project:" + pid).Do()
if err != nil {
return apiServices, err
}
for _, v := range svcResp.Services {
apiServices = append(apiServices, v.ServiceName)
token := ""
for paginate := true; paginate; {
svcResp, err := config.clientServiceMan.Services.List().ConsumerId("project:" + pid).PageToken(token).Do()
if err != nil {
return apiServices, err
}
for _, v := range svcResp.Services {
if _, ok := ignore[v.ServiceName]; !ok {
apiServices = append(apiServices, v.ServiceName)
}
}
token = svcResp.NextPageToken
paginate = token != ""
}
return apiServices, nil
}

View File

@ -4,6 +4,7 @@ import (
"bytes"
"fmt"
"log"
"os"
"reflect"
"sort"
"testing"
@ -123,6 +124,103 @@ func TestAccGoogleProjectServices_authoritative2(t *testing.T) {
})
}
// Test that services that can't be enabled on their own (such as dataproc-control.googleapis.com)
// don't end up causing diffs when they are enabled as a side-effect of a different service's
// enablement.
func TestAccGoogleProjectServices_ignoreUnenablableServices(t *testing.T) {
skipIfEnvNotSet(t,
[]string{
"GOOGLE_ORG",
"GOOGLE_BILLING_ACCOUNT",
}...,
)
billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT")
pid := "terraform-" + acctest.RandString(10)
services := []string{
"dataproc.googleapis.com",
// The following services are enabled as a side-effect of dataproc's enablement
"storage-component.googleapis.com",
"deploymentmanager.googleapis.com",
"replicapool.googleapis.com",
"replicapoolupdater.googleapis.com",
"resourceviews.googleapis.com",
"compute-component.googleapis.com",
"container.googleapis.com",
"containerregistry.googleapis.com",
"storage-api.googleapis.com",
"pubsub.googleapis.com",
}
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccGoogleProjectAssociateServicesBasic_withBilling(services, pid, pname, org, billingId),
Check: resource.ComposeTestCheckFunc(
testProjectServicesMatch(services, pid),
),
},
},
})
}
func TestAccGoogleProjectServices_manyServices(t *testing.T) {
skipIfEnvNotSet(t,
[]string{
"GOOGLE_ORG",
"GOOGLE_BILLING_ACCOUNT",
}...,
)
billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT")
pid := "terraform-" + acctest.RandString(10)
services := []string{
"bigquery-json.googleapis.com",
"cloudbuild.googleapis.com",
"cloudfunctions.googleapis.com",
"cloudresourcemanager.googleapis.com",
"cloudtrace.googleapis.com",
"compute-component.googleapis.com",
"container.googleapis.com",
"containerregistry.googleapis.com",
"dataflow.googleapis.com",
"dataproc.googleapis.com",
"deploymentmanager.googleapis.com",
"dns.googleapis.com",
"endpoints.googleapis.com",
"iam.googleapis.com",
"logging.googleapis.com",
"ml.googleapis.com",
"monitoring.googleapis.com",
"pubsub.googleapis.com",
"replicapool.googleapis.com",
"replicapoolupdater.googleapis.com",
"resourceviews.googleapis.com",
"runtimeconfig.googleapis.com",
"servicecontrol.googleapis.com",
"servicemanagement.googleapis.com",
"sourcerepo.googleapis.com",
"spanner.googleapis.com",
"storage-api.googleapis.com",
"storage-component.googleapis.com",
}
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccGoogleProjectAssociateServicesBasic_withBilling(services, pid, pname, org, billingId),
Check: resource.ComposeTestCheckFunc(
testProjectServicesMatch(services, pid),
),
},
},
})
}
func testAccGoogleProjectAssociateServicesBasic(services []string, pid, name, org string) string {
return fmt.Sprintf(`
resource "google_project" "acceptance" {
@ -137,6 +235,21 @@ resource "google_project_services" "acceptance" {
`, pid, name, org, testStringsToString(services))
}
func testAccGoogleProjectAssociateServicesBasic_withBilling(services []string, pid, name, org, billing string) string {
return fmt.Sprintf(`
resource "google_project" "acceptance" {
project_id = "%s"
name = "%s"
org_id = "%s"
billing_account = "%s"
}
resource "google_project_services" "acceptance" {
project = "${google_project.acceptance.project_id}"
services = [%s]
}
`, pid, name, org, billing, testStringsToString(services))
}
func testProjectServicesMatch(services []string, pid string) resource.TestCheckFunc {
return func(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)

View File

@ -1,7 +1,9 @@
package heroku
import (
"fmt"
"log"
"strings"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
@ -25,12 +27,15 @@ func Provider() terraform.ResourceProvider {
},
ResourcesMap: map[string]*schema.Resource{
"heroku_addon": resourceHerokuAddon(),
"heroku_app": resourceHerokuApp(),
"heroku_cert": resourceHerokuCert(),
"heroku_domain": resourceHerokuDomain(),
"heroku_drain": resourceHerokuDrain(),
"heroku_space": resourceHerokuSpace(),
"heroku_addon": resourceHerokuAddon(),
"heroku_app": resourceHerokuApp(),
"heroku_app_feature": resourceHerokuAppFeature(),
"heroku_cert": resourceHerokuCert(),
"heroku_domain": resourceHerokuDomain(),
"heroku_drain": resourceHerokuDrain(),
"heroku_pipeline": resourceHerokuPipeline(),
"heroku_pipeline_coupling": resourceHerokuPipelineCoupling(),
"heroku_space": resourceHerokuSpace(),
},
ConfigureFunc: providerConfigure,
@ -46,3 +51,12 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) {
log.Println("[INFO] Initializing Heroku client")
return config.Client()
}
func buildCompositeID(a, b string) string {
return fmt.Sprintf("%s:%s", a, b)
}
func parseCompositeID(id string) (string, string) {
parts := strings.SplitN(id, ":", 2)
return parts[0], parts[1]
}
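// A quick sketch of how the composite ID helpers round-trip (values are
// illustrative only):
//
//   id := buildCompositeID("my-app", "log-runtime-metrics") // "my-app:log-runtime-metrics"
//   app, feature := parseCompositeID(id)                    // "my-app", "log-runtime-metrics"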

View File

@ -232,15 +232,8 @@ func resourceHerokuAppCreate(d *schema.ResourceData, meta interface{}) error {
d.SetId(a.Name)
log.Printf("[INFO] App ID: %s", d.Id())
if v, ok := d.GetOk("config_vars"); ok {
err = updateConfigVars(d.Id(), client, nil, v.([]interface{}))
if err != nil {
return err
}
}
if v, ok := d.GetOk("buildpacks"); ok {
err = updateBuildpacks(d.Id(), client, v.([]interface{}))
if err := performAppPostCreateTasks(d, client); err != nil {
return err
}
return resourceHerokuAppRead(d, meta)
@ -305,11 +298,8 @@ func resourceHerokuOrgAppCreate(d *schema.ResourceData, meta interface{}) error
d.SetId(a.Name)
log.Printf("[INFO] App ID: %s", d.Id())
if v, ok := d.GetOk("config_vars"); ok {
err = updateConfigVars(d.Id(), client, nil, v.([]interface{}))
if err != nil {
return err
}
if err := performAppPostCreateTasks(d, client); err != nil {
return err
}
return resourceHerokuAppRead(d, meta)
@ -534,3 +524,20 @@ func updateBuildpacks(id string, client *heroku.Service, v []interface{}) error
return nil
}
// performAppPostCreateTasks performs post-create tasks common to both org and non-org apps.
func performAppPostCreateTasks(d *schema.ResourceData, client *heroku.Service) error {
if v, ok := d.GetOk("config_vars"); ok {
if err := updateConfigVars(d.Id(), client, nil, v.([]interface{})); err != nil {
return err
}
}
if v, ok := d.GetOk("buildpacks"); ok {
if err := updateBuildpacks(d.Id(), client, v.([]interface{})); err != nil {
return err
}
}
return nil
}

View File

@ -0,0 +1,101 @@
package heroku
import (
"context"
"log"
heroku "github.com/cyberdelia/heroku-go/v3"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceHerokuAppFeature() *schema.Resource {
return &schema.Resource{
Create: resourceHerokuAppFeatureCreate,
Update: resourceHerokuAppFeatureUpdate,
Read: resourceHerokuAppFeatureRead,
Delete: resourceHerokuAppFeatureDelete,
Schema: map[string]*schema.Schema{
"app": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"enabled": {
Type: schema.TypeBool,
Optional: true,
Default: true,
},
},
}
}
func resourceHerokuAppFeatureRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*heroku.Service)
app, id := parseCompositeID(d.Id())
feature, err := client.AppFeatureInfo(context.TODO(), app, id)
if err != nil {
return err
}
d.Set("app", app)
d.Set("name", feature.Name)
d.Set("enabled", feature.Enabled)
return nil
}
func resourceHerokuAppFeatureCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*heroku.Service)
app := d.Get("app").(string)
featureName := d.Get("name").(string)
enabled := d.Get("enabled").(bool)
opts := heroku.AppFeatureUpdateOpts{Enabled: enabled}
log.Printf("[DEBUG] Feature set configuration: %#v, %#v", featureName, opts)
feature, err := client.AppFeatureUpdate(context.TODO(), app, featureName, opts)
if err != nil {
return err
}
d.SetId(buildCompositeID(app, feature.ID))
return resourceHerokuAppFeatureRead(d, meta)
}
func resourceHerokuAppFeatureUpdate(d *schema.ResourceData, meta interface{}) error {
if d.HasChange("enabled") {
return resourceHerokuAppFeatureCreate(d, meta)
}
return resourceHerokuAppFeatureRead(d, meta)
}
func resourceHerokuAppFeatureDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*heroku.Service)
app, id := parseCompositeID(d.Id())
featureName := d.Get("name").(string)
log.Printf("[INFO] Deleting app feature %s (%s) for app %s", featureName, id, app)
opts := heroku.AppFeatureUpdateOpts{Enabled: false}
_, err := client.AppFeatureUpdate(context.TODO(), app, id, opts)
if err != nil {
return err
}
d.SetId("")
return nil
}

View File

@ -0,0 +1,135 @@
package heroku
import (
"context"
"fmt"
"testing"
heroku "github.com/cyberdelia/heroku-go/v3"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccHerokuAppFeature(t *testing.T) {
var feature heroku.AppFeatureInfoResult
appName := fmt.Sprintf("tftest-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckHerokuFeatureDestroy,
Steps: []resource.TestStep{
{
Config: testAccCheckHerokuFeature_basic(appName),
Check: resource.ComposeTestCheckFunc(
testAccCheckHerokuFeatureExists("heroku_app_feature.runtime_metrics", &feature),
testAccCheckHerokuFeatureEnabled(&feature, true),
resource.TestCheckResourceAttr(
"heroku_app_feature.runtime_metrics", "enabled", "true",
),
),
},
{
Config: testAccCheckHerokuFeature_disabled(appName),
Check: resource.ComposeTestCheckFunc(
testAccCheckHerokuFeatureExists("heroku_app_feature.runtime_metrics", &feature),
testAccCheckHerokuFeatureEnabled(&feature, false),
resource.TestCheckResourceAttr(
"heroku_app_feature.runtime_metrics", "enabled", "false",
),
),
},
},
})
}
func testAccCheckHerokuFeatureDestroy(s *terraform.State) error {
client := testAccProvider.Meta().(*heroku.Service)
for _, rs := range s.RootModule().Resources {
if rs.Type != "heroku_app_feature" {
continue
}
_, err := client.AppFeatureInfo(context.TODO(), rs.Primary.Attributes["app"], rs.Primary.ID)
if err == nil {
return fmt.Errorf("Feature still exists")
}
}
return nil
}
func testAccCheckHerokuFeatureExists(n string, feature *heroku.AppFeatureInfoResult) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No feature ID is set")
}
app, id := parseCompositeID(rs.Primary.ID)
if app != rs.Primary.Attributes["app"] {
return fmt.Errorf("Bad app: %s", app)
}
client := testAccProvider.Meta().(*heroku.Service)
foundFeature, err := client.AppFeatureInfo(context.TODO(), app, id)
if err != nil {
return err
}
if foundFeature.ID != id {
return fmt.Errorf("Feature not found")
}
*feature = *foundFeature
return nil
}
}
func testAccCheckHerokuFeatureEnabled(feature *heroku.AppFeatureInfoResult, enabled bool) resource.TestCheckFunc {
return func(s *terraform.State) error {
if feature.Enabled != enabled {
return fmt.Errorf("Bad enabled: %v", feature.Enabled)
}
return nil
}
}
func testAccCheckHerokuFeature_basic(appName string) string {
return fmt.Sprintf(`
resource "heroku_app" "example" {
name = "%s"
region = "us"
}
resource "heroku_app_feature" "runtime_metrics" {
app = "${heroku_app.example.name}"
name = "log-runtime-metrics"
}
`, appName)
}
func testAccCheckHerokuFeature_disabled(appName string) string {
return fmt.Sprintf(`
resource "heroku_app" "example" {
name = "%s"
region = "us"
}
resource "heroku_app_feature" "runtime_metrics" {
app = "${heroku_app.example.name}"
name = "log-runtime-metrics"
enabled = false
}
`, appName)
}

View File

@ -0,0 +1,92 @@
package heroku
import (
"context"
"fmt"
"log"
"github.com/cyberdelia/heroku-go/v3"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceHerokuPipeline() *schema.Resource {
return &schema.Resource{
Create: resourceHerokuPipelineCreate,
Update: resourceHerokuPipelineUpdate,
Read: resourceHerokuPipelineRead,
Delete: resourceHerokuPipelineDelete,
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
},
},
}
}
func resourceHerokuPipelineCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*heroku.Service)
opts := heroku.PipelineCreateOpts{
Name: d.Get("name").(string),
}
log.Printf("[DEBUG] Pipeline create configuration: %#v", opts)
p, err := client.PipelineCreate(context.TODO(), opts)
if err != nil {
return fmt.Errorf("Error creating pipeline: %s", err)
}
d.SetId(p.ID)
d.Set("name", p.Name)
log.Printf("[INFO] Pipeline ID: %s", d.Id())
return resourceHerokuPipelineUpdate(d, meta)
}
func resourceHerokuPipelineUpdate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*heroku.Service)
if d.HasChange("name") {
name := d.Get("name").(string)
opts := heroku.PipelineUpdateOpts{
Name: &name,
}
_, err := client.PipelineUpdate(context.TODO(), d.Id(), opts)
if err != nil {
return err
}
}
return resourceHerokuPipelineRead(d, meta)
}
func resourceHerokuPipelineDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*heroku.Service)
log.Printf("[INFO] Deleting pipeline: %s", d.Id())
_, err := client.PipelineDelete(context.TODO(), d.Id())
if err != nil {
return fmt.Errorf("Error deleting pipeline: %s", err)
}
return nil
}
func resourceHerokuPipelineRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*heroku.Service)
p, err := client.PipelineInfo(context.TODO(), d.Id())
if err != nil {
return fmt.Errorf("Error retrieving pipeline: %s", err)
}
d.Set("name", p.Name)
return nil
}

View File

@ -0,0 +1,89 @@
package heroku
import (
"context"
"fmt"
"log"
"github.com/cyberdelia/heroku-go/v3"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceHerokuPipelineCoupling() *schema.Resource {
return &schema.Resource{
Create: resourceHerokuPipelineCouplingCreate,
Read: resourceHerokuPipelineCouplingRead,
Delete: resourceHerokuPipelineCouplingDelete,
Schema: map[string]*schema.Schema{
"app": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"pipeline": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateUUID,
},
"stage": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validatePipelineStageName,
},
},
}
}
func resourceHerokuPipelineCouplingCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*heroku.Service)
opts := heroku.PipelineCouplingCreateOpts{
App: d.Get("app").(string),
Pipeline: d.Get("pipeline").(string),
Stage: d.Get("stage").(string),
}
log.Printf("[DEBUG] PipelineCoupling create configuration: %#v", opts)
p, err := client.PipelineCouplingCreate(context.TODO(), opts)
if err != nil {
return fmt.Errorf("Error creating pipeline: %s", err)
}
d.SetId(p.ID)
log.Printf("[INFO] PipelineCoupling ID: %s", d.Id())
return resourceHerokuPipelineCouplingRead(d, meta)
}
func resourceHerokuPipelineCouplingDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*heroku.Service)
log.Printf("[INFO] Deleting pipeline: %s", d.Id())
_, err := client.PipelineCouplingDelete(context.TODO(), d.Id())
if err != nil {
return fmt.Errorf("Error deleting pipeline: %s", err)
}
return nil
}
func resourceHerokuPipelineCouplingRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*heroku.Service)
p, err := client.PipelineCouplingInfo(context.TODO(), d.Id())
if err != nil {
return fmt.Errorf("Error retrieving pipeline: %s", err)
}
d.Set("app", p.App)
d.Set("pipeline", p.Pipeline)
d.Set("stage", p.Stage)
return nil
}
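// A minimal sketch of coupling an app to a pipeline stage (names are
// illustrative; both resources are introduced in this commit):
//
//   resource "heroku_pipeline" "example" {
//     name = "example-pipeline"
//   }
//
//   resource "heroku_pipeline_coupling" "example" {
//     app      = "example-app"
//     pipeline = "${heroku_pipeline.example.id}"
//     stage    = "production"
//   }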

View File

@ -0,0 +1,123 @@
package heroku
import (
"context"
"fmt"
"testing"
heroku "github.com/cyberdelia/heroku-go/v3"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccHerokuPipelineCoupling_Basic(t *testing.T) {
var coupling heroku.PipelineCouplingInfoResult
appName := fmt.Sprintf("tftest-%s", acctest.RandString(10))
pipelineName := fmt.Sprintf("tftest-%s", acctest.RandString(10))
stageName := "development"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckHerokuPipelineCouplingDestroy,
Steps: []resource.TestStep{
{
Config: testAccCheckHerokuPipelineCouplingConfig_basic(appName, pipelineName, stageName),
Check: resource.ComposeTestCheckFunc(
testAccCheckHerokuPipelineCouplingExists("heroku_pipeline_coupling.default", &coupling),
testAccCheckHerokuPipelineCouplingAttributes(
&coupling,
"heroku_pipeline.default",
stageName,
),
),
},
},
})
}
func testAccCheckHerokuPipelineCouplingConfig_basic(appName, pipelineName, stageName string) string {
return fmt.Sprintf(`
resource "heroku_app" "default" {
name = "%s"
region = "us"
}
resource "heroku_pipeline" "default" {
name = "%s"
}
resource "heroku_pipeline_coupling" "default" {
app = "${heroku_app.default.id}"
pipeline = "${heroku_pipeline.default.id}"
stage = "%s"
}
`, appName, pipelineName, stageName)
}
func testAccCheckHerokuPipelineCouplingExists(n string, pipeline *heroku.PipelineCouplingInfoResult) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No coupling ID set")
}
client := testAccProvider.Meta().(*heroku.Service)
foundPipelineCoupling, err := client.PipelineCouplingInfo(context.TODO(), rs.Primary.ID)
if err != nil {
return err
}
if foundPipelineCoupling.ID != rs.Primary.ID {
return fmt.Errorf("PipelineCoupling not found: %s != %s", foundPipelineCoupling.ID, rs.Primary.ID)
}
*pipeline = *foundPipelineCoupling
return nil
}
}
func testAccCheckHerokuPipelineCouplingAttributes(coupling *heroku.PipelineCouplingInfoResult, pipelineResource, stageName string) resource.TestCheckFunc {
return func(s *terraform.State) error {
pipeline, ok := s.RootModule().Resources[pipelineResource]
if !ok {
return fmt.Errorf("Pipeline not found: %s", pipelineResource)
}
if coupling.Pipeline.ID != pipeline.Primary.ID {
return fmt.Errorf("Bad pipeline ID: %v != %v", coupling.Pipeline.ID, pipeline.Primary.ID)
}
if coupling.Stage != stageName {
return fmt.Errorf("Bad stage: %s", coupling.Stage)
}
return nil
}
}
func testAccCheckHerokuPipelineCouplingDestroy(s *terraform.State) error {
client := testAccProvider.Meta().(*heroku.Service)
for _, rs := range s.RootModule().Resources {
if rs.Type != "heroku_pipeline_coupling" {
continue
}
_, err := client.PipelineCouplingInfo(context.TODO(), rs.Primary.ID)
if err == nil {
return fmt.Errorf("PipelineCoupling still exists")
}
}
return nil
}

View File

@ -0,0 +1,96 @@
package heroku
import (
"context"
"fmt"
"testing"
heroku "github.com/cyberdelia/heroku-go/v3"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccHerokuPipeline_Basic(t *testing.T) {
var pipeline heroku.PipelineInfoResult
pipelineName := fmt.Sprintf("tftest-%s", acctest.RandString(10))
pipelineName2 := fmt.Sprintf("%s-2", pipelineName)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckHerokuPipelineDestroy,
Steps: []resource.TestStep{
{
Config: testAccCheckHerokuPipelineConfig_basic(pipelineName),
Check: resource.ComposeTestCheckFunc(
testAccCheckHerokuPipelineExists("heroku_pipeline.foobar", &pipeline),
resource.TestCheckResourceAttr(
"heroku_pipeline.foobar", "name", pipelineName),
),
},
{
Config: testAccCheckHerokuPipelineConfig_basic(pipelineName2),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(
"heroku_pipeline.foobar", "name", pipelineName2),
),
},
},
})
}
func testAccCheckHerokuPipelineConfig_basic(pipelineName string) string {
return fmt.Sprintf(`
resource "heroku_pipeline" "foobar" {
name = "%s"
}
`, pipelineName)
}
func testAccCheckHerokuPipelineExists(n string, pipeline *heroku.PipelineInfoResult) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No pipeline name set")
}
client := testAccProvider.Meta().(*heroku.Service)
foundPipeline, err := client.PipelineInfo(context.TODO(), rs.Primary.ID)
if err != nil {
return err
}
if foundPipeline.ID != rs.Primary.ID {
return fmt.Errorf("Pipeline not found")
}
*pipeline = *foundPipeline
return nil
}
}
func testAccCheckHerokuPipelineDestroy(s *terraform.State) error {
client := testAccProvider.Meta().(*heroku.Service)
for _, rs := range s.RootModule().Resources {
if rs.Type != "heroku_pipeline" {
continue
}
_, err := client.PipelineInfo(context.TODO(), rs.Primary.ID)
if err == nil {
return fmt.Errorf("Pipeline still exists")
}
}
return nil
}

View File

@ -2,9 +2,12 @@ package heroku
import (
"context"
"fmt"
"log"
"time"
heroku "github.com/cyberdelia/heroku-go/v3"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
@ -56,23 +59,32 @@ func resourceHerokuSpaceCreate(d *schema.ResourceData, meta interface{}) error {
d.SetId(space.ID)
log.Printf("[INFO] Space ID: %s", d.Id())
// The type conversion here can be dropped when the vendored version of
// heroku-go is updated.
setSpaceAttributes(d, (*heroku.Space)(space))
return nil
// Wait for the Space to be allocated
log.Printf("[DEBUG] Waiting for Space (%s) to be allocated", d.Id())
stateConf := &resource.StateChangeConf{
Pending: []string{"allocating"},
Target: []string{"allocated"},
Refresh: SpaceStateRefreshFunc(client, d.Id()),
Timeout: 20 * time.Minute,
}
if _, err := stateConf.WaitForState(); err != nil {
return fmt.Errorf("Error waiting for Space (%s) to become available: %s", d.Id(), err)
}
return resourceHerokuSpaceRead(d, meta)
}
func resourceHerokuSpaceRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*heroku.Service)
space, err := client.SpaceInfo(context.TODO(), d.Id())
spaceRaw, _, err := SpaceStateRefreshFunc(client, d.Id())()
if err != nil {
return err
}
space := spaceRaw.(*heroku.Space)
// The type conversion here can be dropped when the vendored version of
// heroku-go is updated.
setSpaceAttributes(d, (*heroku.Space)(space))
setSpaceAttributes(d, space)
return nil
}
@ -115,3 +127,18 @@ func resourceHerokuSpaceDelete(d *schema.ResourceData, meta interface{}) error {
d.SetId("")
return nil
}
// SpaceStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch
// a Space.
func SpaceStateRefreshFunc(client *heroku.Service, id string) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
space, err := client.SpaceInfo(context.TODO(), id)
if err != nil {
return nil, "", err
}
// The type conversion here can be dropped when the vendored version of
// heroku-go is updated.
return (*heroku.Space)(space), space.State, nil
}
}

View File

@ -0,0 +1,38 @@
package heroku
import (
"fmt"
"strings"
"github.com/satori/uuid"
)
func validatePipelineStageName(v interface{}, k string) (ws []string, errors []error) {
validPipelineStageNames := []string{
"review",
"development",
"staging",
"production",
}
for _, s := range validPipelineStageNames {
if v == s {
return
}
}
err := fmt.Errorf(
"%s is an invalid pipeline stage, must be one of [%s]",
v,
strings.Join(validPipelineStageNames, ", "),
)
errors = append(errors, err)
return
}
func validateUUID(v interface{}, k string) (ws []string, errors []error) {
if _, err := uuid.FromString(v.(string)); err != nil {
errors = append(errors, fmt.Errorf("%q is an invalid UUID: %s", k, err))
}
return
}

View File

@ -0,0 +1,53 @@
package heroku
import "testing"
func TestPipelineStage(t *testing.T) {
valid := []string{
"review",
"development",
"staging",
"production",
}
for _, v := range valid {
_, errors := validatePipelineStageName(v, "stage")
if len(errors) != 0 {
t.Fatalf("%q should be a valid stage: %q", v, errors)
}
}
invalid := []string{
"foobarbaz",
"another-stage",
"",
}
for _, v := range invalid {
_, errors := validatePipelineStageName(v, "stage")
if len(errors) == 0 {
t.Fatalf("%q should be an invalid stage", v)
}
}
}
func TestValidateUUID(t *testing.T) {
valid := []string{
"4812ccbc-2a2e-4c6c-bae4-a3d04ed51c0e",
}
for _, v := range valid {
_, errors := validateUUID(v, "id")
if len(errors) != 0 {
t.Fatalf("%q should be a valid UUID: %q", v, errors)
}
}
invalid := []string{
"foobarbaz",
"my-app-name",
}
for _, v := range invalid {
_, errors := validateUUID(v, "id")
if len(errors) == 0 {
t.Fatalf("%q should be an invalid UUID", v)
}
}
}

View File

@ -24,6 +24,24 @@ func Provider() terraform.ResourceProvider {
DefaultFunc: schema.EnvDefaultFunc("NOMAD_REGION", ""),
Description: "Region of the target Nomad agent.",
},
"ca_file": &schema.Schema{
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("NOMAD_CACERT", ""),
Description: "A path to a PEM-encoded certificate authority used to verify the remote agent's certificate.",
},
"cert_file": &schema.Schema{
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("NOMAD_CLIENT_CERT", ""),
Description: "A path to a PEM-encoded certificate provided to the remote agent; requires use of key_file.",
},
"key_file": &schema.Schema{
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("NOMAD_CLIENT_KEY", ""),
Description: "A path to a PEM-encoded private key, required if cert_file is specified.",
},
},
ConfigureFunc: providerConfigure,
@ -38,6 +56,9 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) {
config := api.DefaultConfig()
config.Address = d.Get("address").(string)
config.Region = d.Get("region").(string)
config.TLSConfig.CACert = d.Get("ca_file").(string)
config.TLSConfig.ClientCert = d.Get("cert_file").(string)
config.TLSConfig.ClientKey = d.Get("key_file").(string)
client, err := api.NewClient(config)
if err != nil {

View File

@ -207,15 +207,17 @@ func testResourceJob_checkExists(s *terraform.State) error {
func testResourceJob_checkDestroy(jobID string) r.TestCheckFunc {
return func(*terraform.State) error {
client := testProvider.Meta().(*api.Client)
_, _, err := client.Jobs().Info(jobID, nil)
if err != nil && strings.Contains(err.Error(), "404") {
job, _, err := client.Jobs().Info(jobID, nil)
// This should likely never happen, due to how nomad caches jobs
if err != nil && strings.Contains(err.Error(), "404") || job == nil {
return nil
}
if err == nil {
err = errors.New("not destroyed")
if job.Status != "dead" {
return fmt.Errorf("Job %q has not been stopped. Status: %s", jobID, job.Status)
}
return err
return nil
}
}
@ -284,9 +286,12 @@ func testResourceJob_updateCheck(s *terraform.State) error {
{
// Verify foo doesn't exist
_, _, err := client.Jobs().Info("foo", nil)
if err == nil {
return errors.New("reading foo success")
job, _, err := client.Jobs().Info("foo", nil)
if err != nil {
return fmt.Errorf("error reading %q job: %s", "foo", err)
}
if job.Status != "dead" {
return fmt.Errorf("%q job is not dead. Status: %q", "foo", job.Status)
}
}

View File

@ -122,6 +122,12 @@ func resourcePostgreSQLDatabaseCreate(d *schema.ResourceData, meta interface{})
b := bytes.NewBufferString("CREATE DATABASE ")
fmt.Fprint(b, pq.QuoteIdentifier(dbName))
//needed in order to set the owner of the db if the connection user is not a superuser
err = grantRoleMembership(conn, d.Get(dbOwnerAttr).(string), c.username)
if err != nil {
return errwrap.Wrapf(fmt.Sprintf("Error granting role membership on database %s: {{err}}", dbName), err)
}
// Handle each option individually and stream results into the query
// buffer.
@ -464,3 +470,18 @@ func doSetDBIsTemplate(conn *sql.DB, dbName string, isTemplate bool) error {
return nil
}
func grantRoleMembership(conn *sql.DB, dbOwner string, connUsername string) error {
if dbOwner != "" && dbOwner != connUsername {
query := fmt.Sprintf("GRANT %s TO %s", pq.QuoteIdentifier(dbOwner), pq.QuoteIdentifier(connUsername))
_, err := conn.Query(query)
if err != nil {
// is already member or role
if strings.Contains(err.Error(), "duplicate key value violates unique constraint") {
return nil
}
return errwrap.Wrapf("Error granting membership: {{err}}", err)
}
}
return nil
}
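// For example, with dbOwner "app_owner" and connUsername "terraform" the
// statement issued above is (identifiers quoted by pq.QuoteIdentifier):
//
//   GRANT "app_owner" TO "terraform"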

View File

@ -17,7 +17,7 @@ func TestAccDataSourceImage_basic(t *testing.T) {
Config: testAccDataSourceProfitBricksImage_basic,
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("data.profitbricks_image.img", "location", "us/las"),
resource.TestCheckResourceAttr("data.profitbricks_image.img", "name", "Ubuntu-16.04-LTS-server-2017-02-01"),
resource.TestCheckResourceAttr("data.profitbricks_image.img", "name", "Ubuntu-16.04-LTS-server-2017-05-01"),
resource.TestCheckResourceAttr("data.profitbricks_image.img", "type", "HDD"),
),
},

View File

@ -254,34 +254,38 @@ func resourceProfitBricksServerCreate(d *schema.ResourceData, meta interface{})
var sshkey_path []interface{}
var image, licenceType, availabilityZone string
if !IsValidUUID(rawMap["image_name"].(string)) {
if rawMap["image_name"] != nil {
image = getImageId(d.Get("datacenter_id").(string), rawMap["image_name"].(string), rawMap["disk_type"].(string))
if image == "" {
dc := profitbricks.GetDatacenter(d.Get("datacenter_id").(string))
return fmt.Errorf("Image '%s' doesn't exist. in location %s", rawMap["image_name"], dc.Properties.Location)
}
}
} else {
image = rawMap["image_name"].(string)
}
if rawMap["licence_type"] != nil {
licenceType = rawMap["licence_type"].(string)
}
if rawMap["image_password"] != nil {
imagePassword = rawMap["image_password"].(string)
}
if rawMap["ssh_key_path"] != nil {
sshkey_path = rawMap["ssh_key_path"].([]interface{})
}
if rawMap["image_name"] != nil {
image_name := rawMap["image_name"].(string)
if !IsValidUUID(image_name) {
if imagePassword == "" && len(sshkey_path) == 0 {
return fmt.Errorf("Either 'image_password' or 'ssh_key_path' must be provided.")
return fmt.Errorf("Either 'image_password' or 'sshkey' must be provided.")
}
image = getImageId(d.Get("datacenter_id").(string), image_name, rawMap["disk_type"].(string))
} else {
img := profitbricks.GetImage(image_name)
if img.StatusCode > 299 {
return fmt.Errorf("Error fetching image: %s", img.Response)
}
if img.Properties.Public && imagePassword == "" && len(sshkey_path) == 0 {
return fmt.Errorf("Either 'image_password' or 'sshkey' must be provided.")
}
image = image_name
}
if rawMap["licence_type"] != nil {
licenceType = rawMap["licence_type"].(string)
}
var publicKeys []string
if len(sshkey_path) != 0 {
for _, path := range sshkey_path {

View File

@ -77,12 +77,6 @@ func resourceProfitBricksVolumeCreate(d *schema.ResourceData, meta interface{})
ssh_keypath = d.Get("ssh_key_path").([]interface{})
image_name := d.Get("image_name").(string)
if image_name != "" {
if imagePassword == "" && len(ssh_keypath) == 0 {
return fmt.Errorf("Either 'image_password' or 'sshkey' must be provided.")
}
}
licenceType := d.Get("licence_type").(string)
if image_name == "" && licenceType == "" {
@ -102,10 +96,26 @@ func resourceProfitBricksVolumeCreate(d *schema.ResourceData, meta interface{})
}
var image string
if !IsValidUUID(image_name) {
image = getImageId(d.Get("datacenter_id").(string), image_name, d.Get("disk_type").(string))
} else {
image = image_name
if image_name != "" {
if !IsValidUUID(image_name) {
if imagePassword == "" && len(ssh_keypath) == 0 {
return fmt.Errorf("Either 'image_password' or 'sshkey' must be provided.")
}
image = getImageId(d.Get("datacenter_id").(string), image_name, d.Get("disk_type").(string))
} else {
img := profitbricks.GetImage(image_name)
if img.StatusCode > 299 {
return fmt.Errorf("Error fetching image: %s", img.Response)
}
if img.Properties.Public && imagePassword == "" && len(ssh_keypath) == 0 {
return fmt.Errorf("Either 'image_password' or 'sshkey' must be provided.")
}
image = image_name
}
}
volume := profitbricks.Volume{

View File

@ -99,3 +99,59 @@ resource "test_resource" "foo" {
},
})
}
// TestDataSource_dataSourceCountGrandChild tests that a grandchild data source
// that is based off of count works, ie: dependency chain foo -> bar -> baz.
// This was failing because CountBoundaryTransformer is being run during apply
// instead of plan, which meant that it wasn't firing after data sources were
// potentially changing state and causing diff/interpolation issues.
//
// This happens after the initial apply, after state is saved.
func TestDataSource_dataSourceCountGrandChild(t *testing.T) {
resource.UnitTest(t, resource.TestCase{
Providers: testAccProviders,
CheckDestroy: func(s *terraform.State) error {
return nil
},
Steps: []resource.TestStep{
{
Config: dataSourceCountGrandChildConfig,
},
{
Config: dataSourceCountGrandChildConfig,
Check: func(s *terraform.State) error {
for _, v := range []string{"foo", "bar", "baz"} {
count := 0
for k := range s.RootModule().Resources {
if strings.HasPrefix(k, fmt.Sprintf("data.test_data_source.%s.", v)) {
count++
}
}
if count != 2 {
return fmt.Errorf("bad count for data.test_data_source.%s: %d", v, count)
}
}
return nil
},
},
},
})
}
const dataSourceCountGrandChildConfig = `
data "test_data_source" "foo" {
count = 2
input = "one"
}
data "test_data_source" "bar" {
count = "${length(data.test_data_source.foo.*.id)}"
input = "${data.test_data_source.foo.*.output[count.index]}"
}
data "test_data_source" "baz" {
count = "${length(data.test_data_source.bar.*.id)}"
input = "${data.test_data_source.bar.*.output[count.index]}"
}
`

View File

@ -42,6 +42,12 @@ func Provider() terraform.ResourceProvider {
Required: true,
DefaultFunc: schema.MultiEnvDefaultFunc([]string{"TRITON_KEY_ID", "SDC_KEY_ID"}, ""),
},
"insecure_skip_tls_verify": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("TRITON_SKIP_TLS_VERIFY", ""),
},
},
ResourcesMap: map[string]*schema.Resource{
@ -56,10 +62,11 @@ func Provider() terraform.ResourceProvider {
}
type Config struct {
Account string
KeyMaterial string
KeyID string
URL string
Account string
KeyMaterial string
KeyID string
URL string
InsecureSkipTLSVerify bool
}
func (c Config) validate() error {
@ -98,6 +105,10 @@ func (c Config) getTritonClient() (*triton.Client, error) {
return nil, errwrap.Wrapf("Error Creating Triton Client: {{err}}", err)
}
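// Certificate verification is skipped only when explicitly requested below;
// this is intended for self-signed test endpoints, not for production use.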
if c.InsecureSkipTLSVerify {
client.InsecureSkipTLSVerify()
}
return client, nil
}
@ -106,6 +117,8 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) {
Account: d.Get("account").(string),
URL: d.Get("url").(string),
KeyID: d.Get("key_id").(string),
InsecureSkipTLSVerify: d.Get("insecure_skip_tls_verify").(bool),
}
if keyMaterial, ok := d.GetOk("key_material"); ok {

View File

@ -23,6 +23,7 @@ var (
"user_script": "user-script",
"user_data": "user-data",
"administrator_pw": "administrator-pw",
"cloud_config": "cloud-init:user-data",
}
)
@ -182,6 +183,12 @@ func resourceMachine() *schema.Resource {
Optional: true,
Computed: true,
},
"cloud_config": {
Description: "copied to machine on boot",
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"user_data": {
Description: "Data copied to machine on boot",
Type: schema.TypeString,

View File

@ -87,6 +87,7 @@ func Provider() terraform.ResourceProvider {
},
ResourcesMap: map[string]*schema.Resource{
"vault_auth_backend": authBackendResource(),
"vault_generic_secret": genericSecretResource(),
"vault_policy": policyResource(),
},

View File

@ -0,0 +1,121 @@
package vault
import (
"errors"
"fmt"
"log"
"strings"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/vault/api"
)
func authBackendResource() *schema.Resource {
return &schema.Resource{
Create: authBackendWrite,
Delete: authBackendDelete,
Read: authBackendRead,
Schema: map[string]*schema.Schema{
"type": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
Description: "Name of the auth backend",
},
"path": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
Description: "path to mount the backend. This defaults to the type.",
ValidateFunc: func(v interface{}, k string) (ws []string, errs []error) {
value := v.(string)
if strings.HasSuffix(value, "/") {
errs = append(errs, errors.New("cannot write to a path ending in '/'"))
}
return
},
},
"description": &schema.Schema{
Type: schema.TypeString,
ForceNew: true,
Optional: true,
Description: "The description of the auth backend",
},
},
}
}
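// A minimal usage sketch, mounting the backend at a custom path (values are
// illustrative; path defaults to the type when omitted):
//
//   resource "vault_auth_backend" "example" {
//     type        = "github"
//     path        = "github-corp"
//     description = "GitHub auth for the corp org"
//   }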
func authBackendWrite(d *schema.ResourceData, meta interface{}) error {
client := meta.(*api.Client)
name := d.Get("type").(string)
desc := d.Get("description").(string)
path := d.Get("path").(string)
log.Printf("[DEBUG] Writing auth %s to Vault", name)
var err error
if path == "" {
path = name
err = d.Set("path", name)
if err != nil {
return fmt.Errorf("unable to set state: %s", err)
}
}
err = client.Sys().EnableAuth(path, name, desc)
if err != nil {
return fmt.Errorf("error writing to Vault: %s", err)
}
d.SetId(name)
return nil
}
func authBackendDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*api.Client)
name := d.Id()
log.Printf("[DEBUG] Deleting auth %s from Vault", name)
err := client.Sys().DisableAuth(name)
if err != nil {
return fmt.Errorf("error disabling auth from Vault: %s", err)
}
return nil
}
func authBackendRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*api.Client)
name := d.Id()
auths, err := client.Sys().ListAuth()
if err != nil {
return fmt.Errorf("error reading from Vault: %s", err)
}
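// Sys().ListAuth keys are mount paths with a trailing slash (e.g. "github/"),
// so compare against the configured path plus "/".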
for path, auth := range auths {
configuredPath := d.Get("path").(string)
vaultPath := configuredPath + "/"
if auth.Type == name && path == vaultPath {
return nil
}
}
// If we fell out here then we didn't find our Auth in the list.
d.SetId("")
return nil
}
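A sketch of the resource with both optional arguments set; the values are illustrative:

resource "vault_auth_backend" "github" {
  type        = "github"

  # Optional: defaults to the type, and must not end in "/".
  path        = "github-corp"

  # Optional: forces a new resource when changed.
  description = "GitHub auth backend for the corp org"
}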


@ -0,0 +1,129 @@
package vault
import (
"fmt"
"testing"
r "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"github.com/hashicorp/vault/api"
)
func TestResourceAuth(t *testing.T) {
r.Test(t, r.TestCase{
Providers: testProviders,
PreCheck: func() { testAccPreCheck(t) },
Steps: []r.TestStep{
r.TestStep{
Config: testResourceAuth_initialConfig,
Check: testResourceAuth_initialCheck,
},
r.TestStep{
Config: testResourceAuth_updateConfig,
Check: testResourceAuth_updateCheck,
},
},
})
}
var testResourceAuth_initialConfig = `
resource "vault_auth_backend" "test" {
type = "github"
}
`
func testResourceAuth_initialCheck(s *terraform.State) error {
resourceState := s.Modules[0].Resources["vault_auth_backend.test"]
if resourceState == nil {
return fmt.Errorf("resource not found in state")
}
instanceState := resourceState.Primary
if instanceState == nil {
return fmt.Errorf("resource has no primary instance")
}
name := instanceState.ID
if name != instanceState.Attributes["type"] {
return fmt.Errorf("id doesn't match name")
}
if name != "github" {
return fmt.Errorf("unexpected auth name %s", name)
}
client := testProvider.Meta().(*api.Client)
auths, err := client.Sys().ListAuth()
if err != nil {
return fmt.Errorf("error reading back auth: %s", err)
}
found := false
for _, auth := range auths {
if auth.Type == name {
found = true
break
}
}
if !found {
return fmt.Errorf("could not find auth backend %s in %+v", name, auths)
}
return nil
}
var testResourceAuth_updateConfig = `
resource "vault_auth_backend" "test" {
type = "ldap"
}
`
func testResourceAuth_updateCheck(s *terraform.State) error {
resourceState := s.Modules[0].Resources["vault_auth_backend.test"]
if resourceState == nil {
return fmt.Errorf("resource not found in state")
}
instanceState := resourceState.Primary
if instanceState == nil {
return fmt.Errorf("resource has no primary instance")
}
name := instanceState.ID
if name != instanceState.Attributes["type"] {
return fmt.Errorf("id doesn't match name")
}
if name != "ldap" {
return fmt.Errorf("unexpected auth name")
}
client := testProvider.Meta().(*api.Client)
auths, err := client.Sys().ListAuth()
if err != nil {
return fmt.Errorf("error reading back auth: %s", err)
}
found := false
for _, auth := range auths {
if auth.Type == name {
found = true
break
}
}
if !found {
return fmt.Errorf("could not find auth backend %s in %+v", name, auths)
}
return nil
}


@ -22,7 +22,7 @@ import (
func Provisioner() terraform.ResourceProvisioner {
return &schema.Provisioner{
Schema: map[string]*schema.Schema{
"inline": &schema.Schema{
"inline": {
Type: schema.TypeList,
Elem: &schema.Schema{Type: schema.TypeString},
PromoteSingle: true,
@ -30,13 +30,13 @@ func Provisioner() terraform.ResourceProvisioner {
ConflictsWith: []string{"script", "scripts"},
},
"script": &schema.Schema{
"script": {
Type: schema.TypeString,
Optional: true,
ConflictsWith: []string{"inline", "scripts"},
},
"scripts": &schema.Schema{
"scripts": {
Type: schema.TypeList,
Elem: &schema.Schema{Type: schema.TypeString},
Optional: true,
@ -81,7 +81,11 @@ func applyFn(ctx context.Context) error {
func generateScripts(d *schema.ResourceData) ([]string, error) {
var lines []string
for _, l := range d.Get("inline").([]interface{}) {
lines = append(lines, l.(string))
line, ok := l.(string)
if !ok {
return nil, fmt.Errorf("Error parsing %v as a string", l)
}
lines = append(lines, line)
}
lines = append(lines, "")
@ -109,12 +113,20 @@ func collectScripts(d *schema.ResourceData) ([]io.ReadCloser, error) {
// Collect scripts
var scripts []string
if script, ok := d.GetOk("script"); ok {
scripts = append(scripts, script.(string))
scr, ok := script.(string)
if !ok {
return nil, fmt.Errorf("Error parsing script %v as string", script)
}
scripts = append(scripts, scr)
}
if scriptList, ok := d.GetOk("scripts"); ok {
for _, script := range scriptList.([]interface{}) {
scripts = append(scripts, script.(string))
scr, ok := script.(string)
if !ok {
return nil, fmt.Errorf("Error parsing script %v as string", script)
}
scripts = append(scripts, scr)
}
}
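For context, a sketch of the inline form these assertions now guard; the null_resource wrapper and commands are illustrative. Each element must be a plain string, and a non-string element now returns an "Error parsing" error rather than panicking on the failed type assertion:

resource "null_resource" "example" {
  provisioner "remote-exec" {
    inline = [
      "sudo apt-get update",
      "sudo apt-get install -y nginx",
    ]
  }
}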


@ -9,6 +9,8 @@ import (
"testing"
"time"
"strings"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
@ -71,6 +73,23 @@ func TestResourceProvider_generateScript(t *testing.T) {
}
}
func TestResourceProvider_generateScriptEmptyInline(t *testing.T) {
p := Provisioner().(*schema.Provisioner)
conf := map[string]interface{}{
"inline": []interface{}{""},
}
_, err := generateScripts(schema.TestResourceDataRaw(
t, p.Schema, conf))
if err == nil {
t.Fatal("expected error, got none")
}
if !strings.Contains(err.Error(), "Error parsing") {
t.Fatalf("expected parsing error, got: %s", err)
}
}
func TestResourceProvider_CollectScripts_inline(t *testing.T) {
p := Provisioner().(*schema.Provisioner)
conf := map[string]interface{}{
@ -162,6 +181,24 @@ func TestResourceProvider_CollectScripts_scripts(t *testing.T) {
}
}
func TestResourceProvider_CollectScripts_scriptsEmpty(t *testing.T) {
p := Provisioner().(*schema.Provisioner)
conf := map[string]interface{}{
"scripts": []interface{}{""},
}
_, err := collectScripts(schema.TestResourceDataRaw(
t, p.Schema, conf))
if err == nil {
t.Fatal("expected error")
}
if !strings.Contains(err.Error(), "Error parsing") {
t.Fatalf("Expected parsing error, got: %s", err)
}
}
func TestRetryFunc(t *testing.T) {
// succeed on the third try
errs := []error{io.EOF, &net.OpError{Err: errors.New("ERROR")}, nil}


@ -57,7 +57,7 @@ const (
envDoesNotExist = `
Environment %q doesn't exist!
You can create this environment with the "-new" option.`
You can create this environment with the "new" option.`
envChanged = `[reset][green]Switched to environment %q!`


@ -42,6 +42,10 @@ func (h *CountHook) PreApply(
h.Lock()
defer h.Unlock()
if d.Empty() {
return terraform.HookActionContinue, nil
}
if h.pending == nil {
h.pending = make(map[string]countHookAction)
}


@ -59,6 +59,11 @@ func (h *UiHook) PreApply(
d *terraform.InstanceDiff) (terraform.HookAction, error) {
h.once.Do(h.init)
// if there's no diff, there's nothing to output
if d.Empty() {
return terraform.HookActionContinue, nil
}
id := n.HumanId()
op := uiResourceModify


@ -4,6 +4,7 @@ import (
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"encoding/base64"
"encoding/hex"
"encoding/json"
@ -57,6 +58,7 @@ func Funcs() map[string]ast.Function {
"base64decode": interpolationFuncBase64Decode(),
"base64encode": interpolationFuncBase64Encode(),
"base64sha256": interpolationFuncBase64Sha256(),
"base64sha512": interpolationFuncBase64Sha512(),
"ceil": interpolationFuncCeil(),
"chomp": interpolationFuncChomp(),
"cidrhost": interpolationFuncCidrHost(),
@ -90,6 +92,7 @@ func Funcs() map[string]ast.Function {
"replace": interpolationFuncReplace(),
"sha1": interpolationFuncSha1(),
"sha256": interpolationFuncSha256(),
"sha512": interpolationFuncSha512(),
"signum": interpolationFuncSignum(),
"slice": interpolationFuncSlice(),
"sort": interpolationFuncSort(),
@ -1240,6 +1243,20 @@ func interpolationFuncSha256() ast.Function {
}
}
func interpolationFuncSha512() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{ast.TypeString},
ReturnType: ast.TypeString,
Callback: func(args []interface{}) (interface{}, error) {
s := args[0].(string)
h := sha512.New()
h.Write([]byte(s))
hash := hex.EncodeToString(h.Sum(nil))
return hash, nil
},
}
}
func interpolationFuncTrimSpace() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{ast.TypeString},
@ -1266,6 +1283,21 @@ func interpolationFuncBase64Sha256() ast.Function {
}
}
func interpolationFuncBase64Sha512() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{ast.TypeString},
ReturnType: ast.TypeString,
Callback: func(args []interface{}) (interface{}, error) {
s := args[0].(string)
h := sha512.New()
h.Write([]byte(s))
shaSum := h.Sum(nil)
encoded := base64.StdEncoding.EncodeToString(shaSum[:])
return encoded, nil
},
}
}
func interpolationFuncUUID() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{},
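Usage mirrors the existing sha256 and base64sha256 functions; an illustrative pair of outputs, with payload.txt as a placeholder path:

output "payload_sha512" {
  # Hex-encoded digest of the file contents.
  value = "${sha512(file("payload.txt"))}"
}

output "payload_sha512_b64" {
  # Base64 of the raw digest bytes, not of the hex string;
  # see the base64encode(sha512(...)) test case below.
  value = "${base64sha512(file("payload.txt"))}"
}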


@ -2070,6 +2070,18 @@ func TestInterpolateFuncSha256(t *testing.T) {
})
}
func TestInterpolateFuncSha512(t *testing.T) {
testFunction(t, testFunctionConfig{
Cases: []testFunctionCase{
{
`${sha512("test")}`,
"ee26b0dd4af7e749aa1a8ee3c10ae9923f618980772e473f8819a5d4940e0db27ac185f8a0e1d5f84f88bc887fd67b143732c304cc5fa9ad8e6f57f50028a8ff",
false,
},
},
})
}
func TestInterpolateFuncTitle(t *testing.T) {
testFunction(t, testFunctionConfig{
Cases: []testFunctionCase{
@ -2129,6 +2141,23 @@ func TestInterpolateFuncBase64Sha256(t *testing.T) {
})
}
func TestInterpolateFuncBase64Sha512(t *testing.T) {
testFunction(t, testFunctionConfig{
Cases: []testFunctionCase{
{
`${base64sha512("test")}`,
"7iaw3Ur350mqGo7jwQrpkj9hiYB3Lkc/iBml1JQODbJ6wYX4oOHV+E+IvIh/1nsUNzLDBMxfqa2Ob1f1ACio/w==",
false,
},
{ // This will differ because we're base64-encoding the hex representation, not the raw bytes
`${base64encode(sha512("test"))}`,
"ZWUyNmIwZGQ0YWY3ZTc0OWFhMWE4ZWUzYzEwYWU5OTIzZjYxODk4MDc3MmU0NzNmODgxOWE1ZDQ5NDBlMGRiMjdhYzE4NWY4YTBlMWQ1Zjg0Zjg4YmM4ODdmZDY3YjE0MzczMmMzMDRjYzVmYTlhZDhlNmY1N2Y1MDAyOGE4ZmY=",
false,
},
},
})
}
func TestInterpolateFuncMd5(t *testing.T) {
testFunction(t, testFunctionConfig{
Cases: []testFunctionCase{


@ -327,6 +327,10 @@ func loadAtlasHcl(list *ast.ObjectList) (*AtlasConfig, error) {
// represents exactly one module definition in the HCL configuration.
// We leave it up to another pass to merge them together.
func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) {
if err := assertAllBlocksHaveNames("module", list); err != nil {
return nil, err
}
list = list.Children()
if len(list.Items) == 0 {
return nil, nil
@ -391,12 +395,12 @@ func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) {
// LoadOutputsHcl recurses into the given HCL object and turns
// it into a mapping of outputs.
func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) {
list = list.Children()
if len(list.Items) == 0 {
return nil, fmt.Errorf(
"'output' must be followed by exactly one string: a name")
if err := assertAllBlocksHaveNames("output", list); err != nil {
return nil, err
}
list = list.Children()
// Go through each object and turn it into an actual result.
result := make([]*Output, 0, len(list.Items))
for _, item := range list.Items {
@ -450,12 +454,12 @@ func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) {
// LoadVariablesHcl recurses into the given HCL object and turns
// it into a list of variables.
func loadVariablesHcl(list *ast.ObjectList) ([]*Variable, error) {
list = list.Children()
if len(list.Items) == 0 {
return nil, fmt.Errorf(
"'variable' must be followed by exactly one strings: a name")
if err := assertAllBlocksHaveNames("variable", list); err != nil {
return nil, err
}
list = list.Children()
// hclVariable is the structure each variable is decoded into
type hclVariable struct {
DeclaredType string `hcl:"type"`
@ -531,6 +535,10 @@ func loadVariablesHcl(list *ast.ObjectList) ([]*Variable, error) {
// LoadProvidersHcl recurses into the given HCL object and turns
// it into a mapping of provider configs.
func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) {
if err := assertAllBlocksHaveNames("provider", list); err != nil {
return nil, err
}
list = list.Children()
if len(list.Items) == 0 {
return nil, nil
@ -592,6 +600,10 @@ func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) {
// represents exactly one data definition in the HCL configuration.
// We leave it up to another pass to merge them together.
func loadDataResourcesHcl(list *ast.ObjectList) ([]*Resource, error) {
if err := assertAllBlocksHaveNames("data", list); err != nil {
return nil, err
}
list = list.Children()
if len(list.Items) == 0 {
return nil, nil
@ -901,6 +913,10 @@ func loadManagedResourcesHcl(list *ast.ObjectList) ([]*Resource, error) {
}
func loadProvisionersHcl(list *ast.ObjectList, connInfo map[string]interface{}) ([]*Provisioner, error) {
if err := assertAllBlocksHaveNames("provisioner", list); err != nil {
return nil, err
}
list = list.Children()
if len(list.Items) == 0 {
return nil, nil
@ -1023,6 +1039,29 @@ func hclObjectMap(os *hclobj.Object) map[string]ast.ListNode {
}
*/
// assertAllBlocksHaveNames returns an error if any of the items in
// the given object list are blocks without keys (like "module {}")
// or simple assignments (like "module = 1"). It returns nil if
// neither of these things is true.
//
// The given name is used in any generated error messages, and should
// be the name of the block we're dealing with. The given list should
// be the result of calling .Filter on an object list with that same
// name.
func assertAllBlocksHaveNames(name string, list *ast.ObjectList) error {
if elem := list.Elem(); len(elem.Items) != 0 {
switch et := elem.Items[0].Val.(type) {
case *ast.ObjectType:
pos := et.Lbrace
return fmt.Errorf("%s: %q must be followed by a name", pos, name)
default:
pos := elem.Items[0].Val.Pos()
return fmt.Errorf("%s: %q must be a configuration block", pos, name)
}
}
return nil
}
func checkHCLKeys(node ast.Node, valid []string) error {
var list *ast.ObjectList
switch n := node.(type) {
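For illustration, the two malformed forms the new helper rejects, shown for module blocks; the same check is wired into the output, variable, provider, data, and provisioner loaders above:

# Rejected: a block without a name ("module" must be followed by a name).
module {
  source = "./child"
}

# Rejected: a simple assignment ("module" must be a configuration block).
module = 1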


@ -314,6 +314,18 @@ func TestLoadFileBasic_modules(t *testing.T) {
}
}
func TestLoadFile_unnamedModule(t *testing.T) {
_, err := LoadFile(filepath.Join(fixtureDir, "module-unnamed.tf"))
if err == nil {
t.Fatalf("bad: expected error")
}
errorStr := err.Error()
if !strings.Contains(errorStr, `"module" must be followed`) {
t.Fatalf("bad: expected error has wrong text: %s", errorStr)
}
}
func TestLoadFile_outputDependsOn(t *testing.T) {
c, err := LoadFile(filepath.Join(fixtureDir, "output-depends-on.tf"))
if err != nil {
@ -696,7 +708,7 @@ func TestLoadFile_variableNoName(t *testing.T) {
}
errorStr := err.Error()
if !strings.Contains(errorStr, "'variable' must be followed") {
if !strings.Contains(errorStr, `"variable" must be followed`) {
t.Fatalf("bad: expected error has wrong text: %s", errorStr)
}
}
@ -740,7 +752,7 @@ func TestLoadFile_unnamedOutput(t *testing.T) {
}
errorStr := err.Error()
if !strings.Contains(errorStr, "'output' must be followed") {
if !strings.Contains(errorStr, `"output" must be followed`) {
t.Fatalf("bad: expected error has wrong text: %s", errorStr)
}
}
