gofmt files from recently merged PRs
parent 40f09b7cbd
commit f9efede852
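This commit is formatting-only: the hunks below drop stray semicolons, remove parentheses around range expressions and if conditions, collapse "interface {}" to "interface{}", sort import blocks, and re-align struct fields and map entries (which is why some removed/added pairs below read identically, the difference being whitespace only). Running gofmt -w over the affected files reproduces this class of change. As a quick, hypothetical illustration of these rewrites in Go, with made-up identifiers not taken from this diff:

package main

import "fmt"

// After gofmt. Each comment shows the pre-gofmt form of that line,
// mirroring the kinds of edits in the hunks below.
func printAll(items []interface{}) { // was: items []interface {}
	for _, item := range items { // was: range(items)
		v := item // was: v := item;
		if v != nil { // was: if (v != nil) {
			fmt.Println(v)
		}
	}
}

func main() {
	printAll([]interface{}{"a", 1, nil})
}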
@@ -62,7 +62,7 @@ type AWSClient struct {
 kinesisconn *kinesis.Kinesis
 elasticacheconn *elasticache.ElastiCache
 lambdaconn *lambda.Lambda
-opsworksconn *opsworks.OpsWorks
+opsworksconn *opsworks.OpsWorks
 }

 // Client configures and returns a fully initialized AWSClient
@@ -5,7 +5,7 @@ import (
 "github.com/hashicorp/terraform/helper/schema"
 )

-func makeAwsStringList(in []interface {}) []*string {
+func makeAwsStringList(in []interface{}) []*string {
 ret := make([]*string, len(in), len(in))
 for i := 0; i < len(in); i++ {
 ret[i] = aws.String(in[i].(string))
@@ -219,8 +219,8 @@ func Provider() terraform.ResourceProvider {
 "aws_opsworks_ganglia_layer": resourceAwsOpsworksGangliaLayer(),
 "aws_opsworks_custom_layer": resourceAwsOpsworksCustomLayer(),
 "aws_proxy_protocol_policy": resourceAwsProxyProtocolPolicy(),
-"aws_rds_cluster": resourceAwsRDSCluster(),
-"aws_rds_cluster_instance": resourceAwsRDSClusterInstance(),
+"aws_rds_cluster": resourceAwsRDSCluster(),
+"aws_rds_cluster_instance": resourceAwsRDSClusterInstance(),
 "aws_route53_delegation_set": resourceAwsRoute53DelegationSet(),
 "aws_route53_record": resourceAwsRoute53Record(),
 "aws_route53_zone_association": resourceAwsRoute53ZoneAssociation(),
@@ -75,10 +75,10 @@ func resourceAwsDbInstance() *schema.Resource {
 },

 "identifier": &schema.Schema{
-Type: schema.TypeString,
-Required: true,
-ForceNew: true,
-ValidateFunc: validateRdsId,
+Type: schema.TypeString,
+Required: true,
+ForceNew: true,
+ValidateFunc: validateRdsId,
 },

 "instance_class": &schema.Schema{
@@ -6,7 +6,7 @@ import (

 func resourceAwsOpsworksGangliaLayer() *schema.Resource {
 layerType := &opsworksLayerType{
-TypeName: "monitoring-master",
+TypeName: "monitoring-master",
 DefaultLayerName: "Ganglia",

 Attributes: map[string]*opsworksLayerTypeAttribute{
@@ -306,9 +306,9 @@ func resourceAwsOpsworksStackCreate(d *schema.ResourceData, meta interface{}) er

 req := &opsworks.CreateStackInput{
 DefaultInstanceProfileArn: aws.String(d.Get("default_instance_profile_arn").(string)),
-Name: aws.String(d.Get("name").(string)),
-Region: aws.String(d.Get("region").(string)),
-ServiceRoleArn: aws.String(d.Get("service_role_arn").(string)),
+Name: aws.String(d.Get("name").(string)),
+Region: aws.String(d.Get("region").(string)),
+ServiceRoleArn: aws.String(d.Get("service_role_arn").(string)),
 }
 inVpc := false
 if vpcId, ok := d.GetOk("vpc_id"); ok {
@@ -4,7 +4,7 @@ import (
 "bytes"
 "encoding/json"
 "fmt"
-"regexp"
+"regexp"
 "sort"
 "strings"

@@ -460,22 +460,22 @@ func expandResourceRecords(recs []interface{}, typeStr string) []*route53.Resour
 }

 func validateRdsId(v interface{}, k string) (ws []string, errors []error) {
-value := v.(string)
-if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) {
-errors = append(errors, fmt.Errorf(
-"only lowercase alphanumeric characters and hyphens allowed in %q", k))
-}
-if !regexp.MustCompile(`^[a-z]`).MatchString(value) {
-errors = append(errors, fmt.Errorf(
-"first character of %q must be a letter", k))
-}
-if regexp.MustCompile(`--`).MatchString(value) {
-errors = append(errors, fmt.Errorf(
-"%q cannot contain two consecutive hyphens", k))
-}
-if regexp.MustCompile(`-$`).MatchString(value) {
-errors = append(errors, fmt.Errorf(
-"%q cannot end with a hyphen", k))
-}
-return
+value := v.(string)
+if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) {
+errors = append(errors, fmt.Errorf(
+"only lowercase alphanumeric characters and hyphens allowed in %q", k))
+}
+if !regexp.MustCompile(`^[a-z]`).MatchString(value) {
+errors = append(errors, fmt.Errorf(
+"first character of %q must be a letter", k))
+}
+if regexp.MustCompile(`--`).MatchString(value) {
+errors = append(errors, fmt.Errorf(
+"%q cannot contain two consecutive hyphens", k))
+}
+if regexp.MustCompile(`-$`).MatchString(value) {
+errors = append(errors, fmt.Errorf(
+"%q cannot end with a hyphen", k))
+}
+return
 }
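For context on the validateRdsId helper above, which the earlier aws_db_instance hunk wires to the "identifier" field via ValidateFunc: the following is a small, self-contained sketch of how the validator behaves when called directly. The main wrapper and the sample identifiers are illustrative only, not part of this commit; the function body is copied from the hunk above.

package main

import (
	"fmt"
	"regexp"
)

// validateRdsId rejects identifiers that use anything other than
// lowercase alphanumerics and hyphens, start with a non-letter,
// contain "--", or end with a hyphen.
func validateRdsId(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)
	if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) {
		errors = append(errors, fmt.Errorf(
			"only lowercase alphanumeric characters and hyphens allowed in %q", k))
	}
	if !regexp.MustCompile(`^[a-z]`).MatchString(value) {
		errors = append(errors, fmt.Errorf(
			"first character of %q must be a letter", k))
	}
	if regexp.MustCompile(`--`).MatchString(value) {
		errors = append(errors, fmt.Errorf(
			"%q cannot contain two consecutive hyphens", k))
	}
	if regexp.MustCompile(`-$`).MatchString(value) {
		errors = append(errors, fmt.Errorf(
			"%q cannot end with a hyphen", k))
	}
	return
}

func main() {
	// Hypothetical inputs: the first passes, the second collects three errors.
	for _, id := range []string{"my-db-1", "1bad--id-"} {
		_, errs := validateRdsId(id, "identifier")
		fmt.Printf("%s -> %d validation error(s)\n", id, len(errs))
	}
}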
@@ -1,7 +1,7 @@
 package aws

 import (
-"fmt"
+"fmt"
 "log"

 "github.com/aws/aws-sdk-go/aws"

@@ -20,7 +20,7 @@ func setTagsRDS(conn *rds.RDS, d *schema.ResourceData, arn string) error {

 // Set tags
 if len(remove) > 0 {
-log.Printf("[DEBUG] Removing tags: %s", remove)
+log.Printf("[DEBUG] Removing tags: %s", remove)
 k := make([]*string, len(remove), len(remove))
 for i, t := range remove {
 k[i] = t.Key

@@ -35,7 +35,7 @@ func setTagsRDS(conn *rds.RDS, d *schema.ResourceData, arn string) error {
 }
 }
 if len(create) > 0 {
-log.Printf("[DEBUG] Creating tags: %s", create)
+log.Printf("[DEBUG] Creating tags: %s", create)
 _, err := conn.AddTagsToResource(&rds.AddTagsToResourceInput{
 ResourceName: aws.String(arn),
 Tags: create,

@@ -96,18 +96,18 @@ func tagsToMapRDS(ts []*rds.Tag) map[string]string {
 }

 func saveTagsRDS(conn *rds.RDS, d *schema.ResourceData, arn string) error {
-resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{
-ResourceName: aws.String(arn),
-})
+resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{
+ResourceName: aws.String(arn),
+})

-if err != nil {
-return fmt.Errorf("[DEBUG] Error retreiving tags for ARN: %s", arn)
-}
+if err != nil {
+return fmt.Errorf("[DEBUG] Error retreiving tags for ARN: %s", arn)
+}

-var dt []*rds.Tag
-if len(resp.TagList) > 0 {
-dt = resp.TagList
-}
+var dt []*rds.Tag
+if len(resp.TagList) > 0 {
+dt = resp.TagList
+}

-return d.Set("tags", tagsToMapRDS(dt))
+return d.Set("tags", tagsToMapRDS(dt))
 }
@@ -297,15 +297,15 @@ func resourceAzureInstanceCreate(d *schema.ResourceData, meta interface{}) (err
 if err != nil {
 return fmt.Errorf("Error configuring %s for Windows: %s", name, err)
 }

 if domain_name, ok := d.GetOk("domain_name"); ok {
 err = vmutils.ConfigureWindowsToJoinDomain(
-&role,
-d.Get("domain_username").(string),
-d.Get("domain_password").(string),
-domain_name.(string),
+&role,
+d.Get("domain_username").(string),
+d.Get("domain_password").(string),
+domain_name.(string),
 d.Get("domain_ou").(string),
-)
+)
 if err != nil {
 return fmt.Errorf("Error configuring %s for WindowsToJoinDomain: %s", name, err)
 }
@@ -23,7 +23,7 @@ func MetadataRetryWrapper(update func() error) error {
 }
 }

-return fmt.Errorf("Failed to update metadata after %d retries", attempt);
+return fmt.Errorf("Failed to update metadata after %d retries", attempt)
 }

 // Update the metadata (serverMD) according to the provided diff (oldMDMap v

@@ -51,7 +51,7 @@ func MetadataUpdate(oldMDMap map[string]interface{}, newMDMap map[string]interfa
 // Reformat old metadata into a list
 serverMD.Items = nil
 for key, val := range curMDMap {
-v := val;
+v := val
 serverMD.Items = append(serverMD.Items, &compute.MetadataItems{
 Key: key,
 Value: &v,

@@ -60,7 +60,7 @@ func MetadataUpdate(oldMDMap map[string]interface{}, newMDMap map[string]interfa
 }

 // Format metadata from the server data format -> schema data format
-func MetadataFormatSchema(md *compute.Metadata) (map[string]interface{}) {
+func MetadataFormatSchema(md *compute.Metadata) map[string]interface{} {
 newMD := make(map[string]interface{})

 for _, kv := range md.Items {
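The MetadataRetryWrapper touched above retries a metadata-update callback and gives up with "Failed to update metadata after %d retries"; the later project-metadata hunks call it as MetadataRetryWrapper(createMD) and MetadataRetryWrapper(updateMD), and the instance hunks reload state on a fingerprint mismatch before retrying. A generic, hypothetical sketch of that retry pattern follows; it is not the provider's actual implementation, and the attempt limit is assumed.

package main

import (
	"errors"
	"fmt"
)

// retryWrapper is a hypothetical stand-in for MetadataRetryWrapper:
// it keeps invoking update until it succeeds or attempts run out.
func retryWrapper(update func() error) error {
	const maxAttempts = 5 // assumed limit, not taken from the provider
	for attempt := 1; attempt <= maxAttempts; attempt++ {
		if err := update(); err == nil {
			return nil
		}
		// The real callbacks reload the current metadata fingerprint
		// before the next try; omitted in this sketch.
	}
	return fmt.Errorf("Failed to update metadata after %d retries", maxAttempts)
}

func main() {
	calls := 0
	err := retryWrapper(func() error {
		calls++
		if calls < 3 {
			return errors.New("fingerprint mismatch") // simulated conflict
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}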
@@ -507,12 +507,12 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
 func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error {
 config := meta.(*Config)

-instance, err := getInstance(config, d);
+instance, err := getInstance(config, d)
 if err != nil {
 return err
 }

-// Synch metadata
+// Synch metadata
 md := instance.Metadata

 if err = d.Set("metadata", MetadataFormatSchema(md)); err != nil {

@@ -644,7 +644,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err

 zone := d.Get("zone").(string)

-instance, err := getInstance(config, d);
+instance, err := getInstance(config, d)
 if err != nil {
 return err
 }

@@ -658,7 +658,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err

 updateMD := func() error {
 // Reload the instance in the case of a fingerprint mismatch
-instance, err = getInstance(config, d);
+instance, err = getInstance(config, d)
 if err != nil {
 return err
 }
@@ -72,10 +72,10 @@ func resourceComputeProjectMetadataCreate(d *schema.ResourceData, meta interface

 err := MetadataRetryWrapper(createMD)
 if err != nil {
-return err;
+return err
 }

-return resourceComputeProjectMetadataRead(d, meta);
+return resourceComputeProjectMetadataRead(d, meta)
 }

 func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{}) error {

@@ -115,7 +115,7 @@ func resourceComputeProjectMetadataUpdate(d *schema.ResourceData, meta interface

 md := project.CommonInstanceMetadata

-MetadataUpdate(o.(map[string]interface{}), n.(map[string]interface{}), md)
+MetadataUpdate(o.(map[string]interface{}), n.(map[string]interface{}), md)

 op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(config.Project, md).Do()


@@ -133,10 +133,10 @@ func resourceComputeProjectMetadataUpdate(d *schema.ResourceData, meta interface

 err := MetadataRetryWrapper(updateMD)
 if err != nil {
-return err;
+return err
 }

-return resourceComputeProjectMetadataRead(d, meta);
+return resourceComputeProjectMetadataRead(d, meta)
 }

 return nil
@@ -56,8 +56,8 @@ func resourceComputeVpnGatewayCreate(d *schema.ResourceData, meta interface{}) e
 vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute)

 vpnGateway := &compute.TargetVpnGateway{
-Name: name,
-Network: network,
+Name: name,
+Network: network,
 }

 if v, ok := d.GetOk("description"); ok {
@@ -128,8 +128,8 @@ func resourceStorageBucketUpdate(d *schema.ResourceData, meta interface{}) error
 return fmt.Errorf("At most one website block is allowed")
 }

-// Setting fields to "" to be explicit that the PATCH call will
-// delete this field.
+// Setting fields to "" to be explicit that the PATCH call will
+// delete this field.
 if len(websites) == 0 {
 sb.Website.NotFoundPage = ""
 sb.Website.MainPageSuffix = ""
@@ -24,9 +24,9 @@ func resourceStorageBucketAcl() *schema.Resource {
 ForceNew: true,
 },
 "predefined_acl": &schema.Schema{
-Type: schema.TypeString,
-Optional: true,
-ForceNew: true,
+Type: schema.TypeString,
+Optional: true,
+ForceNew: true,
 },
 "role_entity": &schema.Schema{
 Type: schema.TypeList,

@@ -83,7 +83,7 @@ func resourceStorageBucketAclCreate(d *schema.ResourceData, meta interface{}) er
 if len(predefined_acl) > 0 {
 if len(role_entity) > 0 {
 return fmt.Errorf("Error, you cannot specify both " +
-"\"predefined_acl\" and \"role_entity\"");
+"\"predefined_acl\" and \"role_entity\"")
 }

 res, err := config.clientStorage.Buckets.Get(bucket).Do()

@@ -99,9 +99,9 @@ func resourceStorageBucketAclCreate(d *schema.ResourceData, meta interface{}) er
 return fmt.Errorf("Error updating bucket %s: %v", bucket, err)
 }

-return resourceStorageBucketAclRead(d, meta);
+return resourceStorageBucketAclRead(d, meta)
 } else if len(role_entity) > 0 {
-for _, v := range(role_entity) {
+for _, v := range role_entity {
 pair, err := getRoleEntityPair(v.(string))

 bucketAccessControl := &storage.BucketAccessControl{

@@ -118,7 +118,7 @@ func resourceStorageBucketAclCreate(d *schema.ResourceData, meta interface{}) er
 }
 }

-return resourceStorageBucketAclRead(d, meta);
+return resourceStorageBucketAclRead(d, meta)
 }

 if len(default_acl) > 0 {

@@ -135,13 +135,12 @@ func resourceStorageBucketAclCreate(d *schema.ResourceData, meta interface{}) er
 return fmt.Errorf("Error updating bucket %s: %v", bucket, err)
 }

-return resourceStorageBucketAclRead(d, meta);
+return resourceStorageBucketAclRead(d, meta)
 }

 return nil
 }

-
 func resourceStorageBucketAclRead(d *schema.ResourceData, meta interface{}) error {
 config := meta.(*Config)

@@ -153,7 +152,7 @@ func resourceStorageBucketAclRead(d *schema.ResourceData, meta interface{}) erro
 role_entity := make([]interface{}, 0)
 re_local := d.Get("role_entity").([]interface{})
 re_local_map := make(map[string]string)
-for _, v := range(re_local) {
+for _, v := range re_local {
 res, err := getRoleEntityPair(v.(string))

 if err != nil {

@@ -170,7 +169,7 @@ func resourceStorageBucketAclRead(d *schema.ResourceData, meta interface{}) erro
 return err
 }

-for _, v := range(res.Items) {
+for _, v := range res.Items {
 log.Printf("[DEBUG]: examining re %s-%s", v.Role, v.Entity)
 // We only store updates to the locally defined access controls
 if _, in := re_local_map[v.Entity]; in {

@@ -196,7 +195,7 @@ func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) er
 old_re, new_re := o.([]interface{}), n.([]interface{})

 old_re_map := make(map[string]string)
-for _, v := range(old_re) {
+for _, v := range old_re {
 res, err := getRoleEntityPair(v.(string))

 if err != nil {

@@ -207,7 +206,7 @@ func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) er
 old_re_map[res.Entity] = res.Role
 }

-for _, v := range(new_re) {
+for _, v := range new_re {
 pair, err := getRoleEntityPair(v.(string))

 bucketAccessControl := &storage.BucketAccessControl{

@@ -233,7 +232,7 @@ func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) er
 }
 }

-for entity, _ := range(old_re_map) {
+for entity, _ := range old_re_map {
 log.Printf("[DEBUG]: removing entity %s", entity)
 err := config.clientStorage.BucketAccessControls.Delete(bucket, entity).Do()


@@ -242,7 +241,7 @@ func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) er
 }
 }

-return resourceStorageBucketAclRead(d, meta);
+return resourceStorageBucketAclRead(d, meta)
 }

 if d.HasChange("default_acl") {

@@ -261,7 +260,7 @@ func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) er
 return fmt.Errorf("Error updating bucket %s: %v", bucket, err)
 }

-return resourceStorageBucketAclRead(d, meta);
+return resourceStorageBucketAclRead(d, meta)
 }

 return nil

@@ -273,7 +272,7 @@ func resourceStorageBucketAclDelete(d *schema.ResourceData, meta interface{}) er
 bucket := d.Get("bucket").(string)

 re_local := d.Get("role_entity").([]interface{})
-for _, v := range(re_local) {
+for _, v := range re_local {
 res, err := getRoleEntityPair(v.(string))
 if err != nil {
 return err
@@ -2,8 +2,8 @@ package google

 import (
 "fmt"
-"testing"
 "math/rand"
+"testing"
 "time"

 "github.com/hashicorp/terraform/helper/resource"

@@ -24,13 +24,13 @@ var testAclBucketName = fmt.Sprintf("%s-%d", "tf-test-acl-bucket", rand.New(rand

 func TestAccGoogleStorageBucketAcl_basic(t *testing.T) {
 resource.Test(t, resource.TestCase{
-PreCheck: func() { testAccPreCheck(t) },
+PreCheck: func() { testAccPreCheck(t) },
 Providers: testAccProviders,
 CheckDestroy: testAccGoogleStorageBucketAclDestroy,
 Steps: []resource.TestStep{
 resource.TestStep{
 Config: testGoogleStorageBucketsAclBasic1,
-Check: resource.ComposeTestCheckFunc(
+Check: resource.ComposeTestCheckFunc(
 testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic1),
 testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2),
 ),

@@ -41,13 +41,13 @@ func TestAccGoogleStorageBucketAcl_basic(t *testing.T) {

 func TestAccGoogleStorageBucketAcl_upgrade(t *testing.T) {
 resource.Test(t, resource.TestCase{
-PreCheck: func() { testAccPreCheck(t) },
+PreCheck: func() { testAccPreCheck(t) },
 Providers: testAccProviders,
 CheckDestroy: testAccGoogleStorageBucketAclDestroy,
 Steps: []resource.TestStep{
 resource.TestStep{
 Config: testGoogleStorageBucketsAclBasic1,
-Check: resource.ComposeTestCheckFunc(
+Check: resource.ComposeTestCheckFunc(
 testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic1),
 testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2),
 ),

@@ -55,7 +55,7 @@ func TestAccGoogleStorageBucketAcl_upgrade(t *testing.T) {

 resource.TestStep{
 Config: testGoogleStorageBucketsAclBasic2,
-Check: resource.ComposeTestCheckFunc(
+Check: resource.ComposeTestCheckFunc(
 testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2),
 testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic3_owner),
 ),

@@ -63,7 +63,7 @@ func TestAccGoogleStorageBucketAcl_upgrade(t *testing.T) {

 resource.TestStep{
 Config: testGoogleStorageBucketsAclBasicDelete,
-Check: resource.ComposeTestCheckFunc(
+Check: resource.ComposeTestCheckFunc(
 testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic1),
 testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic2),
 testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic3_owner),

@@ -75,13 +75,13 @@ func TestAccGoogleStorageBucketAcl_upgrade(t *testing.T) {

 func TestAccGoogleStorageBucketAcl_downgrade(t *testing.T) {
 resource.Test(t, resource.TestCase{
-PreCheck: func() { testAccPreCheck(t) },
+PreCheck: func() { testAccPreCheck(t) },
 Providers: testAccProviders,
 CheckDestroy: testAccGoogleStorageBucketAclDestroy,
 Steps: []resource.TestStep{
 resource.TestStep{
 Config: testGoogleStorageBucketsAclBasic2,
-Check: resource.ComposeTestCheckFunc(
+Check: resource.ComposeTestCheckFunc(
 testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2),
 testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic3_owner),
 ),

@@ -89,7 +89,7 @@ func TestAccGoogleStorageBucketAcl_downgrade(t *testing.T) {

 resource.TestStep{
 Config: testGoogleStorageBucketsAclBasic3,
-Check: resource.ComposeTestCheckFunc(
+Check: resource.ComposeTestCheckFunc(
 testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2),
 testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic3_reader),
 ),

@@ -97,7 +97,7 @@ func TestAccGoogleStorageBucketAcl_downgrade(t *testing.T) {

 resource.TestStep{
 Config: testGoogleStorageBucketsAclBasicDelete,
-Check: resource.ComposeTestCheckFunc(
+Check: resource.ComposeTestCheckFunc(
 testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic1),
 testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic2),
 testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic3_owner),

@@ -109,7 +109,7 @@ func TestAccGoogleStorageBucketAcl_downgrade(t *testing.T) {

 func TestAccGoogleStorageBucketAcl_predefined(t *testing.T) {
 resource.Test(t, resource.TestCase{
-PreCheck: func() { testAccPreCheck(t) },
+PreCheck: func() { testAccPreCheck(t) },
 Providers: testAccProviders,
 CheckDestroy: testAccGoogleStorageBucketAclDestroy,
 Steps: []resource.TestStep{

@@ -146,7 +146,7 @@ func testAccCheckGoogleStorageBucketAcl(bucket, roleEntityS string) resource.Tes
 return fmt.Errorf("Error retrieving contents of acl for bucket %s: %s", bucket, err)
 }

-if (res.Role != roleEntity.Role) {
+if res.Role != roleEntity.Role {
 return fmt.Errorf("Error, Role mismatch %s != %s", res.Role, roleEntity.Role)
 }


@@ -218,7 +218,6 @@ resource "google_storage_bucket_acl" "acl" {
 }
 `, testAclBucketName, roleEntityBasic2, roleEntityBasic3_reader)

-
 var testGoogleStorageBucketsAclPredefined = fmt.Sprintf(`
 resource "google_storage_bucket" "bucket" {
 name = "%s"
@@ -32,10 +32,10 @@ func resourceStorageBucketObject() *schema.Resource {
 ForceNew: true,
 },
 "predefined_acl": &schema.Schema{
-Type: schema.TypeString,
+Type: schema.TypeString,
 Deprecated: "Please use resource \"storage_object_acl.predefined_acl\" instead.",
-Optional: true,
-ForceNew: true,
+Optional: true,
+ForceNew: true,
 },
 "md5hash": &schema.Schema{
 Type: schema.TypeString,

@@ -75,7 +75,6 @@ func resourceStorageBucketObjectCreate(d *schema.ResourceData, meta interface{})
 insertCall.PredefinedAcl(v.(string))
 }

-
 _, err = insertCall.Do()

 if err != nil {
@@ -1,11 +1,11 @@
 package google

 import (
-"fmt"
-"testing"
-"io/ioutil"
 "crypto/md5"
 "encoding/base64"
+"fmt"
+"io/ioutil"
+"testing"

 "github.com/hashicorp/terraform/helper/resource"
 "github.com/hashicorp/terraform/terraform"

@@ -48,7 +48,6 @@ func testAccCheckGoogleStorageObject(bucket, object, md5 string) resource.TestCh

 objectsService := storage.NewObjectsService(config.clientStorage)

-
 getCall := objectsService.Get(bucket, object)
 res, err := getCall.Do()


@@ -56,7 +55,7 @@ func testAccCheckGoogleStorageObject(bucket, object, md5 string) resource.TestCh
 return fmt.Errorf("Error retrieving contents of object %s: %s", object, err)
 }

-if (md5 != res.Md5Hash) {
+if md5 != res.Md5Hash {
 return fmt.Errorf("Error contents of %s garbled, md5 hashes don't match (%s, %s)", object, md5, res.Md5Hash)
 }

@@ -65,7 +65,7 @@ func resourceStorageObjectAclCreate(d *schema.ResourceData, meta interface{}) er
 if len(predefined_acl) > 0 {
 if len(role_entity) > 0 {
 return fmt.Errorf("Error, you cannot specify both " +
-"\"predefined_acl\" and \"role_entity\"");
+"\"predefined_acl\" and \"role_entity\"")
 }

 res, err := config.clientStorage.Objects.Get(bucket, object).Do()

@@ -74,16 +74,16 @@ func resourceStorageObjectAclCreate(d *schema.ResourceData, meta interface{}) er
 return fmt.Errorf("Error reading object %s: %v", bucket, err)
 }

-res, err = config.clientStorage.Objects.Update(bucket,object,
+res, err = config.clientStorage.Objects.Update(bucket, object,
 res).PredefinedAcl(predefined_acl).Do()

 if err != nil {
 return fmt.Errorf("Error updating object %s: %v", bucket, err)
 }

-return resourceStorageBucketAclRead(d, meta);
+return resourceStorageBucketAclRead(d, meta)
 } else if len(role_entity) > 0 {
-for _, v := range(role_entity) {
+for _, v := range role_entity {
 pair, err := getRoleEntityPair(v.(string))

 objectAccessControl := &storage.ObjectAccessControl{

@@ -101,14 +101,13 @@ func resourceStorageObjectAclCreate(d *schema.ResourceData, meta interface{}) er
 }
 }

-return resourceStorageObjectAclRead(d, meta);
+return resourceStorageObjectAclRead(d, meta)
 }

 return fmt.Errorf("Error, you must specify either " +
-"\"predefined_acl\" or \"role_entity\"");
+"\"predefined_acl\" or \"role_entity\"")
 }

-
 func resourceStorageObjectAclRead(d *schema.ResourceData, meta interface{}) error {
 config := meta.(*Config)

@@ -121,7 +120,7 @@ func resourceStorageObjectAclRead(d *schema.ResourceData, meta interface{}) erro
 role_entity := make([]interface{}, 0)
 re_local := d.Get("role_entity").([]interface{})
 re_local_map := make(map[string]string)
-for _, v := range(re_local) {
+for _, v := range re_local {
 res, err := getRoleEntityPair(v.(string))

 if err != nil {

@@ -138,10 +137,10 @@ func resourceStorageObjectAclRead(d *schema.ResourceData, meta interface{}) erro
 return err
 }

-for _, v := range(res.Items) {
+for _, v := range res.Items {
 role := ""
 entity := ""
-for key, val := range (v.(map[string]interface{})) {
+for key, val := range v.(map[string]interface{}) {
 if key == "role" {
 role = val.(string)
 } else if key == "entity" {

@@ -172,7 +171,7 @@ func resourceStorageObjectAclUpdate(d *schema.ResourceData, meta interface{}) er
 old_re, new_re := o.([]interface{}), n.([]interface{})

 old_re_map := make(map[string]string)
-for _, v := range(old_re) {
+for _, v := range old_re {
 res, err := getRoleEntityPair(v.(string))

 if err != nil {

@@ -183,7 +182,7 @@ func resourceStorageObjectAclUpdate(d *schema.ResourceData, meta interface{}) er
 old_re_map[res.Entity] = res.Role
 }

-for _, v := range(new_re) {
+for _, v := range new_re {
 pair, err := getRoleEntityPair(v.(string))

 objectAccessControl := &storage.ObjectAccessControl{

@@ -209,7 +208,7 @@ func resourceStorageObjectAclUpdate(d *schema.ResourceData, meta interface{}) er
 }
 }

-for entity, _ := range(old_re_map) {
+for entity, _ := range old_re_map {
 log.Printf("[DEBUG]: removing entity %s", entity)
 err := config.clientStorage.ObjectAccessControls.Delete(bucket, object, entity).Do()


@@ -218,7 +217,7 @@ func resourceStorageObjectAclUpdate(d *schema.ResourceData, meta interface{}) er
 }
 }

-return resourceStorageObjectAclRead(d, meta);
+return resourceStorageObjectAclRead(d, meta)
 }

 return nil

@@ -231,7 +230,7 @@ func resourceStorageObjectAclDelete(d *schema.ResourceData, meta interface{}) er
 object := d.Get("object").(string)

 re_local := d.Get("role_entity").([]interface{})
-for _, v := range(re_local) {
+for _, v := range re_local {
 res, err := getRoleEntityPair(v.(string))
 if err != nil {
 return err
@@ -2,9 +2,9 @@ package google

 import (
 "fmt"
-"testing"
-"math/rand"
 "io/ioutil"
+"math/rand"
+"testing"
 "time"

 "github.com/hashicorp/terraform/helper/resource"

@@ -32,7 +32,7 @@ func TestAccGoogleStorageObjectAcl_basic(t *testing.T) {
 Steps: []resource.TestStep{
 resource.TestStep{
 Config: testGoogleStorageObjectsAclBasic1,
-Check: resource.ComposeTestCheckFunc(
+Check: resource.ComposeTestCheckFunc(
 testAccCheckGoogleStorageObjectAcl(testAclBucketName,
 testAclObjectName, roleEntityBasic1),
 testAccCheckGoogleStorageObjectAcl(testAclBucketName,

@@ -58,7 +58,7 @@ func TestAccGoogleStorageObjectAcl_upgrade(t *testing.T) {
 Steps: []resource.TestStep{
 resource.TestStep{
 Config: testGoogleStorageObjectsAclBasic1,
-Check: resource.ComposeTestCheckFunc(
+Check: resource.ComposeTestCheckFunc(
 testAccCheckGoogleStorageObjectAcl(testAclBucketName,
 testAclObjectName, roleEntityBasic1),
 testAccCheckGoogleStorageObjectAcl(testAclBucketName,

@@ -68,7 +68,7 @@ func TestAccGoogleStorageObjectAcl_upgrade(t *testing.T) {

 resource.TestStep{
 Config: testGoogleStorageObjectsAclBasic2,
-Check: resource.ComposeTestCheckFunc(
+Check: resource.ComposeTestCheckFunc(
 testAccCheckGoogleStorageObjectAcl(testAclBucketName,
 testAclObjectName, roleEntityBasic2),
 testAccCheckGoogleStorageObjectAcl(testAclBucketName,

@@ -78,7 +78,7 @@ func TestAccGoogleStorageObjectAcl_upgrade(t *testing.T) {

 resource.TestStep{
 Config: testGoogleStorageObjectsAclBasicDelete,
-Check: resource.ComposeTestCheckFunc(
+Check: resource.ComposeTestCheckFunc(
 testAccCheckGoogleStorageObjectAclDelete(testAclBucketName,
 testAclObjectName, roleEntityBasic1),
 testAccCheckGoogleStorageObjectAclDelete(testAclBucketName,

@@ -106,7 +106,7 @@ func TestAccGoogleStorageObjectAcl_downgrade(t *testing.T) {
 Steps: []resource.TestStep{
 resource.TestStep{
 Config: testGoogleStorageObjectsAclBasic2,
-Check: resource.ComposeTestCheckFunc(
+Check: resource.ComposeTestCheckFunc(
 testAccCheckGoogleStorageObjectAcl(testAclBucketName,
 testAclObjectName, roleEntityBasic2),
 testAccCheckGoogleStorageObjectAcl(testAclBucketName,

@@ -116,7 +116,7 @@ func TestAccGoogleStorageObjectAcl_downgrade(t *testing.T) {

 resource.TestStep{
 Config: testGoogleStorageObjectsAclBasic3,
-Check: resource.ComposeTestCheckFunc(
+Check: resource.ComposeTestCheckFunc(
 testAccCheckGoogleStorageObjectAcl(testAclBucketName,
 testAclObjectName, roleEntityBasic2),
 testAccCheckGoogleStorageObjectAcl(testAclBucketName,

@@ -126,7 +126,7 @@ func TestAccGoogleStorageObjectAcl_downgrade(t *testing.T) {

 resource.TestStep{
 Config: testGoogleStorageObjectsAclBasicDelete,
-Check: resource.ComposeTestCheckFunc(
+Check: resource.ComposeTestCheckFunc(
 testAccCheckGoogleStorageObjectAclDelete(testAclBucketName,
 testAclObjectName, roleEntityBasic1),
 testAccCheckGoogleStorageObjectAclDelete(testAclBucketName,

@@ -171,7 +171,7 @@ func testAccCheckGoogleStorageObjectAcl(bucket, object, roleEntityS string) reso
 return fmt.Errorf("Error retrieving contents of acl for bucket %s: %s", bucket, err)
 }

-if (res.Role != roleEntity.Role) {
+if res.Role != roleEntity.Role {
 return fmt.Errorf("Error, Role mismatch %s != %s", res.Role, roleEntity.Role)
 }


@@ -289,7 +289,7 @@ resource "google_storage_object_acl" "acl" {
 role_entity = ["%s", "%s"]
 }
 `, testAclBucketName, testAclObjectName, tfObjectAcl.Name(),
-roleEntityBasic2, roleEntityBasic3_reader)
+roleEntityBasic2, roleEntityBasic3_reader)

 var testGoogleStorageObjectsAclPredefined = fmt.Sprintf(`
 resource "google_storage_bucket" "bucket" {
@@ -136,7 +136,7 @@ func resourceBlockStorageVolumeV1Create(d *schema.ResourceData, meta interface{}
 v.ID)

 stateConf := &resource.StateChangeConf{
-Pending: []string{"downloading"},
+Pending: []string{"downloading"},
 Target: "available",
 Refresh: VolumeV1StateRefreshFunc(blockStorageClient, v.ID),
 Timeout: 10 * time.Minute,
@@ -610,7 +610,6 @@ func resourceComputeInstanceV2Update(d *schema.ResourceData, meta interface{}) e

 log.Printf("[DEBUG] Security groups to remove: %v", secgroupsToRemove)

-
 for _, g := range secgroupsToRemove.List() {
 err := secgroups.RemoveServerFromGroup(computeClient, d.Id(), g.(string)).ExtractErr()
 if err != nil {
@@ -340,10 +340,10 @@ func jobFromResourceData(d *schema.ResourceData) (*rundeck.JobDetail, error) {
 LogLevel: d.Get("log_level").(string),
 AllowConcurrentExecutions: d.Get("allow_concurrent_executions").(bool),
 Dispatch: &rundeck.JobDispatch{
-MaxThreadCount: d.Get("max_thread_count").(int),
-ContinueOnError: d.Get("continue_on_error").(bool),
-RankAttribute: d.Get("rank_attribute").(string),
-RankOrder: d.Get("rank_order").(string),
+MaxThreadCount: d.Get("max_thread_count").(int),
+ContinueOnError: d.Get("continue_on_error").(bool),
+RankAttribute: d.Get("rank_attribute").(string),
+RankOrder: d.Get("rank_order").(string),
 },
 }

@@ -326,7 +326,6 @@ func (p *Provisioner) runChefClientFunc(
 cmd = fmt.Sprintf("%s -j %q -E %q", chefCmd, fb, p.Environment)
 }

-
 if p.LogToFile {
 if err := os.MkdirAll(logfileDir, 0755); err != nil {
 return fmt.Errorf("Error creating logfile directory %s: %v", logfileDir, err)