removed extra parentheses

Panagiotis Moustafellos 2015-10-08 15:48:04 +03:00
parent 4637a34825
commit e4845f75cc
43 changed files with 89 additions and 89 deletions
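
All of the changes below are purely syntactic: every removed line differs from its replacement only by parentheses that Go's grammar already implies, so behavior is unchanged. The following is a minimal standalone sketch of the patterns involved (illustrative only; identifiers such as entryType, wantErr, and raw are made up and this is not code from the commit):

```go
// Illustrative sketch, not part of this commit; the identifiers are made up.
package main

import "fmt"

func main() {
	// 1. Parentheses around a pointer dereference are redundant:
	//    *(p) and *p are the same expression.
	s := "ami-12345"
	p := &s
	fmt.Println(*(p) == *p) // true

	// 2. A comparison already yields a bool, so (x == y) needs no parentheses
	//    when assigned or passed as an argument.
	entryType := "egress"
	isEgress := entryType == "egress" // same as (entryType == "egress")
	fmt.Println(isEgress)

	// 3. == and != share one precedence level and associate left to right,
	//    so err != nil != wantErr parses as (err != nil) != wantErr.
	var err error
	wantErr := false
	gotErr := err != nil
	fmt.Println((gotErr != wantErr) == (err != nil != wantErr)) // true

	// 4. && binds tighter than ||, so a && b || !a && !b is the same as
	//    (a && b) || (!a && !b).
	a, b := true, false
	fmt.Println((a && b || !a && !b) == ((a && b) || (!a && !b))) // true

	// 5. The operand of a type assertion does not need parentheses:
	//    (v).([]interface{}) and v.([]interface{}) are equivalent.
	var raw interface{} = []interface{}{1, 2, 3}
	fmt.Println(len(raw.([]interface{}))) // 3
}
```

Each pattern relies on a documented precedence or grammar rule, which is why the diff is an even 89 additions against 89 deletions.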

View File

@@ -26,7 +26,7 @@ func unwrapAwsStringList(in []*string) []string {
 ret := make([]string, len(in), len(in))
 for i := 0; i < len(in); i++ {
 if in[i] != nil {
-ret[i] = *(in[i])
+ret[i] = *in[i]
 }
 }
 return ret

View File

@@ -29,7 +29,7 @@ func expandNetworkAclEntries(configured []interface{}, entryType string) ([]*ec2
 From: aws.Int64(int64(data["from_port"].(int))),
 To: aws.Int64(int64(data["to_port"].(int))),
 },
-Egress: aws.Bool((entryType == "egress")),
+Egress: aws.Bool(entryType == "egress"),
 RuleAction: aws.String(data["action"].(string)),
 RuleNumber: aws.Int64(int64(data["rule_no"].(int))),
 CidrBlock: aws.String(data["cidr_block"].(string)),

View File

@@ -130,7 +130,7 @@ func resourceAwsAmiRead(d *schema.ResourceData, meta interface{}) error {
 }
 image := res.Images[0]
-state := *(image.State)
+state := *image.State
 if state == "pending" {
 // This could happen if a user manually adds an image we didn't create
@@ -142,7 +142,7 @@ func resourceAwsAmiRead(d *schema.ResourceData, meta interface{}) error {
 if err != nil {
 return err
 }
-state = *(image.State)
+state = *image.State
 }
 if state == "deregistered" {
@@ -170,22 +170,22 @@ func resourceAwsAmiRead(d *schema.ResourceData, meta interface{}) error {
 for _, blockDev := range image.BlockDeviceMappings {
 if blockDev.Ebs != nil {
 ebsBlockDev := map[string]interface{}{
-"device_name": *(blockDev.DeviceName),
-"delete_on_termination": *(blockDev.Ebs.DeleteOnTermination),
-"encrypted": *(blockDev.Ebs.Encrypted),
+"device_name": *blockDev.DeviceName,
+"delete_on_termination": *blockDev.Ebs.DeleteOnTermination,
+"encrypted": *blockDev.Ebs.Encrypted,
 "iops": 0,
-"snapshot_id": *(blockDev.Ebs.SnapshotId),
-"volume_size": int(*(blockDev.Ebs.VolumeSize)),
-"volume_type": *(blockDev.Ebs.VolumeType),
+"snapshot_id": *blockDev.Ebs.SnapshotId,
+"volume_size": int(*blockDev.Ebs.VolumeSize),
+"volume_type": *blockDev.Ebs.VolumeType,
 }
 if blockDev.Ebs.Iops != nil {
-ebsBlockDev["iops"] = int(*(blockDev.Ebs.Iops))
+ebsBlockDev["iops"] = int(*blockDev.Ebs.Iops)
 }
 ebsBlockDevs = append(ebsBlockDevs, ebsBlockDev)
 } else {
 ephemeralBlockDevs = append(ephemeralBlockDevs, map[string]interface{}{
-"device_name": *(blockDev.DeviceName),
-"virtual_name": *(blockDev.VirtualName),
+"device_name": *blockDev.DeviceName,
+"virtual_name": *blockDev.VirtualName,
 })
 }
 }
@@ -301,7 +301,7 @@ func resourceAwsAmiWaitForAvailable(id string, client *ec2.EC2) (*ec2.Image, err
 return nil, fmt.Errorf("new AMI vanished while pending")
 }
-state := *(res.Images[0].State)
+state := *res.Images[0].State
 if state == "pending" {
 // Give it a few seconds before we poll again.
@@ -316,7 +316,7 @@ func resourceAwsAmiWaitForAvailable(id string, client *ec2.EC2) (*ec2.Image, err
 // If we're not pending or available then we're in one of the invalid/error
 // states, so stop polling and bail out.
-stateReason := *(res.Images[0].StateReason)
+stateReason := *res.Images[0].StateReason
 return nil, fmt.Errorf("new AMI became %s while pending: %s", state, stateReason)
 }
 }

View File

@@ -384,7 +384,7 @@ func resourceAwsDynamoDbTableUpdate(d *schema.ResourceData, meta interface{}) er
 updates = append(updates, update)
 // Hash key is required, range key isn't
-hashkey_type, err := getAttributeType(d, *(gsi.KeySchema[0].AttributeName))
+hashkey_type, err := getAttributeType(d, *gsi.KeySchema[0].AttributeName)
 if err != nil {
 return err
 }
@@ -396,7 +396,7 @@ func resourceAwsDynamoDbTableUpdate(d *schema.ResourceData, meta interface{}) er
 // If there's a range key, there will be 2 elements in KeySchema
 if len(gsi.KeySchema) == 2 {
-rangekey_type, err := getAttributeType(d, *(gsi.KeySchema[1].AttributeName))
+rangekey_type, err := getAttributeType(d, *gsi.KeySchema[1].AttributeName)
 if err != nil {
 return err
 }
@@ -480,8 +480,8 @@ func resourceAwsDynamoDbTableUpdate(d *schema.ResourceData, meta interface{}) er
 capacityUpdated := false
-if int64(gsiReadCapacity) != *(gsi.ProvisionedThroughput.ReadCapacityUnits) ||
-int64(gsiWriteCapacity) != *(gsi.ProvisionedThroughput.WriteCapacityUnits) {
+if int64(gsiReadCapacity) != *gsi.ProvisionedThroughput.ReadCapacityUnits ||
+int64(gsiWriteCapacity) != *gsi.ProvisionedThroughput.WriteCapacityUnits {
 capacityUpdated = true
 }
@@ -544,8 +544,8 @@ func resourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) erro
 attributes := []interface{}{}
 for _, attrdef := range table.AttributeDefinitions {
 attribute := map[string]string{
-"name": *(attrdef.AttributeName),
-"type": *(attrdef.AttributeType),
+"name": *attrdef.AttributeName,
+"type": *attrdef.AttributeType,
 }
 attributes = append(attributes, attribute)
 log.Printf("[DEBUG] Added Attribute: %s", attribute["name"])
@@ -556,9 +556,9 @@ func resourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) erro
 gsiList := make([]map[string]interface{}, 0, len(table.GlobalSecondaryIndexes))
 for _, gsiObject := range table.GlobalSecondaryIndexes {
 gsi := map[string]interface{}{
-"write_capacity": *(gsiObject.ProvisionedThroughput.WriteCapacityUnits),
-"read_capacity": *(gsiObject.ProvisionedThroughput.ReadCapacityUnits),
-"name": *(gsiObject.IndexName),
+"write_capacity": *gsiObject.ProvisionedThroughput.WriteCapacityUnits,
+"read_capacity": *gsiObject.ProvisionedThroughput.ReadCapacityUnits,
+"name": *gsiObject.IndexName,
 }
 for _, attribute := range gsiObject.KeySchema {
@@ -571,7 +571,7 @@ func resourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) erro
 }
 }
-gsi["projection_type"] = *(gsiObject.Projection.ProjectionType)
+gsi["projection_type"] = *gsiObject.Projection.ProjectionType
 gsi["non_key_attributes"] = gsiObject.Projection.NonKeyAttributes
 gsiList = append(gsiList, gsi)
@@ -647,7 +647,7 @@ func createGSIFromData(data *map[string]interface{}) dynamodb.GlobalSecondaryInd
 func getGlobalSecondaryIndex(indexName string, indexList []*dynamodb.GlobalSecondaryIndexDescription) (*dynamodb.GlobalSecondaryIndexDescription, error) {
 for _, gsi := range indexList {
-if *(gsi.IndexName) == indexName {
+if *gsi.IndexName == indexName {
 return gsi, nil
 }
 }
@@ -726,7 +726,7 @@ func waitForTableToBeActive(tableName string, meta interface{}) error {
 return err
 }
-activeState = *(result.Table.TableStatus) == "ACTIVE"
+activeState = *result.Table.TableStatus == "ACTIVE"
 // Wait for a few seconds
 if !activeState {

View File

@@ -211,7 +211,7 @@ func dynamoDbAttributesToMap(attributes *[]*dynamodb.AttributeDefinition) map[st
 attrmap := make(map[string]string)
 for _, attrdef := range *attributes {
-attrmap[*(attrdef.AttributeName)] = *(attrdef.AttributeType)
+attrmap[*attrdef.AttributeName] = *attrdef.AttributeType
 }
 return attrmap

View File

@@ -134,7 +134,7 @@ func resourceAwsEipRead(d *schema.ResourceData, meta interface{}) error {
 // Verify AWS returned our EIP
 if len(describeAddresses.Addresses) != 1 ||
-(domain == "vpc" && *describeAddresses.Addresses[0].AllocationId != id) ||
+domain == "vpc" && *describeAddresses.Addresses[0].AllocationId != id ||
 *describeAddresses.Addresses[0].PublicIp != id {
 if err != nil {
 return fmt.Errorf("Unable to find EIP: %#v", describeAddresses.Addresses)

View File

@@ -431,7 +431,7 @@ func TestResourceAwsElbListenerHash(t *testing.T) {
 for tn, tc := range cases {
 leftHash := resourceAwsElbListenerHash(tc.Left)
 rightHash := resourceAwsElbListenerHash(tc.Right)
-if (leftHash == rightHash) != tc.Match {
+if leftHash == rightHash != tc.Match {
 t.Fatalf("%s: expected match: %t, but did not get it", tn, tc.Match)
 }
 }
}

View File

@@ -102,7 +102,7 @@ func testAccCheckAWSPolicyAttachmentAttributes(users []string, roles []string, g
 }
 }
 if uc != 0 || rc != 0 || gc != 0 {
-return fmt.Errorf("Error: Number of attached users, roles, or groups was incorrect:\n expected %d users and found %d\nexpected %d roles and found %d\nexpected %d groups and found %d", len(users), (len(users) - uc), len(roles), (len(roles) - rc), len(groups), (len(groups) - gc))
+return fmt.Errorf("Error: Number of attached users, roles, or groups was incorrect:\n expected %d users and found %d\nexpected %d roles and found %d\nexpected %d groups and found %d", len(users), len(users)-uc, len(roles), len(roles)-rc, len(groups), len(groups)-gc)
 }
 return nil
 }

View File

@@ -695,7 +695,7 @@ func readBlockDevicesFromInstance(instance *ec2.Instance, conn *ec2.EC2) (map[st
 instanceBlockDevices := make(map[string]*ec2.InstanceBlockDeviceMapping)
 for _, bd := range instance.BlockDeviceMappings {
 if bd.Ebs != nil {
-instanceBlockDevices[*(bd.Ebs.VolumeId)] = bd
+instanceBlockDevices[*bd.Ebs.VolumeId] = bd
 }
 }
@@ -755,9 +755,9 @@ func readBlockDevicesFromInstance(instance *ec2.Instance, conn *ec2.EC2) (map[st
 }
 func blockDeviceIsRoot(bd *ec2.InstanceBlockDeviceMapping, instance *ec2.Instance) bool {
-return (bd.DeviceName != nil &&
+return bd.DeviceName != nil &&
 instance.RootDeviceName != nil &&
-*bd.DeviceName == *instance.RootDeviceName)
+*bd.DeviceName == *instance.RootDeviceName
 }
 func fetchRootDeviceName(ami string, conn *ec2.EC2) (*string, error) {

View File

@@ -165,7 +165,7 @@ func verifyIPAddressParams(d *schema.ResourceData) error {
 _, network := d.GetOk("network")
 _, vpc := d.GetOk("vpc")
-if (network && vpc) || (!network && !vpc) {
+if network && vpc || !network && !vpc {
 return fmt.Errorf(
 "You must supply a value for either (so not both) the 'network' or 'vpc' parameter")
 }

View File

@@ -148,7 +148,7 @@ func resourceDockerContainerRead(d *schema.ResourceData, meta interface{}) error
 }
 if container.State.Running ||
-(!container.State.Running && !d.Get("must_run").(bool)) {
+!container.State.Running && !d.Get("must_run").(bool) {
 break
 }

View File

@@ -376,7 +376,7 @@ func testAccCheckComputeInstanceDisk(instance *compute.Instance, source string,
 }
 for _, disk := range instance.Disks {
-if strings.LastIndex(disk.Source, "/"+source) == (len(disk.Source)-len(source)-1) && disk.AutoDelete == delete && disk.Boot == boot {
+if strings.LastIndex(disk.Source, "/"+source) == len(disk.Source)-len(source)-1 && disk.AutoDelete == delete && disk.Boot == boot {
 return nil
 }
 }

View File

@@ -219,7 +219,7 @@ func resourceComputeSecGroupV2Delete(d *schema.ResourceData, meta interface{}) e
 }
 func resourceSecGroupRulesV2(d *schema.ResourceData) []secgroups.CreateRuleOpts {
-rawRules := (d.Get("rule")).([]interface{})
+rawRules := d.Get("rule").([]interface{})
 createRuleOptsList := make([]secgroups.CreateRuleOpts, len(rawRules))
 for i, raw := range rawRules {
 rawMap := raw.(map[string]interface{})

View File

@@ -292,7 +292,7 @@ func resourcePoolMonitorIDsV1(d *schema.ResourceData) []string {
 }
 func resourcePoolMembersV1(d *schema.ResourceData) []members.CreateOpts {
-memberOptsRaw := (d.Get("member")).(*schema.Set)
+memberOptsRaw := d.Get("member").(*schema.Set)
 memberOpts := make([]members.CreateOpts, memberOptsRaw.Len())
 for i, raw := range memberOptsRaw.List() {
 rawMap := raw.(map[string]interface{})

View File

@@ -45,7 +45,7 @@ func TestFlagKV(t *testing.T) {
 for _, tc := range cases {
 f := new(FlagKV)
 err := f.Set(tc.Input)
-if (err != nil) != tc.Error {
+if err != nil != tc.Error {
 t.Fatalf("bad error. Input: %#v", tc.Input)
 }
@@ -95,7 +95,7 @@ foo = "bar"
 f := new(FlagKVFile)
 err := f.Set(path)
-if (err != nil) != tc.Error {
+if err != nil != tc.Error {
 t.Fatalf("bad error. Input: %#v", tc.Input)
 }

View File

@@ -99,7 +99,7 @@ func safeDuration(dur string, defaultDur time.Duration) time.Duration {
 func formatDuration(duration time.Duration) string {
 h := int(duration.Hours())
-m := int(duration.Minutes()) - (h * 60)
+m := int(duration.Minutes()) - h*60
 s := int(duration.Seconds()) - (h*3600 + m*60)
 res := "PT"

View File

@@ -91,7 +91,7 @@ func TestAppend(t *testing.T) {
 for i, tc := range cases {
 actual, err := Append(tc.c1, tc.c2)
-if (err != nil) != tc.err {
+if err != nil != tc.err {
 t.Fatalf("%d: error fail", i)
 }

View File

@@ -636,7 +636,7 @@ func testFunction(t *testing.T, config testFunctionConfig) {
 }
 out, _, err := lang.Eval(ast, langEvalConfig(config.Vars))
-if (err != nil) != tc.Error {
+if err != nil != tc.Error {
 t.Fatalf("Case #%d:\ninput: %#v\nerr: %s", i, tc.Input, err)
 }

View File

@@ -66,7 +66,7 @@ func TestNewInterpolatedVariable(t *testing.T) {
 for i, tc := range cases {
 actual, err := NewInterpolatedVariable(tc.Input)
-if (err != nil) != tc.Error {
+if err != nil != tc.Error {
 t.Fatalf("%d. Error: %s", i, err)
 }
 if !reflect.DeepEqual(actual, tc.Result) {

View File

@@ -134,7 +134,7 @@ func TestIdentifierCheck(t *testing.T) {
 visitor := &IdentifierCheck{Scope: tc.Scope}
 err = visitor.Visit(node)
-if (err != nil) != tc.Error {
+if err != nil != tc.Error {
 t.Fatalf("Error: %s\n\nInput: %s", err, tc.Input)
 }
 }

View File

@@ -169,7 +169,7 @@ func TestTypeCheck(t *testing.T) {
 visitor := &TypeCheck{Scope: tc.Scope}
 err = visitor.Visit(node)
-if (err != nil) != tc.Error {
+if err != nil != tc.Error {
 t.Fatalf("Error: %s\n\nInput: %s", err, tc.Input)
 }
 }
@@ -247,7 +247,7 @@ func TestTypeCheck_implicit(t *testing.T) {
 // Do the first pass...
 visitor := &TypeCheck{Scope: tc.Scope, Implicit: implicitMap}
 err = visitor.Visit(node)
-if (err != nil) != tc.Error {
+if err != nil != tc.Error {
 t.Fatalf("Error: %s\n\nInput: %s", err, tc.Input)
 }
 if err != nil {

View File

@@ -260,7 +260,7 @@ func TestEval(t *testing.T) {
 }
 out, outType, err := Eval(node, &EvalConfig{GlobalScope: tc.Scope})
-if (err != nil) != tc.Error {
+if err != nil != tc.Error {
 t.Fatalf("Error: %s\n\nInput: %s", err, tc.Input)
 }
 if outType != tc.ResultType {

View File

@@ -353,7 +353,7 @@ func TestParse(t *testing.T) {
 for _, tc := range cases {
 actual, err := Parse(tc.Input)
-if (err != nil) != tc.Error {
+if err != nil != tc.Error {
 t.Fatalf("Error: %s\n\nInput: %s", err, tc.Input)
 }
 if !reflect.DeepEqual(actual, tc.Result) {

View File

@@ -210,5 +210,5 @@ func dirFiles(dir string) ([]string, []string, error) {
 func isIgnoredFile(name string) bool {
 return strings.HasPrefix(name, ".") || // Unix-like hidden files
 strings.HasSuffix(name, "~") || // vim
-(strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#")) // emacs
+strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs
 }

View File

@@ -157,7 +157,7 @@ func TestMerge(t *testing.T) {
 for i, tc := range cases {
 actual, err := Merge(tc.c1, tc.c2)
-if (err != nil) != tc.err {
+if err != nil != tc.err {
 t.Fatalf("%d: error fail", i)
 }

View File

@@ -74,7 +74,7 @@ func TestFileDetector_noPwd(t *testing.T) {
 f := new(FileDetector)
 for i, tc := range noPwdFileTests {
 out, ok, err := f.Detect(tc.in, tc.pwd)
-if (err != nil) != tc.err {
+if err != nil != tc.err {
 t.Fatalf("%d: err: %s", i, err)
 }
 if !ok {

View File

@@ -41,7 +41,7 @@ func TestDetect(t *testing.T) {
 for i, tc := range cases {
 output, err := Detect(tc.Input, tc.Pwd)
-if (err != nil) != tc.Err {
+if err != nil != tc.Err {
 t.Fatalf("%d: bad err: %s", i, err)
 }
 if output != tc.Output {

View File

@@ -122,7 +122,7 @@ func TestConfigFieldReader_DefaultHandling(t *testing.T) {
 Config: tc.Config,
 }
 out, err := r.ReadField(tc.Addr)
-if (err != nil) != tc.Err {
+if err != nil != tc.Err {
 t.Fatalf("%s: err: %s", name, err)
 }
 if s, ok := out.Value.(*Set); ok {
@@ -192,7 +192,7 @@ func TestConfigFieldReader_ComputedMap(t *testing.T) {
 Config: tc.Config,
 }
 out, err := r.ReadField(tc.Addr)
-if (err != nil) != tc.Err {
+if err != nil != tc.Err {
 t.Fatalf("%s: err: %s", name, err)
 }
 if s, ok := out.Value.(*Set); ok {
@@ -283,7 +283,7 @@ func TestConfigFieldReader_ComputedSet(t *testing.T) {
 Config: tc.Config,
 }
 out, err := r.ReadField(tc.Addr)
-if (err != nil) != tc.Err {
+if err != nil != tc.Err {
 t.Fatalf("%s: err: %s", name, err)
 }
 if s, ok := out.Value.(*Set); ok {

View File

@@ -237,7 +237,7 @@ func TestDiffFieldReader_extra(t *testing.T) {
 for name, tc := range cases {
 out, err := r.ReadField(tc.Addr)
-if (err != nil) != tc.Err {
+if err != nil != tc.Err {
 t.Fatalf("%s: err: %s", name, err)
 }
 if s, ok := out.Value.(*Set); ok {

View File

@@ -86,7 +86,7 @@ func TestMapFieldReader_extra(t *testing.T) {
 for name, tc := range cases {
 out, err := r.ReadField(tc.Addr)
-if (err != nil) != tc.OutErr {
+if err != nil != tc.OutErr {
 t.Fatalf("%s: err: %s", name, err)
 }
 if out.Computed != tc.OutComputed {

View File

@@ -387,7 +387,7 @@ func testFieldReader(t *testing.T, f func(map[string]*Schema) FieldReader) {
 for name, tc := range cases {
 r := f(schema)
 out, err := r.ReadField(tc.Addr)
-if (err != nil) != tc.Err {
+if err != nil != tc.Err {
 t.Fatalf("%s: err: %s", name, err)
 }
 if s, ok := out.Value.(*Set); ok {

View File

@@ -242,7 +242,7 @@ func TestMapFieldWriter(t *testing.T) {
 for name, tc := range cases {
 w := &MapFieldWriter{Schema: schema}
 err := w.WriteField(tc.Addr, tc.Value)
-if (err != nil) != tc.Err {
+if err != nil != tc.Err {
 t.Fatalf("%s: err: %s", name, err)
 }

View File

@@ -79,7 +79,7 @@ func TestProviderConfigure(t *testing.T) {
 }
 err = tc.P.Configure(terraform.NewResourceConfig(c))
-if (err != nil) != tc.Err {
+if err != nil != tc.Err {
 t.Fatalf("%d: %s", i, err)
 }
 }
@@ -141,7 +141,7 @@ func TestProviderValidate(t *testing.T) {
 }
 _, es := tc.P.Validate(terraform.NewResourceConfig(c))
-if (len(es) > 0) != tc.Err {
+if len(es) > 0 != tc.Err {
 t.Fatalf("%d: %#v", i, es)
 }
 }
@@ -180,7 +180,7 @@ func TestProviderValidateResource(t *testing.T) {
 }
 _, es := tc.P.ValidateResource(tc.Type, terraform.NewResourceConfig(c))
-if (len(es) > 0) != tc.Err {
+if len(es) > 0 != tc.Err {
 t.Fatalf("%d: %#v", i, es)
 }
 }

View File

@@ -247,7 +247,7 @@ func (r *Resource) InternalValidate(topSchemaMap schemaMap) error {
 } else {
 nonUpdateableAttrs := make([]string, 0)
 for k, v := range r.Schema {
-if v.ForceNew || (v.Computed && !v.Optional) {
+if v.ForceNew || v.Computed && !v.Optional {
 nonUpdateableAttrs = append(nonUpdateableAttrs, k)
 }
 }

View File

@@ -1736,7 +1736,7 @@ func TestResourceDataSet(t *testing.T) {
 }
 err = d.Set(tc.Key, tc.Value)
-if (err != nil) != tc.Err {
+if err != nil != tc.Err {
 t.Fatalf("%d err: %s", i, err)
 }

View File

@@ -369,7 +369,7 @@ func TestResourceInternalValidate(t *testing.T) {
 for i, tc := range cases {
 err := tc.In.InternalValidate(schemaMap{})
-if (err != nil) != tc.Err {
+if err != nil != tc.Err {
 t.Fatalf("%d: bad: %s", i, err)
 }
 }
@@ -585,7 +585,7 @@ func TestResourceRefresh_needsMigration(t *testing.T) {
 if err != nil {
 t.Fatalf("err: %#v", err)
 }
-s.Attributes["newfoo"] = strconv.Itoa((int(oldfoo * 10)))
+s.Attributes["newfoo"] = strconv.Itoa(int(oldfoo * 10))
 delete(s.Attributes, "oldfoo")
 return s, nil

View File

@@ -827,7 +827,7 @@ func (m schemaMap) diffSet(
 newStr := strconv.Itoa(newLen)
 // If the set computed then say that the # is computed
-if computedSet || (schema.Computed && !nSet) {
+if computedSet || schema.Computed && !nSet {
 // If # already exists, equals 0 and no new set is supplied, there
 // is nothing to record in the diff
 count, ok := d.GetOk(k + ".#")

View File

@@ -2437,7 +2437,7 @@ func TestSchemaMap_Diff(t *testing.T) {
 d, err := schemaMap(tc.Schema).Diff(
 tc.State, terraform.NewResourceConfig(c))
-if (err != nil) != tc.Err {
+if err != nil != tc.Err {
 t.Fatalf("#%d err: %s", i, err)
 }
@@ -2595,7 +2595,7 @@ func TestSchemaMap_Input(t *testing.T) {
 rc.Config = make(map[string]interface{})
 actual, err := schemaMap(tc.Schema).Input(input, rc)
-if (err != nil) != tc.Err {
+if err != nil != tc.Err {
 t.Fatalf("#%v err: %s", i, err)
 }
@@ -2916,7 +2916,7 @@ func TestSchemaMap_InternalValidate(t *testing.T) {
 for i, tc := range cases {
 err := schemaMap(tc.In).InternalValidate(schemaMap{})
-if (err != nil) != tc.Err {
+if err != nil != tc.Err {
 if tc.Err {
 t.Fatalf("%d: Expected error did not occur:\n\n%#v", i, tc.In)
 }
@@ -3652,7 +3652,7 @@ func TestSchemaMap_Validate(t *testing.T) {
 }
 ws, es := schemaMap(tc.Schema).Validate(terraform.NewResourceConfig(c))
-if (len(es) > 0) != tc.Err {
+if len(es) > 0 != tc.Err {
 if len(es) == 0 {
 t.Errorf("%q: no errors", tn)
 }

View File

@@ -49,7 +49,7 @@ func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) {
 // Flag if we're creating a new instance
 if n.CreateNew != nil {
-*n.CreateNew = (state.ID == "" && !diff.Destroy) || diff.RequiresNew()
+*n.CreateNew = state.ID == "" && !diff.Destroy || diff.RequiresNew()
 }
 {

View File

@@ -107,7 +107,7 @@ func (b *BuiltinGraphBuilder) Steps(path []string) []GraphTransformer {
 &OrphanTransformer{
 State: b.State,
 Module: b.Root,
-Targeting: (len(b.Targets) > 0),
+Targeting: len(b.Targets) > 0,
 },
 // Output-related transformations

View File

@@ -165,7 +165,7 @@ func (n *GraphNodeConfigResource) DynamicExpand(ctx EvalContext) (*Graph, error)
 steps = append(steps, &OrphanTransformer{
 State: state,
 View: n.Resource.Id(),
-Targeting: (len(n.Targets) > 0),
+Targeting: len(n.Targets) > 0,
 })
 steps = append(steps, &DeposedTransformer{

View File

@@ -210,13 +210,13 @@ digraph {
 for tn, tc := range cases {
 actual, err := GraphDot(tc.Graph(), &tc.Opts)
-if (err == nil) && tc.Error != "" {
+if err == nil && tc.Error != "" {
 t.Fatalf("%s: expected err: %s, got none", tn, tc.Error)
 }
-if (err != nil) && (tc.Error == "") {
+if err != nil && tc.Error == "" {
 t.Fatalf("%s: unexpected err: %s", tn, err)
 }
-if (err != nil) && (tc.Error != "") {
+if err != nil && tc.Error != "" {
 if !strings.Contains(err.Error(), tc.Error) {
 t.Fatalf("%s: expected err: %s\nto contain: %s", tn, err, tc.Error)
 }

View File

@@ -53,26 +53,26 @@ func (addr *ResourceAddress) Equals(raw interface{}) bool {
 return false
 }
-pathMatch := ((len(addr.Path) == 0 && len(other.Path) == 0) ||
-reflect.DeepEqual(addr.Path, other.Path))
+pathMatch := len(addr.Path) == 0 && len(other.Path) == 0 ||
+reflect.DeepEqual(addr.Path, other.Path)
-indexMatch := (addr.Index == -1 ||
+indexMatch := addr.Index == -1 ||
 other.Index == -1 ||
-addr.Index == other.Index)
+addr.Index == other.Index
-nameMatch := (addr.Name == "" ||
+nameMatch := addr.Name == "" ||
 other.Name == "" ||
-addr.Name == other.Name)
+addr.Name == other.Name
-typeMatch := (addr.Type == "" ||
+typeMatch := addr.Type == "" ||
 other.Type == "" ||
-addr.Type == other.Type)
+addr.Type == other.Type
-return (pathMatch &&
+return pathMatch &&
 indexMatch &&
 addr.InstanceType == other.InstanceType &&
 nameMatch &&
-typeMatch)
+typeMatch
 }
 func ParseResourceIndex(s string) (int, error) {