Merge pull request #20657 from hashicorp/jbardin/diff-apply-shim

sdk shim normalization
James Bardin authored on 2019-03-12 18:16:07 -04:00, committed via GitHub
commit ccd62283fb
5 changed files with 523 additions and 194 deletions

View File

@@ -163,6 +163,15 @@ func testResourceRead(d *schema.ResourceData, meta interface{}) error {
d.Set("computed_map", map[string]string{"key1": "value1"})
d.Set("computed_list", []string{"listval1", "listval2"})
d.Set("computed_set", []string{"setval1", "setval2"})
+ // if there is no "set" value, erroneously set it to an empty set. This
+ // might change a null value to an empty set, but we should be able to
+ // ignore that.
+ s := d.Get("set")
+ if s == nil || s.(*schema.Set).Len() == 0 {
+ d.Set("set", []interface{}{})
+ }
return nil
}
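
Editor's aside: the null-vs-empty distinction this test leans on is visible in the legacy flatmap encoding itself. A minimal, self-contained sketch (hypothetical attribute names, not part of this commit):

package main

import "fmt"

func main() {
	// An empty set keeps its count key with a "0" value...
	withEmptySet := map[string]string{
		"id":    "1",
		"set.#": "0", // what d.Set("set", []interface{}{}) stores
	}
	// ...while a null (never-set) attribute has no keys at all.
	withNullSet := map[string]string{
		"id": "1",
	}

	_, emptyOK := withEmptySet["set.#"]
	_, nullOK := withNullSet["set.#"]
	fmt.Println(emptyOK, nullOK) // true false
}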

View File

@@ -0,0 +1,496 @@
package test
import (
"reflect"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/validation"
"github.com/hashicorp/terraform/terraform"
)
var dataprocClusterSchema = map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"project": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
},
"region": {
Type: schema.TypeString,
Optional: true,
Default: "global",
ForceNew: true,
},
"labels": {
Type: schema.TypeMap,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
// GCP automatically adds two labels
// 'goog-dataproc-cluster-uuid'
// 'goog-dataproc-cluster-name'
Computed: true,
DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool {
if old != "" {
return true
}
return false
},
},
"tag_set": {
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"cluster_config": {
Type: schema.TypeList,
Optional: true,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"delete_autogen_bucket": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Removed: "If you need a bucket that can be deleted, please create" +
"a new one and set the `staging_bucket` field",
},
"staging_bucket": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"bucket": {
Type: schema.TypeString,
Computed: true,
},
"gce_cluster_config": {
Type: schema.TypeList,
Optional: true,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"zone": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
},
"network": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
ConflictsWith: []string{"cluster_config.0.gce_cluster_config.0.subnetwork"},
},
"subnetwork": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ConflictsWith: []string{"cluster_config.0.gce_cluster_config.0.network"},
},
"tags": {
Type: schema.TypeSet,
Optional: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"service_account": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"service_account_scopes": {
Type: schema.TypeSet,
Optional: true,
Computed: true,
ForceNew: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"internal_ip_only": {
Type: schema.TypeBool,
Optional: true,
ForceNew: true,
Default: false,
},
"metadata": {
Type: schema.TypeMap,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
ForceNew: true,
},
},
},
},
"master_config": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"num_instances": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"image_uri": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
},
"machine_type": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
},
"disk_config": {
Type: schema.TypeList,
Optional: true,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"num_local_ssds": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
ForceNew: true,
},
"boot_disk_size_gb": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
ForceNew: true,
ValidateFunc: validation.IntAtLeast(10),
},
"boot_disk_type": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validation.StringInSlice([]string{"pd-standard", "pd-ssd", ""}, false),
Default: "pd-standard",
},
},
},
},
"accelerators": {
Type: schema.TypeSet,
Optional: true,
ForceNew: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"accelerator_type": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"accelerator_count": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
},
},
},
},
"instance_names": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
},
},
},
"preemptible_worker_config": {
Type: schema.TypeList,
Optional: true,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"num_instances": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"disk_config": {
Type: schema.TypeList,
Optional: true,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"num_local_ssds": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
ForceNew: true,
},
"boot_disk_size_gb": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
ForceNew: true,
ValidateFunc: validation.IntAtLeast(10),
},
"boot_disk_type": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validation.StringInSlice([]string{"pd-standard", "pd-ssd", ""}, false),
Default: "pd-standard",
},
},
},
},
"instance_names": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
},
},
},
"software_config": {
Type: schema.TypeList,
Optional: true,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"image_version": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
},
"override_properties": {
Type: schema.TypeMap,
Optional: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"properties": {
Type: schema.TypeMap,
Computed: true,
},
},
},
},
"initialization_action": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"script": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"timeout_sec": {
Type: schema.TypeInt,
Optional: true,
Default: 300,
ForceNew: true,
},
},
},
},
"encryption_config": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"kms_key_name": {
Type: schema.TypeString,
Required: true,
},
},
},
},
},
},
},
}
func TestDiffApply_dataprocCluster(t *testing.T) {
priorAttrs := map[string]string{
"cluster_config.#": "1",
"cluster_config.0.bucket": "dataproc-1dc18cb2-116e-4e92-85ea-ff63a1bf2745-us-central1",
"cluster_config.0.delete_autogen_bucket": "false",
"cluster_config.0.encryption_config.#": "0",
"cluster_config.0.gce_cluster_config.#": "1",
"cluster_config.0.gce_cluster_config.0.internal_ip_only": "false",
"cluster_config.0.gce_cluster_config.0.metadata.%": "0",
"cluster_config.0.gce_cluster_config.0.network": "https://www.googleapis.com/compute/v1/projects/hc-terraform-testing/global/networks/default",
"cluster_config.0.gce_cluster_config.0.service_account": "",
"cluster_config.0.gce_cluster_config.0.service_account_scopes.#": "7",
"cluster_config.0.gce_cluster_config.0.service_account_scopes.1245378569": "https://www.googleapis.com/auth/bigtable.admin.table",
"cluster_config.0.gce_cluster_config.0.service_account_scopes.1328717722": "https://www.googleapis.com/auth/devstorage.read_write",
"cluster_config.0.gce_cluster_config.0.service_account_scopes.1693978638": "https://www.googleapis.com/auth/devstorage.full_control",
"cluster_config.0.gce_cluster_config.0.service_account_scopes.172152165": "https://www.googleapis.com/auth/logging.write",
"cluster_config.0.gce_cluster_config.0.service_account_scopes.2401844655": "https://www.googleapis.com/auth/bigquery",
"cluster_config.0.gce_cluster_config.0.service_account_scopes.299921284": "https://www.googleapis.com/auth/bigtable.data",
"cluster_config.0.gce_cluster_config.0.service_account_scopes.3804780973": "https://www.googleapis.com/auth/cloud.useraccounts.readonly",
"cluster_config.0.gce_cluster_config.0.subnetwork": "",
"cluster_config.0.gce_cluster_config.0.tags.#": "0",
"cluster_config.0.gce_cluster_config.0.zone": "us-central1-f",
"cluster_config.0.initialization_action.#": "0",
"cluster_config.0.master_config.#": "1",
"cluster_config.0.master_config.0.accelerators.#": "0",
"cluster_config.0.master_config.0.disk_config.#": "1",
"cluster_config.0.master_config.0.disk_config.0.boot_disk_size_gb": "500",
"cluster_config.0.master_config.0.disk_config.0.boot_disk_type": "pd-standard",
"cluster_config.0.master_config.0.disk_config.0.num_local_ssds": "0",
"cluster_config.0.master_config.0.image_uri": "https://www.googleapis.com/compute/v1/projects/cloud-dataproc/global/images/dataproc-1-3-deb9-20190228-000000-rc01",
"cluster_config.0.master_config.0.instance_names.#": "1",
"cluster_config.0.master_config.0.instance_names.0": "dproc-cluster-test-2ww3c60iww-m",
"cluster_config.0.master_config.0.machine_type": "n1-standard-4",
"cluster_config.0.master_config.0.num_instances": "1",
"cluster_config.0.preemptible_worker_config.#": "1",
"cluster_config.0.preemptible_worker_config.0.disk_config.#": "1",
"cluster_config.0.preemptible_worker_config.0.instance_names.#": "0",
"cluster_config.0.preemptible_worker_config.0.num_instances": "0",
"cluster_config.0.software_config.#": "1",
"cluster_config.0.software_config.0.image_version": "1.3.28-deb9",
"cluster_config.0.software_config.0.override_properties.%": "0",
"cluster_config.0.software_config.0.properties.%": "14",
"cluster_config.0.software_config.0.properties.capacity-scheduler:yarn.scheduler.capacity.root.default.ordering-policy": "fair",
"cluster_config.0.software_config.0.properties.core:fs.gs.block.size": "134217728",
"cluster_config.0.software_config.0.properties.core:fs.gs.metadata.cache.enable": "false",
"cluster_config.0.software_config.0.properties.core:hadoop.ssl.enabled.protocols": "TLSv1,TLSv1.1,TLSv1.2",
"cluster_config.0.software_config.0.properties.distcp:mapreduce.map.java.opts": "-Xmx768m",
"cluster_config.0.software_config.0.properties.distcp:mapreduce.map.memory.mb": "1024",
"cluster_config.0.software_config.0.properties.distcp:mapreduce.reduce.java.opts": "-Xmx768m",
"cluster_config.0.software_config.0.properties.distcp:mapreduce.reduce.memory.mb": "1024",
"cluster_config.0.software_config.0.properties.hdfs:dfs.datanode.address": "0.0.0.0:9866",
"cluster_config.0.software_config.0.properties.hdfs:dfs.datanode.http.address": "0.0.0.0:9864",
"cluster_config.0.software_config.0.properties.hdfs:dfs.datanode.https.address": "0.0.0.0:9865",
"cluster_config.0.software_config.0.properties.hdfs:dfs.datanode.ipc.address": "0.0.0.0:9867",
"cluster_config.0.software_config.0.properties.hdfs:dfs.namenode.handler.count": "20",
"cluster_config.0.software_config.0.properties.hdfs:dfs.namenode.http-address": "0.0.0.0:9870",
"cluster_config.0.software_config.0.properties.hdfs:dfs.namenode.https-address": "0.0.0.0:9871",
"cluster_config.0.software_config.0.properties.hdfs:dfs.namenode.lifeline.rpc-address": "dproc-cluster-test-2ww3c60iww-m:8050",
"cluster_config.0.software_config.0.properties.hdfs:dfs.namenode.secondary.http-address": "0.0.0.0:9868",
"cluster_config.0.software_config.0.properties.hdfs:dfs.namenode.secondary.https-address": "0.0.0.0:9869",
"cluster_config.0.software_config.0.properties.hdfs:dfs.namenode.service.handler.count": "10",
"cluster_config.0.software_config.0.properties.hdfs:dfs.namenode.servicerpc-address": "dproc-cluster-test-2ww3c60iww-m:8051",
"cluster_config.0.software_config.0.properties.mapred-env:HADOOP_JOB_HISTORYSERVER_HEAPSIZE": "3840",
"cluster_config.0.software_config.0.properties.mapred:mapreduce.job.maps": "21",
"cluster_config.0.software_config.0.properties.mapred:mapreduce.job.reduce.slowstart.completedmaps": "0.95",
"cluster_config.0.software_config.0.properties.mapred:mapreduce.job.reduces": "7",
"cluster_config.0.software_config.0.properties.mapred:mapreduce.map.cpu.vcores": "1",
"cluster_config.0.software_config.0.properties.mapred:mapreduce.map.java.opts": "-Xmx2457m",
"cluster_config.0.software_config.0.properties.mapred:mapreduce.map.memory.mb": "3072",
"cluster_config.0.software_config.0.properties.mapred:mapreduce.reduce.cpu.vcores": "1",
"cluster_config.0.software_config.0.properties.mapred:mapreduce.reduce.java.opts": "-Xmx2457m",
"cluster_config.0.software_config.0.properties.mapred:mapreduce.reduce.memory.mb": "3072",
"cluster_config.0.software_config.0.properties.mapred:mapreduce.task.io.sort.mb": "256",
"cluster_config.0.software_config.0.properties.mapred:yarn.app.mapreduce.am.command-opts": "-Xmx2457m",
"cluster_config.0.software_config.0.properties.mapred:yarn.app.mapreduce.am.resource.cpu-vcores": "1",
"cluster_config.0.software_config.0.properties.mapred:yarn.app.mapreduce.am.resource.mb": "3072",
"cluster_config.0.software_config.0.properties.presto-jvm:MaxHeapSize": "12288m",
"cluster_config.0.software_config.0.properties.presto:query.max-memory-per-node": "7372MB",
"cluster_config.0.software_config.0.properties.presto:query.max-total-memory-per-node": "7372MB",
"cluster_config.0.software_config.0.properties.spark-env:SPARK_DAEMON_MEMORY": "3840m",
"cluster_config.0.software_config.0.properties.spark:spark.driver.maxResultSize": "1920m",
"cluster_config.0.software_config.0.properties.spark:spark.driver.memory": "3840m",
"cluster_config.0.software_config.0.properties.spark:spark.executor.cores": "2",
"cluster_config.0.software_config.0.properties.spark:spark.executor.instances": "2",
"cluster_config.0.software_config.0.properties.spark:spark.executor.memory": "5586m",
"cluster_config.0.software_config.0.properties.spark:spark.executorEnv.OPENBLAS_NUM_THREADS": "1",
"cluster_config.0.software_config.0.properties.spark:spark.scheduler.mode": "FAIR",
"cluster_config.0.software_config.0.properties.spark:spark.sql.cbo.enabled": "true",
"cluster_config.0.software_config.0.properties.spark:spark.yarn.am.memory": "640m",
"cluster_config.0.software_config.0.properties.yarn-env:YARN_TIMELINESERVER_HEAPSIZE": "3840",
"cluster_config.0.software_config.0.properties.yarn:yarn.nodemanager.resource.memory-mb": "12288",
"cluster_config.0.software_config.0.properties.yarn:yarn.resourcemanager.nodemanager-graceful-decommission-timeout-secs": "86400",
"cluster_config.0.software_config.0.properties.yarn:yarn.scheduler.maximum-allocation-mb": "12288",
"cluster_config.0.software_config.0.properties.yarn:yarn.scheduler.minimum-allocation-mb": "1024",
"cluster_config.0.staging_bucket": "",
"id": "dproc-cluster-test-ktbyrniu4e",
"labels.%": "4",
"labels.goog-dataproc-cluster-name": "dproc-cluster-test-ktbyrniu4e",
"labels.goog-dataproc-cluster-uuid": "d576c4e0-8fda-4ad1-abf5-ec951ab25855",
"labels.goog-dataproc-location": "us-central1",
"labels.key1": "value1",
"tag_set.#": "0",
}
diff := &terraform.InstanceDiff{
Attributes: map[string]*terraform.ResourceAttrDiff{
"labels.%": &terraform.ResourceAttrDiff{Old: "4", New: "1", NewComputed: false, NewRemoved: false, NewExtra: interface{}(nil), RequiresNew: false, Sensitive: false, Type: 0x0},
"labels.goog-dataproc-cluster-name": &terraform.ResourceAttrDiff{Old: "dproc-cluster-test-ktbyrniu4e", New: "", NewComputed: false, NewRemoved: true, NewExtra: interface{}(nil), RequiresNew: false, Sensitive: false, Type: 0x0},
"labels.goog-dataproc-cluster-uuid": &terraform.ResourceAttrDiff{Old: "d576c4e0-8fda-4ad1-abf5-ec951ab25855", New: "", NewComputed: false, NewRemoved: true, NewExtra: interface{}(nil), RequiresNew: false, Sensitive: false, Type: 0x0},
"labels.goog-dataproc-location": &terraform.ResourceAttrDiff{Old: "us-central1", New: "", NewComputed: false, NewRemoved: true, NewExtra: interface{}(nil), RequiresNew: false, Sensitive: false, Type: 0x0},
},
}
newAttrs, err := diff.Apply(priorAttrs, (&schema.Resource{Schema: dataprocClusterSchema}).CoreConfigSchema())
if err != nil {
t.Fatal(err)
}
// the diffed label elements should be removed
delete(priorAttrs, "labels.goog-dataproc-cluster-name")
delete(priorAttrs, "labels.goog-dataproc-cluster-uuid")
delete(priorAttrs, "labels.goog-dataproc-location")
priorAttrs["labels.%"] = "1"
// the missing required "name" should be added
priorAttrs["name"] = ""
if !reflect.DeepEqual(priorAttrs, newAttrs) {
t.Fatal(cmp.Diff(priorAttrs, newAttrs))
}
}
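
Editor's aside: the same round trip in miniature. A hypothetical companion test (stripped down to the labels attribute, invented values, same Apply/CoreConfigSchema API as above) showing how Apply drops removed map elements and rewrites the count key:

package test

import (
	"testing"

	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/terraform"
)

func TestDiffApply_labelsOnly(t *testing.T) {
	labelsSchema := map[string]*schema.Schema{
		"labels": {
			Type:     schema.TypeMap,
			Optional: true,
			Elem:     &schema.Schema{Type: schema.TypeString},
		},
	}

	priorAttrs := map[string]string{
		"id":          "1",
		"labels.%":    "2",
		"labels.key1": "value1",
		"labels.key2": "value2",
	}

	// Remove one element; the count must drop from 2 to 1.
	diff := &terraform.InstanceDiff{
		Attributes: map[string]*terraform.ResourceAttrDiff{
			"labels.%":    {Old: "2", New: "1"},
			"labels.key2": {Old: "value2", New: "", NewRemoved: true},
		},
	}

	newAttrs, err := diff.Apply(priorAttrs, (&schema.Resource{Schema: labelsSchema}).CoreConfigSchema())
	if err != nil {
		t.Fatal(err)
	}
	if newAttrs["labels.%"] != "1" {
		t.Fatalf("expected labels.%% to be 1, got %q", newAttrs["labels.%"])
	}
	if _, ok := newAttrs["labels.key2"]; ok {
		t.Fatal("labels.key2 should have been removed")
	}
}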

View File

@@ -4,8 +4,6 @@ import (
"encoding/json"
"errors"
"fmt"
- "regexp"
- "sort"
"strconv"
"strings"
@@ -427,13 +425,6 @@ func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadReso
return resp, nil
}
- if newInstanceState != nil {
- // here we use the prior state to check for unknown/zero containers values
- // when normalizing the flatmap.
- stateAttrs := hcl2shim.FlatmapValueFromHCL2(stateVal)
- newInstanceState.Attributes = normalizeFlatmapContainers(stateAttrs, newInstanceState.Attributes)
- }
if newInstanceState == nil || newInstanceState.ID == "" {
// The old provider API used an empty id to signal that the remote
// object appears to have been deleted, but our new protocol expects
@@ -457,6 +448,7 @@ func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadReso
return resp, nil
}
+ newStateVal = normalizeNullValues(newStateVal, stateVal, false)
newStateVal = copyTimeoutValues(newStateVal, stateVal)
newStateMP, err := msgpack.Marshal(newStateVal, block.ImpliedType())
@@ -572,8 +564,6 @@ func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.Pl
// now we need to apply the diff to the prior state, so get the planned state
plannedAttrs, err := diff.Apply(priorState.Attributes, block)
- plannedAttrs = normalizeFlatmapContainers(priorState.Attributes, plannedAttrs)
plannedStateVal, err := hcl2shim.HCL2ValueFromFlatmap(plannedAttrs, block.ImpliedType())
if err != nil {
resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
@@ -817,8 +807,6 @@ func (s *GRPCProviderServer) ApplyResourceChange(_ context.Context, req *proto.A
// here we use the planned state to check for unknown/zero containers values
// when normalizing the flatmap.
- plannedState := hcl2shim.FlatmapValueFromHCL2(plannedStateVal)
- newInstanceState.Attributes = normalizeFlatmapContainers(plannedState, newInstanceState.Attributes)
// We keep the null val if we destroyed the resource, otherwise build the
// entire object, even if the new state was nil.
@@ -1000,131 +988,6 @@ func pathToAttributePath(path cty.Path) *proto.AttributePath {
return &proto.AttributePath{Steps: steps}
}
- // normalizeFlatmapContainers removes empty containers, and fixes counts in a
- // set of flatmapped attributes. The prior value is used to determine if there
- // could be zero-length flatmap containers which we need to preserve. This
- // allows a provider to set an empty computed container in the state without
- // creating perpetual diff. This can differ slightly between plan and apply, so
- // the apply flag is passed when called from ApplyResourceChange.
- func normalizeFlatmapContainers(prior map[string]string, attrs map[string]string) map[string]string {
- isCount := regexp.MustCompile(`.\.[%#]$`).MatchString
- // While we can't determine if the value was actually computed here, we will
- // trust that our shims stored and retrieved a zero-value container
- // correctly.
- zeros := map[string]bool{}
- // Empty blocks have a count of 1 with no other attributes. Just record all
- // "1"s here to override 0-length blocks when setting the count below.
- ones := map[string]bool{}
- for k, v := range prior {
- if isCount(k) && (v == "0" || v == hcl2shim.UnknownVariableValue) {
- zeros[k] = true
- }
- }
- for k, v := range attrs {
- // store any "1" values, since if the length was 1 and there are no
- // items, it was probably an empty set block. Hopefully checking for a 1
- // value with no items is sufficient, without cross-referencing the
- // schema.
- if isCount(k) && v == "1" {
- ones[k] = true
- // make sure we don't have the same key under both categories.
- delete(zeros, k)
- }
- }
- // The "ones" were stored to look for sets with an empty value, so we need
- // to verify that we only store ones with no attrs.
- expectedEmptySets := map[string]bool{}
- for one := range ones {
- prefix := one[:len(one)-1]
- found := 0
- for k := range attrs {
- // since this can be recursive, we check that the attrs isn't also a #.
- if strings.HasPrefix(k, prefix) && !isCount(k) {
- found++
- }
- }
- if found == 0 {
- expectedEmptySets[one] = true
- }
- }
- // find container keys
- var keys []string
- for k, v := range attrs {
- if !isCount(k) {
- continue
- }
- if v == hcl2shim.UnknownVariableValue {
- // if the index value indicates the container is unknown, skip
- // updating the counts.
- continue
- }
- keys = append(keys, k)
- }
- // sort the keys in reverse, so that we check the longest subkeys first
- sort.Slice(keys, func(i, j int) bool {
- a, b := keys[i], keys[j]
- if strings.HasPrefix(a, b) {
- return true
- }
- if strings.HasPrefix(b, a) {
- return false
- }
- return a > b
- })
- for _, k := range keys {
- prefix := k[:len(k)-1]
- indexes := map[string]int{}
- for cand := range attrs {
- if cand == k {
- continue
- }
- if strings.HasPrefix(cand, prefix) {
- idx := cand[len(prefix):]
- dot := strings.Index(idx, ".")
- if dot > 0 {
- idx = idx[:dot]
- }
- indexes[idx]++
- }
- }
- switch {
- case len(indexes) == 0 && zeros[k]:
- // if there were no keys, but the value was known to be zero, the provider
- // must have set the computed value to an empty container, and we
- // need to leave it in the flatmap.
- attrs[k] = "0"
- case len(indexes) == 0 && ones[k]:
- // We need to retain any empty blocks that had a 1 count with no attributes.
- attrs[k] = "1"
- case len(indexes) > 0:
- attrs[k] = strconv.Itoa(len(indexes))
- }
- }
- for k := range expectedEmptySets {
- if _, ok := attrs[k]; !ok {
- attrs[k] = "1"
- }
- }
- return attrs
- }
// helper/schema throws away timeout values from the config and stores them in
// the Private/Meta fields. we need to copy those values into the planned state
// so that core doesn't see a perpetual diff with the timeout block.
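
Editor's aside: copyTimeoutValues itself is outside this hunk. As a rough sketch of the idea the comment above describes (hypothetical helper, not the actual implementation), carrying a timeouts attribute from one cty object value to another looks something like:

package plugin

import "github.com/zclconf/go-cty/cty"

// copyTimeouts is a hypothetical sketch, not the real copyTimeoutValues:
// carry the "timeouts" attribute from the prior value into the new state
// value so core doesn't see a perpetual diff on the timeouts block.
func copyTimeouts(to, from cty.Value) cty.Value {
	if to.IsNull() || !to.IsKnown() || !to.Type().IsObjectType() {
		return to
	}
	if from.IsNull() || !from.Type().IsObjectType() || !from.Type().HasAttribute("timeouts") {
		return to
	}
	if !to.Type().HasAttribute("timeouts") {
		return to
	}
	vals := to.AsValueMap()
	vals["timeouts"] = from.GetAttr("timeouts")
	return cty.ObjectVal(vals)
}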

View File

@@ -3,15 +3,12 @@ package plugin
import (
"context"
"fmt"
- "reflect"
- "strconv"
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
- "github.com/hashicorp/terraform/config/hcl2shim"
"github.com/hashicorp/terraform/helper/schema"
proto "github.com/hashicorp/terraform/internal/tfplugin5"
"github.com/hashicorp/terraform/terraform"
@@ -665,57 +662,6 @@ func TestGetSchemaTimeouts(t *testing.T) {
}
}
- func TestNormalizeFlatmapContainers(t *testing.T) {
- for i, tc := range []struct {
- prior map[string]string
- attrs map[string]string
- expect map[string]string
- }{
- {
- attrs: map[string]string{"id": "1", "multi.2.set.#": "2", "multi.2.set.1.foo": "bar", "multi.1.set.#": "0", "single.#": "0"},
- expect: map[string]string{"id": "1", "multi.2.set.#": "1", "multi.2.set.1.foo": "bar", "multi.1.set.#": "0", "single.#": "0"},
- },
- {
- attrs: map[string]string{"id": "78629a0f5f3f164f", "multi.#": "1"},
- expect: map[string]string{"id": "78629a0f5f3f164f", "multi.#": "1"},
- },
- {
- attrs: map[string]string{"multi.529860700.set.#": "0", "multi.#": "1", "id": "78629a0f5f3f164f"},
- expect: map[string]string{"multi.529860700.set.#": "0", "multi.#": "1", "id": "78629a0f5f3f164f"},
- },
- {
- attrs: map[string]string{"set.2.required": "bar", "set.2.list.#": "1", "set.2.list.0": "x", "set.1.list.#": "0", "set.#": "2"},
- expect: map[string]string{"set.2.required": "bar", "set.2.list.#": "1", "set.2.list.0": "x", "set.1.list.#": "0", "set.#": "2"},
- },
- {
- attrs: map[string]string{"map.%": hcl2shim.UnknownVariableValue, "list.#": hcl2shim.UnknownVariableValue, "id": "1"},
- expect: map[string]string{"id": "1", "map.%": hcl2shim.UnknownVariableValue, "list.#": hcl2shim.UnknownVariableValue},
- },
- {
- prior: map[string]string{"map.%": "0"},
- attrs: map[string]string{"map.%": "0", "list.#": "0", "id": "1"},
- expect: map[string]string{"id": "1", "list.#": "0", "map.%": "0"},
- },
- {
- prior: map[string]string{"map.%": hcl2shim.UnknownVariableValue, "list.#": "0"},
- attrs: map[string]string{"map.%": "0", "list.#": "0", "id": "1"},
- expect: map[string]string{"id": "1", "map.%": "0", "list.#": "0"},
- },
- {
- prior: map[string]string{"list.#": "1", "list.0": "old value"},
- attrs: map[string]string{"list.#": "0"},
- expect: map[string]string{"list.#": "0"},
- },
- } {
- t.Run(strconv.Itoa(i), func(t *testing.T) {
- got := normalizeFlatmapContainers(tc.prior, tc.attrs)
- if !reflect.DeepEqual(tc.expect, got) {
- t.Fatalf("expected:\n%#v\ngot:\n%#v\n", tc.expect, got)
- }
- })
- }
- }
func TestNormalizeNullValues(t *testing.T) {
for i, tc := range []struct {
Src, Dst, Expect cty.Value
@@ -743,6 +689,19 @@ func TestNormalizeNullValues(t *testing.T) {
}),
}),
},
+ {
+ // A zero set value is kept
+ Src: cty.ObjectVal(map[string]cty.Value{
+ "set": cty.SetValEmpty(cty.String),
+ }),
+ Dst: cty.ObjectVal(map[string]cty.Value{
+ "set": cty.SetValEmpty(cty.String),
+ }),
+ Expect: cty.ObjectVal(map[string]cty.Value{
+ "set": cty.SetValEmpty(cty.String),
+ }),
+ Plan: true,
+ },
{
// The known set value is copied over the null set value
Src: cty.ObjectVal(map[string]cty.Value{

View File

@@ -460,7 +460,6 @@ func (d *InstanceDiff) Apply(attrs map[string]string, schema *configschema.Block
func (d *InstanceDiff) applyBlockDiff(path []string, attrs map[string]string, schema *configschema.Block) (map[string]string, error) {
result := map[string]string{}
name := ""
-
if len(path) > 0 {
name = path[len(path)-1]
@@ -597,7 +596,8 @@ func (d *InstanceDiff) applyBlockDiff(path []string, attrs map[string]string, sc
}
}
- if countDiff, ok := d.Attributes[strings.Join(append(path, n, "#"), ".")]; ok {
+ countAddr := strings.Join(append(path, n, "#"), ".")
+ if countDiff, ok := d.Attributes[countAddr]; ok {
if countDiff.NewComputed {
result[localBlockPrefix+"#"] = hcl2shim.UnknownVariableValue
} else {
@@ -633,6 +633,8 @@ func (d *InstanceDiff) applyBlockDiff(path []string, attrs map[string]string, sc
}
}
}
+ } else if origCount, ok := attrs[countAddr]; ok && keepBlock {
+ result[localBlockPrefix+"#"] = origCount
} else {
result[localBlockPrefix+"#"] = countFlatmapContainerValues(localBlockPrefix+"#", result)
}