package plugin

import (
	"encoding/json"
	"fmt"
	"log"
	"strconv"
	"strings"

	"github.com/zclconf/go-cty/cty"
	ctyconvert "github.com/zclconf/go-cty/cty/convert"
	"github.com/zclconf/go-cty/cty/msgpack"
	context "golang.org/x/net/context"

	"github.com/hashicorp/terraform/config/hcl2shim"
	"github.com/hashicorp/terraform/configs/configschema"
	"github.com/hashicorp/terraform/helper/schema"
	proto "github.com/hashicorp/terraform/internal/tfplugin5"
	"github.com/hashicorp/terraform/plans/objchange"
	"github.com/hashicorp/terraform/plugin/convert"
	"github.com/hashicorp/terraform/terraform"
)

const newExtraKey = "_new_extra_shim"

// NewGRPCProviderServerShim wraps a terraform.ResourceProvider in a
// proto.ProviderServer implementation. If the provided provider is not a
// *schema.Provider, this will return nil.
func NewGRPCProviderServerShim(p terraform.ResourceProvider) *GRPCProviderServer {
	sp, ok := p.(*schema.Provider)
	if !ok {
		return nil
	}

	return &GRPCProviderServer{
		provider: sp,
	}
}

// GRPCProviderServer handles the server, or plugin side of the rpc connection.
type GRPCProviderServer struct {
	provider *schema.Provider
}

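// GetSchema returns the schema for the provider configuration along with the
// schema for every resource type and data source exposed by the wrapped provider.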
func (s *GRPCProviderServer) GetSchema(_ context.Context, req *proto.GetProviderSchema_Request) (*proto.GetProviderSchema_Response, error) {
	// Here we are certain that the provider is being called through grpc, so
	// make sure the feature flag for helper/schema is set
	schema.SetProto5()

	resp := &proto.GetProviderSchema_Response{
		ResourceSchemas:   make(map[string]*proto.Schema),
		DataSourceSchemas: make(map[string]*proto.Schema),
	}

	resp.Provider = &proto.Schema{
		Block: convert.ConfigSchemaToProto(s.getProviderSchemaBlock()),
	}

	for typ, res := range s.provider.ResourcesMap {
		resp.ResourceSchemas[typ] = &proto.Schema{
			Version: int64(res.SchemaVersion),
			Block:   convert.ConfigSchemaToProto(res.CoreConfigSchema()),
		}
	}

	for typ, dat := range s.provider.DataSourcesMap {
		resp.DataSourceSchemas[typ] = &proto.Schema{
			Version: int64(dat.SchemaVersion),
			Block:   convert.ConfigSchemaToProto(dat.CoreConfigSchema()),
		}
	}

	return resp, nil
}

func (s *GRPCProviderServer) getProviderSchemaBlock() *configschema.Block {
	return schema.InternalMap(s.provider.Schema).CoreConfigSchema()
}

func (s *GRPCProviderServer) getResourceSchemaBlock(name string) *configschema.Block {
	res := s.provider.ResourcesMap[name]
	return res.CoreConfigSchema()
}

func (s *GRPCProviderServer) getDatasourceSchemaBlock(name string) *configschema.Block {
	dat := s.provider.DataSourcesMap[name]
	return dat.CoreConfigSchema()
}

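// PrepareProviderConfig validates the provider configuration, filling in any
// top-level attribute defaults declared by the schema before returning the
// prepared configuration to Terraform Core.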
func (s *GRPCProviderServer) PrepareProviderConfig(_ context.Context, req *proto.PrepareProviderConfig_Request) (*proto.PrepareProviderConfig_Response, error) {
	resp := &proto.PrepareProviderConfig_Response{}

	schemaBlock := s.getProviderSchemaBlock()

	configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// look up any required, top-level attributes that are Null, and see if we
	// have a Default value available.
	configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) {
		// we're only looking for top-level attributes
		if len(path) != 1 {
			return val, nil
		}

		// nothing to do if we already have a value
		if !val.IsNull() {
			return val, nil
		}

		// get the Schema definition for this attribute
		getAttr, ok := path[0].(cty.GetAttrStep)
		// these should all exist, but just ignore anything strange
		if !ok {
			return val, nil
		}

		attrSchema := s.provider.Schema[getAttr.Name]
		// continue to ignore anything that doesn't match
		if attrSchema == nil {
			return val, nil
		}

		// this is deprecated, so don't set it
		if attrSchema.Deprecated != "" || attrSchema.Removed != "" {
			return val, nil
		}

		// find a default value if it exists
		def, err := attrSchema.DefaultValue()
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error getting default for %q: %s", getAttr.Name, err))
			return val, err
		}

		// no default
		if def == nil {
			return val, nil
		}

		// create a cty.Value and make sure it's the correct type
		tmpVal := hcl2shim.HCL2ValueFromConfigValue(def)

		// helper/schema used to allow setting "" to a bool
		if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) {
			// return a warning about the conversion
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, "provider set empty string as default value for bool "+getAttr.Name)
			tmpVal = cty.False
		}

		val, err = ctyconvert.Convert(tmpVal, val.Type())
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error setting default for %q: %s", getAttr.Name, err))
		}

		return val, err
	})
	if err != nil {
		// any error here was already added to the diagnostics
		return resp, nil
	}

	configVal, err = schemaBlock.CoerceValue(configVal)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// Ensure there are no nulls that will cause helper/schema to panic.
	if err := validateConfigNulls(configVal, nil); err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	config := terraform.NewResourceConfigShimmed(configVal, schemaBlock)

	warns, errs := s.provider.Validate(config)
	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs))

	preparedConfigMP, err := msgpack.Marshal(configVal, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	resp.PreparedConfig = &proto.DynamicValue{Msgpack: preparedConfigMP}

	return resp, nil
}

func (s *GRPCProviderServer) ValidateResourceTypeConfig(_ context.Context, req *proto.ValidateResourceTypeConfig_Request) (*proto.ValidateResourceTypeConfig_Response, error) {
	resp := &proto.ValidateResourceTypeConfig_Response{}

	schemaBlock := s.getResourceSchemaBlock(req.TypeName)

	configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	config := terraform.NewResourceConfigShimmed(configVal, schemaBlock)

	warns, errs := s.provider.ValidateResource(req.TypeName, config)
	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs))

	return resp, nil
}

func (s *GRPCProviderServer) ValidateDataSourceConfig(_ context.Context, req *proto.ValidateDataSourceConfig_Request) (*proto.ValidateDataSourceConfig_Response, error) {
	resp := &proto.ValidateDataSourceConfig_Response{}

	schemaBlock := s.getDatasourceSchemaBlock(req.TypeName)

	configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// Ensure there are no nulls that will cause helper/schema to panic.
	if err := validateConfigNulls(configVal, nil); err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	config := terraform.NewResourceConfigShimmed(configVal, schemaBlock)

	warns, errs := s.provider.ValidateDataSource(req.TypeName, config)
	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs))

	return resp, nil
}

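// UpgradeResourceState upgrades a prior resource state (either legacy flatmap
// or JSON) through any MigrateState/StateUpgraders defined for the resource,
// and returns it re-encoded against the current schema.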
func (s *GRPCProviderServer) UpgradeResourceState(_ context.Context, req *proto.UpgradeResourceState_Request) (*proto.UpgradeResourceState_Response, error) {
	resp := &proto.UpgradeResourceState_Response{}

	res := s.provider.ResourcesMap[req.TypeName]
	schemaBlock := s.getResourceSchemaBlock(req.TypeName)

	version := int(req.Version)

	jsonMap := map[string]interface{}{}
	var err error

	switch {
	// We first need to upgrade a flatmap state if it exists.
	// There should never be both a JSON and Flatmap state in the request.
	case len(req.RawState.Flatmap) > 0:
		jsonMap, version, err = s.upgradeFlatmapState(version, req.RawState.Flatmap, res)
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			return resp, nil
		}
	// if there's a JSON state, we need to decode it.
	case len(req.RawState.Json) > 0:
		err = json.Unmarshal(req.RawState.Json, &jsonMap)
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			return resp, nil
		}
	default:
		log.Println("[DEBUG] no state provided to upgrade")
		return resp, nil
	}

	// complete the upgrade of the JSON states
	jsonMap, err = s.upgradeJSONState(version, jsonMap, res)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// The provider isn't required to clean out removed fields
	s.removeAttributes(jsonMap, schemaBlock.ImpliedType())

	// now we need to turn the state into the default json representation, so
	// that it can be re-decoded using the actual schema.
	val, err := schema.JSONMapToStateValue(jsonMap, schemaBlock)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// Now we need to make sure blocks are represented correctly, which means
	// that missing blocks are empty collections, rather than null.
	// First we need to CoerceValue to ensure that all object types match.
	val, err = schemaBlock.CoerceValue(val)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}
	// Normalize the value and fill in any missing blocks.
	val = objchange.NormalizeObjectFromLegacySDK(val, schemaBlock)

	// encode the final state to the expected msgpack format
	newStateMP, err := msgpack.Marshal(val, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	resp.UpgradedState = &proto.DynamicValue{Msgpack: newStateMP}
	return resp, nil
}

// upgradeFlatmapState takes a legacy flatmap state, upgrades it using Migrate
// state if necessary, and converts it to the new JSON state format decoded as a
// map[string]interface{}.
// upgradeFlatmapState returns the json map along with the corresponding schema
// version.
func (s *GRPCProviderServer) upgradeFlatmapState(version int, m map[string]string, res *schema.Resource) (map[string]interface{}, int, error) {
	// this will be the version we've upgraded to, defaulting to the given
	// version in case no migration was called.
	upgradedVersion := version

	// first determine if we need to call the legacy MigrateState func
	requiresMigrate := version < res.SchemaVersion

	schemaType := res.CoreConfigSchema().ImpliedType()

	// if there are any StateUpgraders, then we need to only compare
	// against the first version there
	if len(res.StateUpgraders) > 0 {
		requiresMigrate = version < res.StateUpgraders[0].Version
	}

	if requiresMigrate && res.MigrateState == nil {
		// Providers were previously allowed to bump the version
		// without declaring MigrateState.
		// If there are further upgraders, then we've only updated that far.
		if len(res.StateUpgraders) > 0 {
			schemaType = res.StateUpgraders[0].Type
			upgradedVersion = res.StateUpgraders[0].Version
		}
	} else if requiresMigrate {
		is := &terraform.InstanceState{
			ID:         m["id"],
			Attributes: m,
			Meta: map[string]interface{}{
				"schema_version": strconv.Itoa(version),
			},
		}

		is, err := res.MigrateState(version, is, s.provider.Meta())
		if err != nil {
			return nil, 0, err
		}

		// re-assign the map in case there was a copy made, making sure to keep
		// the ID
		m = is.Attributes
		m["id"] = is.ID

		// if there are further upgraders, then we've only updated that far
		if len(res.StateUpgraders) > 0 {
			schemaType = res.StateUpgraders[0].Type
			upgradedVersion = res.StateUpgraders[0].Version
		}
	} else {
		// the schema version may be newer than the MigrateState functions
		// handled and older than the current, but still stored in the flatmap
		// form. If that's the case, we need to find the correct schema type to
		// convert the state.
		for _, upgrader := range res.StateUpgraders {
			if upgrader.Version == version {
				schemaType = upgrader.Type
				break
			}
		}
	}

	// now we know the state is up to the latest version that handled the
	// flatmap format state. Now we can upgrade the format and continue from
	// there.
	newConfigVal, err := hcl2shim.HCL2ValueFromFlatmap(m, schemaType)
	if err != nil {
		return nil, 0, err
	}

	jsonMap, err := schema.StateValueToJSONMap(newConfigVal, schemaType)
	return jsonMap, upgradedVersion, err
}

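// upgradeJSONState applies any applicable StateUpgraders to a JSON-decoded
// state map, stepping the schema version forward after each upgrade.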
func (s *GRPCProviderServer) upgradeJSONState(version int, m map[string]interface{}, res *schema.Resource) (map[string]interface{}, error) {
	var err error

	for _, upgrader := range res.StateUpgraders {
		if version != upgrader.Version {
			continue
		}

		m, err = upgrader.Upgrade(m, s.provider.Meta())
		if err != nil {
			return nil, err
		}
		version++
	}

	return m, nil
}

// removeAttributes removes any attributes no longer present in the schema, so
// that the json can be correctly decoded.
func (s *GRPCProviderServer) removeAttributes(v interface{}, ty cty.Type) {
	// we're only concerned with finding maps that correspond to object
	// attributes
	switch v := v.(type) {
	case []interface{}:
		// If these aren't blocks the next call will be a noop
		if ty.IsListType() || ty.IsSetType() {
			eTy := ty.ElementType()
			for _, eV := range v {
				s.removeAttributes(eV, eTy)
			}
		}
		return
	case map[string]interface{}:
		// map blocks aren't yet supported, but handle this just in case
		if ty.IsMapType() {
			eTy := ty.ElementType()
			for _, eV := range v {
				s.removeAttributes(eV, eTy)
			}
			return
		}

		if ty == cty.DynamicPseudoType {
			log.Printf("[DEBUG] ignoring dynamic block: %#v\n", v)
			return
		}

		if !ty.IsObjectType() {
			// This shouldn't happen, and will fail to decode further on, so
			// there's no need to handle it here.
			log.Printf("[WARN] unexpected type %#v for map in json state", ty)
			return
		}

		attrTypes := ty.AttributeTypes()
		for attr, attrV := range v {
			attrTy, ok := attrTypes[attr]
			if !ok {
				log.Printf("[DEBUG] attribute %q no longer present in schema", attr)
				delete(v, attr)
				continue
			}

			s.removeAttributes(attrV, attrTy)
		}
	}
}

func (s *GRPCProviderServer) Stop(_ context.Context, _ *proto.Stop_Request) (*proto.Stop_Response, error) {
	resp := &proto.Stop_Response{}

	err := s.provider.Stop()
	if err != nil {
		resp.Error = err.Error()
	}

	return resp, nil
}

func (s *GRPCProviderServer) Configure(_ context.Context, req *proto.Configure_Request) (*proto.Configure_Response, error) {
	resp := &proto.Configure_Response{}

	schemaBlock := s.getProviderSchemaBlock()

	configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	s.provider.TerraformVersion = req.TerraformVersion

	// Ensure there are no nulls that will cause helper/schema to panic.
	if err := validateConfigNulls(configVal, nil); err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	config := terraform.NewResourceConfigShimmed(configVal, schemaBlock)
	err = s.provider.Configure(config)
	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)

	return resp, nil
}

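// ReadResource refreshes an existing resource instance, returning the updated
// state (or a null value when the remote object no longer exists) along with
// the private data carried through the request.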
func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadResource_Request) (*proto.ReadResource_Response, error) {
	resp := &proto.ReadResource_Response{
		// helper/schema did previously handle private data during refresh, but
		// core is now going to expect this to be maintained in order to
		// persist it in the state.
		Private: req.Private,
	}

	res := s.provider.ResourcesMap[req.TypeName]
	schemaBlock := s.getResourceSchemaBlock(req.TypeName)

	stateVal, err := msgpack.Unmarshal(req.CurrentState.Msgpack, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	instanceState, err := res.ShimInstanceStateFromValue(stateVal)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	private := make(map[string]interface{})
	if len(req.Private) > 0 {
		if err := json.Unmarshal(req.Private, &private); err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			return resp, nil
		}
	}
	instanceState.Meta = private

	newInstanceState, err := res.RefreshWithoutUpgrade(instanceState, s.provider.Meta())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	if newInstanceState == nil || newInstanceState.ID == "" {
		// The old provider API used an empty id to signal that the remote
		// object appears to have been deleted, but our new protocol expects
		// to see a null value (in the cty sense) in that case.
		newStateMP, err := msgpack.Marshal(cty.NullVal(schemaBlock.ImpliedType()), schemaBlock.ImpliedType())
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		}
		resp.NewState = &proto.DynamicValue{
			Msgpack: newStateMP,
		}
		return resp, nil
	}

	// helper/schema should always copy the ID over, but do it again just to be safe
	newInstanceState.Attributes["id"] = newInstanceState.ID

	newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(newInstanceState.Attributes, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	newStateVal = normalizeNullValues(newStateVal, stateVal, false)
	newStateVal = copyTimeoutValues(newStateVal, stateVal)

	newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	resp.NewState = &proto.DynamicValue{
		Msgpack: newStateMP,
	}

	return resp, nil
}

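// PlanResourceChange produces a planned new state for the resource by running
// the legacy SDK diff against the prior state and proposed configuration, and
// reports which attribute changes require replacing the instance.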
func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.PlanResourceChange_Request) (*proto.PlanResourceChange_Response, error) {
	resp := &proto.PlanResourceChange_Response{}

	// This is a signal to Terraform Core that we're doing the best we can to
	// shim the legacy type system of the SDK onto the Terraform type system
	// but we need it to cut us some slack. This setting should not be taken
	// forward to any new SDK implementations, since setting it prevents us
	// from catching certain classes of provider bug that can lead to
	// confusing downstream errors.
	resp.LegacyTypeSystem = true

	res := s.provider.ResourcesMap[req.TypeName]
	schemaBlock := s.getResourceSchemaBlock(req.TypeName)

	priorStateVal, err := msgpack.Unmarshal(req.PriorState.Msgpack, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	create := priorStateVal.IsNull()

	proposedNewStateVal, err := msgpack.Unmarshal(req.ProposedNewState.Msgpack, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// We don't usually plan destroys, but this can return early in any case.
	if proposedNewStateVal.IsNull() {
		resp.PlannedState = req.ProposedNewState
		resp.PlannedPrivate = req.PriorPrivate
		return resp, nil
	}

	info := &terraform.InstanceInfo{
		Type: req.TypeName,
	}

	priorState, err := res.ShimInstanceStateFromValue(priorStateVal)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}
	priorPrivate := make(map[string]interface{})
	if len(req.PriorPrivate) > 0 {
		if err := json.Unmarshal(req.PriorPrivate, &priorPrivate); err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			return resp, nil
		}
	}

	priorState.Meta = priorPrivate

	// Ensure there are no nulls that will cause helper/schema to panic.
	if err := validateConfigNulls(proposedNewStateVal, nil); err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// turn the proposed state into a legacy configuration
	cfg := terraform.NewResourceConfigShimmed(proposedNewStateVal, schemaBlock)

	diff, err := s.provider.SimpleDiff(info, priorState, cfg)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// if this is a new instance, we need to make sure ID is going to be computed
	if create {
		if diff == nil {
			diff = terraform.NewInstanceDiff()
		}

		diff.Attributes["id"] = &terraform.ResourceAttrDiff{
			NewComputed: true,
		}
	}

	if diff == nil || len(diff.Attributes) == 0 {
		// schema.Provider.Diff returns nil if it ends up making a diff with no
		// changes, but our new interface wants us to return an actual change
		// description that _shows_ there are no changes. This is always the
		// prior state, because we force a diff above if this is a new instance.
		resp.PlannedState = req.PriorState
		resp.PlannedPrivate = req.PriorPrivate
		return resp, nil
	}

	if priorState == nil {
		priorState = &terraform.InstanceState{}
	}

	// now we need to apply the diff to the prior state, so get the planned state
	plannedAttrs, err := diff.Apply(priorState.Attributes, schemaBlock)

	plannedStateVal, err := hcl2shim.HCL2ValueFromFlatmap(plannedAttrs, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	plannedStateVal, err = schemaBlock.CoerceValue(plannedStateVal)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	plannedStateVal = normalizeNullValues(plannedStateVal, proposedNewStateVal, false)

	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	plannedStateVal = copyTimeoutValues(plannedStateVal, proposedNewStateVal)

	// The old SDK code has some imprecisions that cause it to sometimes
	// generate differences that the SDK itself does not consider significant
	// but Terraform Core would. To avoid producing weird do-nothing diffs
	// in that case, we'll check if the provider has produced something we
	// think is "equivalent" to the prior state and just return the prior state
	// itself if so, thus ensuring that Terraform Core will treat this as
	// a no-op. See the docs for ValuesSDKEquivalent for some caveats on its
	// accuracy.
	forceNoChanges := false
	if hcl2shim.ValuesSDKEquivalent(priorStateVal, plannedStateVal) {
		plannedStateVal = priorStateVal
		forceNoChanges = true
	}

	// if this was creating the resource, we need to set any remaining computed
	// fields
	if create {
		plannedStateVal = SetUnknowns(plannedStateVal, schemaBlock)
	}

	plannedMP, err := msgpack.Marshal(plannedStateVal, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}
	resp.PlannedState = &proto.DynamicValue{
		Msgpack: plannedMP,
	}

	// encode any timeouts into the diff Meta
	t := &schema.ResourceTimeout{}
	if err := t.ConfigDecode(res, cfg); err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	if err := t.DiffEncode(diff); err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// Now we need to store any NewExtra values, which are where any actual
	// StateFunc modified config fields are hidden.
	privateMap := diff.Meta
	if privateMap == nil {
		privateMap = map[string]interface{}{}
	}

	newExtra := map[string]interface{}{}

	for k, v := range diff.Attributes {
		if v.NewExtra != nil {
			newExtra[k] = v.NewExtra
		}
	}
	privateMap[newExtraKey] = newExtra

	// the Meta field gets encoded into PlannedPrivate
	plannedPrivate, err := json.Marshal(privateMap)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}
	resp.PlannedPrivate = plannedPrivate

	// collect the attributes that require instance replacement, and convert
	// them to cty.Paths.
	var requiresNew []string
	if !forceNoChanges {
		for attr, d := range diff.Attributes {
			if d.RequiresNew {
				requiresNew = append(requiresNew, attr)
			}
		}
	}

	// If anything requires a new resource already, or the "id" field indicates
	// that we will be creating a new resource, then we need to add that to
	// RequiresReplace so that core can tell if the instance is being replaced
	// even if changes are being suppressed via "ignore_changes".
	id := plannedStateVal.GetAttr("id")
	if len(requiresNew) > 0 || id.IsNull() || !id.IsKnown() {
		requiresNew = append(requiresNew, "id")
	}

	requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// convert these to the protocol structures
	for _, p := range requiresReplace {
		resp.RequiresReplace = append(resp.RequiresReplace, pathToAttributePath(p))
	}

	return resp, nil
}

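// ApplyResourceChange applies the planned change by converting the planned and
// prior states into a legacy InstanceDiff, calling the provider's Apply, and
// converting the resulting instance state back into the wire format.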
func (s *GRPCProviderServer) ApplyResourceChange(_ context.Context, req *proto.ApplyResourceChange_Request) (*proto.ApplyResourceChange_Response, error) {
	resp := &proto.ApplyResourceChange_Response{
		// Start with the existing state as a fallback
		NewState: req.PriorState,
	}

	res := s.provider.ResourcesMap[req.TypeName]
	schemaBlock := s.getResourceSchemaBlock(req.TypeName)

	priorStateVal, err := msgpack.Unmarshal(req.PriorState.Msgpack, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	plannedStateVal, err := msgpack.Unmarshal(req.PlannedState.Msgpack, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	info := &terraform.InstanceInfo{
		Type: req.TypeName,
	}

	priorState, err := res.ShimInstanceStateFromValue(priorStateVal)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	private := make(map[string]interface{})
	if len(req.PlannedPrivate) > 0 {
		if err := json.Unmarshal(req.PlannedPrivate, &private); err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			return resp, nil
		}
	}

	var diff *terraform.InstanceDiff
	destroy := false

	// a null state means we are destroying the instance
	if plannedStateVal.IsNull() {
		destroy = true
		diff = &terraform.InstanceDiff{
			Attributes: make(map[string]*terraform.ResourceAttrDiff),
			Meta:       make(map[string]interface{}),
			Destroy:    true,
		}
	} else {
		diff, err = schema.DiffFromValues(priorStateVal, plannedStateVal, stripResourceModifiers(res))
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			return resp, nil
		}
	}

	if diff == nil {
		diff = &terraform.InstanceDiff{
			Attributes: make(map[string]*terraform.ResourceAttrDiff),
			Meta:       make(map[string]interface{}),
		}
	}

	// add NewExtra Fields that may have been stored in the private data
	if newExtra := private[newExtraKey]; newExtra != nil {
		for k, v := range newExtra.(map[string]interface{}) {
			d := diff.Attributes[k]

			if d == nil {
				d = &terraform.ResourceAttrDiff{}
			}

			d.NewExtra = v
			diff.Attributes[k] = d
		}
	}

	if private != nil {
		diff.Meta = private
	}

	var newRemoved []string
	for k, d := range diff.Attributes {
		// We need to turn off any RequiresNew. There could be attributes
		// without changes in here inserted by helper/schema, but if they have
		// RequiresNew then the state will be dropped from the ResourceData.
		d.RequiresNew = false

		// Record any "removed" attributes so we can make sure they are
		// actually removed from the state later, and drop those that don't
		// actually exist in the prior state, or helper/schema will confuse
		// itself.
		if d.NewRemoved {
			newRemoved = append(newRemoved, k)
			if _, ok := priorState.Attributes[k]; !ok {
				delete(diff.Attributes, k)
			}
		}
	}

	newInstanceState, err := s.provider.Apply(info, priorState, diff)
	// we record the error here, but continue processing any returned state.
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
	}
	newStateVal := cty.NullVal(schemaBlock.ImpliedType())

	// Always return a null value for destroy.
	// While this is usually indicated by a nil state, check for missing ID or
	// attributes in the case of a provider failure.
	if destroy || newInstanceState == nil || newInstanceState.Attributes == nil || newInstanceState.ID == "" {
		newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType())
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			return resp, nil
		}
		resp.NewState = &proto.DynamicValue{
			Msgpack: newStateMP,
		}
		return resp, nil
	}

	// Now remove any primitive zero values that were left from NewRemoved
	// attributes. Any attempt to reconcile more complex structures to the best
	// of our abilities happens in normalizeNullValues.
	for _, r := range newRemoved {
		if strings.HasSuffix(r, ".#") || strings.HasSuffix(r, ".%") {
			continue
		}
		switch newInstanceState.Attributes[r] {
		case "", "0", "false":
			delete(newInstanceState.Attributes, r)
		}
	}

	// We keep the null val if we destroyed the resource, otherwise build the
	// entire object, even if the new state was nil.
	newStateVal, err = schema.StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	newStateVal = normalizeNullValues(newStateVal, plannedStateVal, true)

	newStateVal = copyTimeoutValues(newStateVal, plannedStateVal)

	newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}
	resp.NewState = &proto.DynamicValue{
		Msgpack: newStateMP,
	}

	meta, err := json.Marshal(newInstanceState.Meta)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}
	resp.Private = meta

	// This is a signal to Terraform Core that we're doing the best we can to
	// shim the legacy type system of the SDK onto the Terraform type system
	// but we need it to cut us some slack. This setting should not be taken
	// forward to any new SDK implementations, since setting it prevents us
	// from catching certain classes of provider bug that can lead to
	// confusing downstream errors.
	resp.LegacyTypeSystem = true

	return resp, nil
}

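// ImportResourceState imports one or more instances by delegating to the
// provider's ImportState and converting each returned instance state into the
// protocol representation.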
func (s *GRPCProviderServer) ImportResourceState(_ context.Context, req *proto.ImportResourceState_Request) (*proto.ImportResourceState_Response, error) {
	resp := &proto.ImportResourceState_Response{}

	info := &terraform.InstanceInfo{
		Type: req.TypeName,
	}

	newInstanceStates, err := s.provider.ImportState(info, req.Id)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	for _, is := range newInstanceStates {
		// copy the ID again just to be sure it wasn't missed
		is.Attributes["id"] = is.ID

		resourceType := is.Ephemeral.Type
		if resourceType == "" {
			resourceType = req.TypeName
		}

		schemaBlock := s.getResourceSchemaBlock(resourceType)
		newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schemaBlock.ImpliedType())
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			return resp, nil
		}

		// Normalize the value and fill in any missing blocks.
		newStateVal = objchange.NormalizeObjectFromLegacySDK(newStateVal, schemaBlock)

		newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType())
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			return resp, nil
		}

		meta, err := json.Marshal(is.Meta)
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			return resp, nil
		}

		importedResource := &proto.ImportResourceState_ImportedResource{
			TypeName: resourceType,
			State: &proto.DynamicValue{
				Msgpack: newStateMP,
			},
			Private: meta,
		}

		resp.ImportedResources = append(resp.ImportedResources, importedResource)
	}

	return resp, nil
}

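// ReadDataSource reads a data source by building a legacy diff from the config
// and applying it, then returns the resulting state.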
func (s *GRPCProviderServer) ReadDataSource(_ context.Context, req *proto.ReadDataSource_Request) (*proto.ReadDataSource_Response, error) {
	resp := &proto.ReadDataSource_Response{}

	schemaBlock := s.getDatasourceSchemaBlock(req.TypeName)

	configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	info := &terraform.InstanceInfo{
		Type: req.TypeName,
	}

	// Ensure there are no nulls that will cause helper/schema to panic.
	if err := validateConfigNulls(configVal, nil); err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	config := terraform.NewResourceConfigShimmed(configVal, schemaBlock)

	// we need to still build the diff separately with the Read method to match
	// the old behavior
	diff, err := s.provider.ReadDataDiff(info, config)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// now we can get the new complete data source
	newInstanceState, err := s.provider.ReadDataApply(info, diff)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	newStateVal, err := schema.StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	newStateVal = copyTimeoutValues(newStateVal, configVal)

	newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}
	resp.State = &proto.DynamicValue{
		Msgpack: newStateMP,
	}
	return resp, nil
}

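// pathToAttributePath converts a cty.Path into the proto.AttributePath
// representation used in RPC responses.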
func pathToAttributePath(path cty.Path) *proto.AttributePath {
	var steps []*proto.AttributePath_Step

	for _, step := range path {
		switch s := step.(type) {
		case cty.GetAttrStep:
			steps = append(steps, &proto.AttributePath_Step{
				Selector: &proto.AttributePath_Step_AttributeName{
					AttributeName: s.Name,
				},
			})
		case cty.IndexStep:
			ty := s.Key.Type()
			switch ty {
			case cty.Number:
				i, _ := s.Key.AsBigFloat().Int64()
				steps = append(steps, &proto.AttributePath_Step{
					Selector: &proto.AttributePath_Step_ElementKeyInt{
						ElementKeyInt: i,
					},
				})
			case cty.String:
				steps = append(steps, &proto.AttributePath_Step{
					Selector: &proto.AttributePath_Step_ElementKeyString{
						ElementKeyString: s.Key.AsString(),
					},
				})
			}
		}
	}

	return &proto.AttributePath{Steps: steps}
}

// helper/schema throws away timeout values from the config and stores them in
// the Private/Meta fields. We need to copy those values into the planned state
// so that core doesn't see a perpetual diff with the timeout block.
func copyTimeoutValues(to cty.Value, from cty.Value) cty.Value {
	// if `to` is null we are planning to remove it altogether.
	if to.IsNull() {
		return to
	}
	toAttrs := to.AsValueMap()
	// We need to remove the key since the hcl2shims will add a non-null block
	// because we can't determine if a single block was null from the flatmapped
	// values. This needs to conform to the correct schema for marshaling, so
	// change the value to null rather than deleting it from the object map.
	timeouts, ok := toAttrs[schema.TimeoutsConfigKey]
	if ok {
		toAttrs[schema.TimeoutsConfigKey] = cty.NullVal(timeouts.Type())
	}

	// if from is null then there are no timeouts to copy
	if from.IsNull() {
		return cty.ObjectVal(toAttrs)
	}

	fromAttrs := from.AsValueMap()
	timeouts, ok = fromAttrs[schema.TimeoutsConfigKey]

	// timeouts shouldn't be unknown, but don't copy possibly invalid values either
	if !ok || timeouts.IsNull() || !timeouts.IsWhollyKnown() {
		// no timeouts block to copy
		return cty.ObjectVal(toAttrs)
	}

	toAttrs[schema.TimeoutsConfigKey] = timeouts

	return cty.ObjectVal(toAttrs)
}

// stripResourceModifiers takes a *schema.Resource and returns a deep copy with all
// StateFuncs and CustomizeDiffs removed. This will be used during apply to
// create a diff from a planned state where the diff modifications have already
// been applied.
func stripResourceModifiers(r *schema.Resource) *schema.Resource {
	if r == nil {
		return nil
	}
	// start with a shallow copy
	newResource := new(schema.Resource)
	*newResource = *r

	newResource.CustomizeDiff = nil
	newResource.Schema = map[string]*schema.Schema{}

	for k, s := range r.Schema {
		newResource.Schema[k] = stripSchema(s)
	}

	return newResource
}

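// stripSchema returns a copy of the schema with its StateFunc removed,
// recursing into any nested schema or resource Elem.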
func stripSchema(s *schema.Schema) *schema.Schema {
	if s == nil {
		return nil
	}
	// start with a shallow copy
	newSchema := new(schema.Schema)
	*newSchema = *s

	newSchema.StateFunc = nil

	switch e := newSchema.Elem.(type) {
	case *schema.Schema:
		newSchema.Elem = stripSchema(e)
	case *schema.Resource:
		newSchema.Elem = stripResourceModifiers(e)
	}

	return newSchema
}

// Zero values and empty containers may be interchanged by the apply process.
// When there is a discrepancy between src and dst value being null or empty,
// prefer the src value. This takes a little more liberty with set types, since
// we can't correlate modified set values. In the case of sets, if the src set
// was wholly known we assume the value was correctly applied and copy that
// entirely to the new value.
// While apply prefers the src value, during plan we prefer dst whenever there
// is an unknown or a set is involved, since the plan can alter the value
// however it sees fit. This however means that a CustomizeDiffFunction may not
// be able to change a null to an empty value or vice versa, but that should be
// very uncommon, nor was it reliable before 0.12 either.
func normalizeNullValues(dst, src cty.Value, apply bool) cty.Value {
|
2019-01-08 19:50:23 +01:00
|
|
|
ty := dst.Type()
|
2019-01-31 21:40:56 +01:00
|
|
|
if !src.IsNull() && !src.IsKnown() {
|
2019-04-23 18:59:30 +02:00
|
|
|
// Return src during plan to retain unknown interpolated placeholders,
|
|
|
|
// which could be lost if we're only updating a resource. If this is a
|
2019-05-14 00:39:55 +02:00
|
|
|
// read scenario, then there shouldn't be any unknowns at all.
|
2019-04-23 18:59:30 +02:00
|
|
|
if dst.IsNull() && !apply {
|
2019-03-29 14:36:27 +01:00
|
|
|
return src
|
|
|
|
}
|
2019-01-31 21:40:56 +01:00
|
|
|
return dst
|
|
|
|
}

	// Handle null/empty changes for collections during apply.
	// A change between null and empty values prefers src to make sure the state
	// is consistent between plan and apply.
	if ty.IsCollectionType() && apply {
		dstEmpty := !dst.IsNull() && dst.IsKnown() && dst.LengthInt() == 0
		srcEmpty := !src.IsNull() && src.IsKnown() && src.LengthInt() == 0

		if (src.IsNull() && dstEmpty) || (srcEmpty && dst.IsNull()) {
			return src
		}
	}

	// check the invariants that we need below, to ensure we are working with
	// non-null and known values.
	if src.IsNull() || !src.IsKnown() || !dst.IsKnown() {
		return dst
	}

	switch {
	case ty.IsMapType(), ty.IsObjectType():
		var dstMap map[string]cty.Value
		if !dst.IsNull() {
			dstMap = dst.AsValueMap()
		}
		if dstMap == nil {
			dstMap = map[string]cty.Value{}
		}

		srcMap := src.AsValueMap()
		for key, v := range srcMap {
			dstVal, ok := dstMap[key]
			if !ok && apply && ty.IsMapType() {
				// don't transfer old map values to dst during apply
				continue
			}

			if dstVal == cty.NilVal {
				if !apply && ty.IsMapType() {
					// let plan shape this map however it wants
					continue
				}
				dstVal = cty.NullVal(v.Type())
			}

			dstMap[key] = normalizeNullValues(dstVal, v, apply)
		}

		// you can't call MapVal/ObjectVal with empty maps, but nothing was
		// copied in anyway. If the dst is null, and the src is known, assume
		// the src is correct.
		if len(dstMap) == 0 {
			if dst.IsNull() && src.IsWhollyKnown() && apply {
				return src
			}
			return dst
		}

		if ty.IsMapType() {
			// helper/schema will populate an optional+computed map with
			// unknowns, which we have to fix up here.
			// It would be preferable to simply prevent any known value from
			// becoming unknown, but concessions have to be made to retain the
			// broken legacy behavior when possible.
			for k, srcVal := range srcMap {
				if !srcVal.IsNull() && srcVal.IsKnown() {
					dstVal, ok := dstMap[k]
					if !ok {
						continue
					}

					if !dstVal.IsNull() && !dstVal.IsKnown() {
						dstMap[k] = srcVal
					}
				}
			}

			return cty.MapVal(dstMap)
		}

		return cty.ObjectVal(dstMap)

	case ty.IsSetType():
		// If the original was wholly known, then we expect that is what the
		// provider applied. The apply process loses too much information to
		// reliably re-create the set.
		if src.IsWhollyKnown() && apply {
			return src
		}

	case ty.IsListType(), ty.IsTupleType():
		// If the dst is null, and the src is known, then we lost an empty value
		// so take the original.
		if dst.IsNull() {
			if src.IsWhollyKnown() && src.LengthInt() == 0 && apply {
				return src
			}

			// if dst is null and src only contains unknown values, then we lost
			// those during a read or plan.
			if !apply && !src.IsNull() {
				allUnknown := true
				for _, v := range src.AsValueSlice() {
					if v.IsKnown() {
						allUnknown = false
						break
					}
				}
				if allUnknown {
					return src
				}
			}

			return dst
		}

		// if the lengths are identical, then iterate over each element in succession.
		srcLen := src.LengthInt()
		dstLen := dst.LengthInt()
		if srcLen == dstLen && srcLen > 0 {
			srcs := src.AsValueSlice()
			dsts := dst.AsValueSlice()

			for i := 0; i < srcLen; i++ {
				dsts[i] = normalizeNullValues(dsts[i], srcs[i], apply)
			}

			if ty.IsTupleType() {
				return cty.TupleVal(dsts)
			}
			return cty.ListVal(dsts)
		}

	case ty == cty.String:
		// The legacy SDK should not be able to remove a value during plan or
		// apply, however we are only going to overwrite this if the source was
		// an empty string, since that is what is often equated with unset and
		// lost in the diff process.
		if dst.IsNull() && src.AsString() == "" {
			return src
		}
	}

	return dst
}
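
// exampleNormalizeNullValues is a hypothetical, illustrative sketch (not part
// of the original code) of the null/empty reconciliation described above:
// during apply, a null dst and a known empty src collection prefer src so the
// applied state matches the plan. The list-of-strings type is made up for
// illustration.
func exampleNormalizeNullValues() {
	src := cty.ListValEmpty(cty.String)      // value returned by the provider
	dst := cty.NullVal(cty.List(cty.String)) // value produced by the shims

	normalized := normalizeNullValues(dst, src, true)
	// normalized is the empty list from src rather than the null dst
	_ = normalized
}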

// validateConfigNulls checks a config value for unsupported nulls before
// attempting to shim the value. While null values can mostly be ignored in the
// configuration, since they're not supported in HCL1, the case where a null
// appears in a list-like attribute (list, set, tuple) will present a nil value
// to helper/schema which can panic. Return an error to the user in this case,
// indicating the attribute with the null value.
func validateConfigNulls(v cty.Value, path cty.Path) []*proto.Diagnostic {
	var diags []*proto.Diagnostic
	if v.IsNull() || !v.IsKnown() {
		return diags
	}

	switch {
	case v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType():
		it := v.ElementIterator()
		for it.Next() {
			kv, ev := it.Element()
			if ev.IsNull() {
				diags = append(diags, &proto.Diagnostic{
					Severity:  proto.Diagnostic_ERROR,
					Summary:   "Null value found in list",
					Detail:    "Null values are not allowed for this attribute value.",
					Attribute: convert.PathToAttributePath(append(path, cty.IndexStep{Key: kv})),
				})
				continue
			}

			d := validateConfigNulls(ev, append(path, cty.IndexStep{Key: kv}))
			diags = convert.AppendProtoDiag(diags, d)
		}

	case v.Type().IsMapType() || v.Type().IsObjectType():
		it := v.ElementIterator()
		for it.Next() {
			kv, ev := it.Element()
			var step cty.PathStep
			switch {
			case v.Type().IsMapType():
				step = cty.IndexStep{Key: kv}
			case v.Type().IsObjectType():
				step = cty.GetAttrStep{Name: kv.AsString()}
			}
			d := validateConfigNulls(ev, append(path, step))
			diags = convert.AppendProtoDiag(diags, d)
		}
	}

	return diags
}
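
// exampleValidateConfigNulls is a hypothetical, illustrative sketch (not part
// of the original code): a null element inside a list-like attribute produces
// an error diagnostic, since such nulls can panic HCL1-era helper/schema
// code. The config value below is made up for illustration.
func exampleValidateConfigNulls() {
	val := cty.ObjectVal(map[string]cty.Value{
		"ports": cty.TupleVal([]cty.Value{
			cty.NumberIntVal(80),
			cty.NullVal(cty.Number), // this element triggers a diagnostic
		}),
	})

	diags := validateConfigNulls(val, nil)
	// diags holds a single error pointing at ports[1]
	_ = diags
}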