plugin6: new plugin package to support plugin protocol v6
plugin6 includes a `convert` package to handle conversion between the plugin protocol and configschema, and the GRPCProviderPlugin interface implementation for protocol v6.
This commit is contained in:
parent
da6ac9d6cd
commit
1a8d873c22
|
@ -0,0 +1,132 @@
|
|||
package convert
|
||||
|
||||
import (
|
||||
proto "github.com/hashicorp/terraform/internal/tfplugin6"
|
||||
"github.com/hashicorp/terraform/tfdiags"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// WarnsAndErrsToProto converts the warning strings and errors returned by the
// legacy provider to protobuf diagnostics. Warnings are appended first, then
// errors, each one delegated to AppendProtoDiag.
func WarnsAndErrsToProto(warns []string, errs []error) (diags []*proto.Diagnostic) {
	// Each warning string becomes a WARNING-severity diagnostic.
	for _, w := range warns {
		diags = AppendProtoDiag(diags, w)
	}

	// Each error becomes an ERROR-severity diagnostic.
	for _, e := range errs {
		diags = AppendProtoDiag(diags, e)
	}

	return diags
}
|
||||
|
||||
// AppendProtoDiag appends a new diagnostic converted from d, which may be a
// warning string, an error, a cty.PathError, a single *proto.Diagnostic, or a
// slice of *proto.Diagnostic. Values of any other type are silently ignored
// (there is no default case), and diags is returned unchanged.
func AppendProtoDiag(diags []*proto.Diagnostic, d interface{}) []*proto.Diagnostic {
	switch d := d.(type) {
	case cty.PathError:
		// A path error carries an attribute path, which we translate so the
		// diagnostic can point at the offending attribute.
		// NOTE: this case must precede the generic error case below, since a
		// cty.PathError also satisfies the error interface.
		ap := PathToAttributePath(d.Path)
		diags = append(diags, &proto.Diagnostic{
			Severity: proto.Diagnostic_ERROR,
			Summary: d.Error(),
			Attribute: ap,
		})
	case error:
		// Errors become ERROR-severity diagnostics.
		diags = append(diags, &proto.Diagnostic{
			Severity: proto.Diagnostic_ERROR,
			Summary: d.Error(),
		})
	case string:
		// Bare strings are treated as warnings.
		diags = append(diags, &proto.Diagnostic{
			Severity: proto.Diagnostic_WARNING,
			Summary: d,
		})
	case *proto.Diagnostic:
		// Already in wire format; append as-is.
		diags = append(diags, d)
	case []*proto.Diagnostic:
		// A batch already in wire format; append all of them.
		diags = append(diags, d...)
	}
	return diags
}
|
||||
|
||||
// ProtoToDiagnostics converts a list of proto.Diagnostics to a tf.Diagnostics.
|
||||
func ProtoToDiagnostics(ds []*proto.Diagnostic) tfdiags.Diagnostics {
|
||||
var diags tfdiags.Diagnostics
|
||||
for _, d := range ds {
|
||||
var severity tfdiags.Severity
|
||||
|
||||
switch d.Severity {
|
||||
case proto.Diagnostic_ERROR:
|
||||
severity = tfdiags.Error
|
||||
case proto.Diagnostic_WARNING:
|
||||
severity = tfdiags.Warning
|
||||
}
|
||||
|
||||
var newDiag tfdiags.Diagnostic
|
||||
|
||||
// if there's an attribute path, we need to create a AttributeValue diagnostic
|
||||
if d.Attribute != nil {
|
||||
path := AttributePathToPath(d.Attribute)
|
||||
newDiag = tfdiags.AttributeValue(severity, d.Summary, d.Detail, path)
|
||||
} else {
|
||||
newDiag = tfdiags.WholeContainingBody(severity, d.Summary, d.Detail)
|
||||
}
|
||||
|
||||
diags = diags.Append(newDiag)
|
||||
}
|
||||
|
||||
return diags
|
||||
}
|
||||
|
||||
// AttributePathToPath takes the proto encoded path and converts it to a cty.Path
|
||||
func AttributePathToPath(ap *proto.AttributePath) cty.Path {
|
||||
var p cty.Path
|
||||
for _, step := range ap.Steps {
|
||||
switch selector := step.Selector.(type) {
|
||||
case *proto.AttributePath_Step_AttributeName:
|
||||
p = p.GetAttr(selector.AttributeName)
|
||||
case *proto.AttributePath_Step_ElementKeyString:
|
||||
p = p.Index(cty.StringVal(selector.ElementKeyString))
|
||||
case *proto.AttributePath_Step_ElementKeyInt:
|
||||
p = p.Index(cty.NumberIntVal(selector.ElementKeyInt))
|
||||
}
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// PathToAttributePath takes a cty.Path and converts it to a proto-encoded path.
// If an index step uses a key type other than string or number, conversion
// stops and only the valid prefix is returned.
func PathToAttributePath(p cty.Path) *proto.AttributePath {
	ap := &proto.AttributePath{}
	for _, step := range p {
		switch selector := step.(type) {
		case cty.GetAttrStep:
			ap.Steps = append(ap.Steps, &proto.AttributePath_Step{
				Selector: &proto.AttributePath_Step_AttributeName{
					AttributeName: selector.Name,
				},
			})
		case cty.IndexStep:
			key := selector.Key
			switch key.Type() {
			case cty.String:
				ap.Steps = append(ap.Steps, &proto.AttributePath_Step{
					Selector: &proto.AttributePath_Step_ElementKeyString{
						ElementKeyString: key.AsString(),
					},
				})
			case cty.Number:
				// Non-integer numbers are truncated toward zero here; the
				// conversion error is deliberately discarded.
				v, _ := key.AsBigFloat().Int64()
				ap.Steps = append(ap.Steps, &proto.AttributePath_Step{
					Selector: &proto.AttributePath_Step_ElementKeyInt{
						ElementKeyInt: v,
					},
				})
			default:
				// We'll bail early if we encounter anything else, and just
				// return the valid prefix.
				return ap
			}
		}
	}
	return ap
}
|
|
@ -0,0 +1,367 @@
|
|||
package convert
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
proto "github.com/hashicorp/terraform/internal/tfplugin6"
|
||||
"github.com/hashicorp/terraform/tfdiags"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// ignoreUnexported tells go-cmp to skip the unexported (protobuf-internal)
// fields of the generated wire types compared in these tests.
var ignoreUnexported = cmpopts.IgnoreUnexported(
	proto.Diagnostic{},
	proto.Schema_Block{},
	proto.Schema_NestedBlock{},
	proto.Schema_Attribute{},
)
|
||||
|
||||
// TestProtoDiagnostics verifies that WarnsAndErrsToProto emits all warnings
// first, then all errors, with the expected severities and summaries.
func TestProtoDiagnostics(t *testing.T) {
	diags := WarnsAndErrsToProto(
		[]string{
			"warning 1",
			"warning 2",
		},
		[]error{
			errors.New("error 1"),
			errors.New("error 2"),
		},
	)

	// Warnings precede errors, in input order.
	expected := []*proto.Diagnostic{
		{
			Severity: proto.Diagnostic_WARNING,
			Summary: "warning 1",
		},
		{
			Severity: proto.Diagnostic_WARNING,
			Summary: "warning 2",
		},
		{
			Severity: proto.Diagnostic_ERROR,
			Summary: "error 1",
		},
		{
			Severity: proto.Diagnostic_ERROR,
			Summary: "error 2",
		},
	}

	if !cmp.Equal(expected, diags, ignoreUnexported) {
		t.Fatal(cmp.Diff(expected, diags, ignoreUnexported))
	}
}
|
||||
|
||||
// TestDiagnostics exercises ProtoToDiagnostics over a table of wire-format
// diagnostics, flattening the results into a comparable struct that captures
// severity, attribute path, summary and detail.
func TestDiagnostics(t *testing.T) {
	// diagFlat is a flattened, directly comparable view of a tfdiags.Diagnostic.
	type diagFlat struct {
		Severity tfdiags.Severity
		Attr []interface{}
		Summary string
		Detail string
	}

	tests := map[string]struct {
		// Cons builds the input protobuf diagnostics from an initial slice.
		Cons func([]*proto.Diagnostic) []*proto.Diagnostic
		// Want is the expected flattened result.
		Want []diagFlat
	}{
		"nil": {
			func(diags []*proto.Diagnostic) []*proto.Diagnostic {
				return diags
			},
			nil,
		},
		"error": {
			func(diags []*proto.Diagnostic) []*proto.Diagnostic {
				return append(diags, &proto.Diagnostic{
					Severity: proto.Diagnostic_ERROR,
					Summary: "simple error",
				})
			},
			[]diagFlat{
				{
					Severity: tfdiags.Error,
					Summary: "simple error",
				},
			},
		},
		"detailed error": {
			func(diags []*proto.Diagnostic) []*proto.Diagnostic {
				return append(diags, &proto.Diagnostic{
					Severity: proto.Diagnostic_ERROR,
					Summary: "simple error",
					Detail: "detailed error",
				})
			},
			[]diagFlat{
				{
					Severity: tfdiags.Error,
					Summary: "simple error",
					Detail: "detailed error",
				},
			},
		},
		"warning": {
			func(diags []*proto.Diagnostic) []*proto.Diagnostic {
				return append(diags, &proto.Diagnostic{
					Severity: proto.Diagnostic_WARNING,
					Summary: "simple warning",
				})
			},
			[]diagFlat{
				{
					Severity: tfdiags.Warning,
					Summary: "simple warning",
				},
			},
		},
		"detailed warning": {
			func(diags []*proto.Diagnostic) []*proto.Diagnostic {
				return append(diags, &proto.Diagnostic{
					Severity: proto.Diagnostic_WARNING,
					Summary: "simple warning",
					Detail: "detailed warning",
				})
			},
			[]diagFlat{
				{
					Severity: tfdiags.Warning,
					Summary: "simple warning",
					Detail: "detailed warning",
				},
			},
		},
		"multi error": {
			func(diags []*proto.Diagnostic) []*proto.Diagnostic {
				diags = append(diags, &proto.Diagnostic{
					Severity: proto.Diagnostic_ERROR,
					Summary: "first error",
				}, &proto.Diagnostic{
					Severity: proto.Diagnostic_ERROR,
					Summary: "second error",
				})
				return diags
			},
			[]diagFlat{
				{
					Severity: tfdiags.Error,
					Summary: "first error",
				},
				{
					Severity: tfdiags.Error,
					Summary: "second error",
				},
			},
		},
		"warning and error": {
			func(diags []*proto.Diagnostic) []*proto.Diagnostic {
				diags = append(diags, &proto.Diagnostic{
					Severity: proto.Diagnostic_WARNING,
					Summary: "warning",
				}, &proto.Diagnostic{
					Severity: proto.Diagnostic_ERROR,
					Summary: "error",
				})
				return diags
			},
			[]diagFlat{
				{
					Severity: tfdiags.Warning,
					Summary: "warning",
				},
				{
					Severity: tfdiags.Error,
					Summary: "error",
				},
			},
		},
		"attr error": {
			func(diags []*proto.Diagnostic) []*proto.Diagnostic {
				diags = append(diags, &proto.Diagnostic{
					Severity: proto.Diagnostic_ERROR,
					Summary: "error",
					Detail: "error detail",
					Attribute: &proto.AttributePath{
						Steps: []*proto.AttributePath_Step{
							{
								Selector: &proto.AttributePath_Step_AttributeName{
									AttributeName: "attribute_name",
								},
							},
						},
					},
				})
				return diags
			},
			[]diagFlat{
				{
					Severity: tfdiags.Error,
					Summary: "error",
					Detail: "error detail",
					Attr: []interface{}{"attribute_name"},
				},
			},
		},
		"multi attr": {
			func(diags []*proto.Diagnostic) []*proto.Diagnostic {
				diags = append(diags,
					&proto.Diagnostic{
						Severity: proto.Diagnostic_ERROR,
						Summary: "error 1",
						Detail: "error 1 detail",
						Attribute: &proto.AttributePath{
							Steps: []*proto.AttributePath_Step{
								{
									Selector: &proto.AttributePath_Step_AttributeName{
										AttributeName: "attr",
									},
								},
							},
						},
					},
					&proto.Diagnostic{
						Severity: proto.Diagnostic_ERROR,
						Summary: "error 2",
						Detail: "error 2 detail",
						Attribute: &proto.AttributePath{
							Steps: []*proto.AttributePath_Step{
								{
									Selector: &proto.AttributePath_Step_AttributeName{
										AttributeName: "attr",
									},
								},
								{
									Selector: &proto.AttributePath_Step_AttributeName{
										AttributeName: "sub",
									},
								},
							},
						},
					},
					&proto.Diagnostic{
						Severity: proto.Diagnostic_WARNING,
						Summary: "warning",
						Detail: "warning detail",
						Attribute: &proto.AttributePath{
							Steps: []*proto.AttributePath_Step{
								{
									Selector: &proto.AttributePath_Step_AttributeName{
										AttributeName: "attr",
									},
								},
								{
									Selector: &proto.AttributePath_Step_ElementKeyInt{
										ElementKeyInt: 1,
									},
								},
								{
									Selector: &proto.AttributePath_Step_AttributeName{
										AttributeName: "sub",
									},
								},
							},
						},
					},
					&proto.Diagnostic{
						Severity: proto.Diagnostic_ERROR,
						Summary: "error 3",
						Detail: "error 3 detail",
						Attribute: &proto.AttributePath{
							Steps: []*proto.AttributePath_Step{
								{
									Selector: &proto.AttributePath_Step_AttributeName{
										AttributeName: "attr",
									},
								},
								{
									Selector: &proto.AttributePath_Step_ElementKeyString{
										ElementKeyString: "idx",
									},
								},
								{
									Selector: &proto.AttributePath_Step_AttributeName{
										AttributeName: "sub",
									},
								},
							},
						},
					},
				)

				return diags
			},
			[]diagFlat{
				{
					Severity: tfdiags.Error,
					Summary: "error 1",
					Detail: "error 1 detail",
					Attr: []interface{}{"attr"},
				},
				{
					Severity: tfdiags.Error,
					Summary: "error 2",
					Detail: "error 2 detail",
					Attr: []interface{}{"attr", "sub"},
				},
				{
					Severity: tfdiags.Warning,
					Summary: "warning",
					Detail: "warning detail",
					Attr: []interface{}{"attr", 1, "sub"},
				},
				{
					Severity: tfdiags.Error,
					Summary: "error 3",
					Detail: "error 3 detail",
					Attr: []interface{}{"attr", "idx", "sub"},
				},
			},
		},
	}

	// flattenTFDiags reduces tfdiags.Diagnostics to []diagFlat, converting
	// cty path steps back into plain strings and ints for comparison.
	flattenTFDiags := func(ds tfdiags.Diagnostics) []diagFlat {
		var flat []diagFlat
		for _, item := range ds {
			desc := item.Description()

			var attr []interface{}

			for _, a := range tfdiags.GetAttribute(item) {
				switch step := a.(type) {
				case cty.GetAttrStep:
					attr = append(attr, step.Name)
				case cty.IndexStep:
					switch step.Key.Type() {
					case cty.Number:
						i, _ := step.Key.AsBigFloat().Int64()
						attr = append(attr, int(i))
					case cty.String:
						attr = append(attr, step.Key.AsString())
					}
				}
			}

			flat = append(flat, diagFlat{
				Severity: item.Severity(),
				Attr: attr,
				Summary: desc.Summary,
				Detail: desc.Detail,
			})
		}
		return flat
	}

	for name, tc := range tests {
		t.Run(name, func(t *testing.T) {
			// Build the wire diagnostics from nil, convert, then flatten and
			// compare against the expectation.
			tfDiags := ProtoToDiagnostics(tc.Cons(nil))

			flat := flattenTFDiags(tfDiags)

			if !cmp.Equal(flat, tc.Want, typeComparer, valueComparer, equateEmpty) {
				t.Fatal(cmp.Diff(flat, tc.Want, typeComparer, valueComparer, equateEmpty))
			}
		})
	}
}
|
|
@ -0,0 +1,301 @@
|
|||
package convert
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"sort"
|
||||
|
||||
"github.com/hashicorp/terraform/configs/configschema"
|
||||
proto "github.com/hashicorp/terraform/internal/tfplugin6"
|
||||
"github.com/hashicorp/terraform/providers"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// ConfigSchemaToProto takes a *configschema.Block and converts it to a
// proto.Schema_Block for a grpc response. Attributes and nested block types
// are emitted in lexical name order (via sortedKeys) so the output is
// deterministic.
func ConfigSchemaToProto(b *configschema.Block) *proto.Schema_Block {
	block := &proto.Schema_Block{
		Description: b.Description,
		DescriptionKind: protoStringKind(b.DescriptionKind),
		Deprecated: b.Deprecated,
	}

	for _, name := range sortedKeys(b.Attributes) {
		a := b.Attributes[name]

		attr := &proto.Schema_Attribute{
			Name: name,
			Description: a.Description,
			DescriptionKind: protoStringKind(a.DescriptionKind),
			Optional: a.Optional,
			Computed: a.Computed,
			Required: a.Required,
			Sensitive: a.Sensitive,
			Deprecated: a.Deprecated,
		}

		// The cty type travels over the wire as its JSON encoding. A marshal
		// failure here indicates a programming error, hence the panic.
		if a.Type != cty.NilType {
			ty, err := json.Marshal(a.Type)
			if err != nil {
				panic(err)
			}
			attr.Type = ty
		}

		// Structured (object) attributes are converted recursively.
		if a.NestedType != nil {
			attr.NestedType = configschemaObjectToProto(a.NestedType)
		}

		block.Attributes = append(block.Attributes, attr)
	}

	for _, name := range sortedKeys(b.BlockTypes) {
		// Note: this b deliberately shadows the outer block parameter.
		b := b.BlockTypes[name]
		block.BlockTypes = append(block.BlockTypes, protoSchemaNestedBlock(name, b))
	}

	return block
}
|
||||
|
||||
func protoStringKind(k configschema.StringKind) proto.StringKind {
|
||||
switch k {
|
||||
default:
|
||||
return proto.StringKind_PLAIN
|
||||
case configschema.StringMarkdown:
|
||||
return proto.StringKind_MARKDOWN
|
||||
}
|
||||
}
|
||||
|
||||
func protoSchemaNestedBlock(name string, b *configschema.NestedBlock) *proto.Schema_NestedBlock {
|
||||
var nesting proto.Schema_NestedBlock_NestingMode
|
||||
switch b.Nesting {
|
||||
case configschema.NestingSingle:
|
||||
nesting = proto.Schema_NestedBlock_SINGLE
|
||||
case configschema.NestingGroup:
|
||||
nesting = proto.Schema_NestedBlock_GROUP
|
||||
case configschema.NestingList:
|
||||
nesting = proto.Schema_NestedBlock_LIST
|
||||
case configschema.NestingSet:
|
||||
nesting = proto.Schema_NestedBlock_SET
|
||||
case configschema.NestingMap:
|
||||
nesting = proto.Schema_NestedBlock_MAP
|
||||
default:
|
||||
nesting = proto.Schema_NestedBlock_INVALID
|
||||
}
|
||||
return &proto.Schema_NestedBlock{
|
||||
TypeName: name,
|
||||
Block: ConfigSchemaToProto(&b.Block),
|
||||
Nesting: nesting,
|
||||
MinItems: int64(b.MinItems),
|
||||
MaxItems: int64(b.MaxItems),
|
||||
}
|
||||
}
|
||||
|
||||
// ProtoToProviderSchema takes a proto.Schema and converts it to a providers.Schema.
|
||||
func ProtoToProviderSchema(s *proto.Schema) providers.Schema {
|
||||
return providers.Schema{
|
||||
Version: s.Version,
|
||||
Block: ProtoToConfigSchema(s.Block),
|
||||
}
|
||||
}
|
||||
|
||||
// ProtoToConfigSchema takes the Schema_Block from a grpc response and converts
// it to a terraform *configschema.Block.
func ProtoToConfigSchema(b *proto.Schema_Block) *configschema.Block {
	block := &configschema.Block{
		Attributes: make(map[string]*configschema.Attribute),
		BlockTypes: make(map[string]*configschema.NestedBlock),

		Description: b.Description,
		DescriptionKind: schemaStringKind(b.DescriptionKind),
		Deprecated: b.Deprecated,
	}

	for _, a := range b.Attributes {
		attr := &configschema.Attribute{
			Description: a.Description,
			DescriptionKind: schemaStringKind(a.DescriptionKind),
			Required: a.Required,
			Optional: a.Optional,
			Computed: a.Computed,
			Sensitive: a.Sensitive,
			Deprecated: a.Deprecated,
		}

		// The attribute type arrives JSON-encoded; an undecodable type is a
		// protocol violation, hence the panic.
		if a.Type != nil {
			if err := json.Unmarshal(a.Type, &attr.Type); err != nil {
				panic(err)
			}
		}

		// Structured (object) attributes are converted recursively.
		if a.NestedType != nil {
			attr.NestedType = protoObjectToConfigSchema(a.NestedType)
		}

		block.Attributes[a.Name] = attr
	}

	for _, b := range b.BlockTypes {
		block.BlockTypes[b.TypeName] = schemaNestedBlock(b)
	}

	return block
}
|
||||
|
||||
func schemaStringKind(k proto.StringKind) configschema.StringKind {
|
||||
switch k {
|
||||
default:
|
||||
return configschema.StringPlain
|
||||
case proto.StringKind_MARKDOWN:
|
||||
return configschema.StringMarkdown
|
||||
}
|
||||
}
|
||||
|
||||
func schemaNestedBlock(b *proto.Schema_NestedBlock) *configschema.NestedBlock {
|
||||
var nesting configschema.NestingMode
|
||||
switch b.Nesting {
|
||||
case proto.Schema_NestedBlock_SINGLE:
|
||||
nesting = configschema.NestingSingle
|
||||
case proto.Schema_NestedBlock_GROUP:
|
||||
nesting = configschema.NestingGroup
|
||||
case proto.Schema_NestedBlock_LIST:
|
||||
nesting = configschema.NestingList
|
||||
case proto.Schema_NestedBlock_MAP:
|
||||
nesting = configschema.NestingMap
|
||||
case proto.Schema_NestedBlock_SET:
|
||||
nesting = configschema.NestingSet
|
||||
default:
|
||||
// In all other cases we'll leave it as the zero value (invalid) and
|
||||
// let the caller validate it and deal with this.
|
||||
}
|
||||
|
||||
nb := &configschema.NestedBlock{
|
||||
Nesting: nesting,
|
||||
MinItems: int(b.MinItems),
|
||||
MaxItems: int(b.MaxItems),
|
||||
}
|
||||
|
||||
nested := ProtoToConfigSchema(b.Block)
|
||||
nb.Block = *nested
|
||||
return nb
|
||||
}
|
||||
|
||||
// protoObjectToConfigSchema converts a wire-format Schema_Object into a
// *configschema.Object, recursing into nested object attributes.
func protoObjectToConfigSchema(b *proto.Schema_Object) *configschema.Object {
	var nesting configschema.NestingMode
	switch b.Nesting {
	case proto.Schema_Object_SINGLE:
		nesting = configschema.NestingSingle
	case proto.Schema_Object_LIST:
		nesting = configschema.NestingList
	case proto.Schema_Object_MAP:
		nesting = configschema.NestingMap
	case proto.Schema_Object_SET:
		nesting = configschema.NestingSet
	default:
		// In all other cases we'll leave it as the zero value (invalid) and
		// let the caller validate it and deal with this.
	}

	object := &configschema.Object{
		Attributes: make(map[string]*configschema.Attribute),
		Nesting: nesting,
		MinItems: int(b.MinItems),
		MaxItems: int(b.MaxItems),
	}

	for _, a := range b.Attributes {
		attr := &configschema.Attribute{
			Description: a.Description,
			DescriptionKind: schemaStringKind(a.DescriptionKind),
			Required: a.Required,
			Optional: a.Optional,
			Computed: a.Computed,
			Sensitive: a.Sensitive,
			Deprecated: a.Deprecated,
		}

		// The attribute type arrives JSON-encoded; an undecodable type is a
		// protocol violation, hence the panic.
		if a.Type != nil {
			if err := json.Unmarshal(a.Type, &attr.Type); err != nil {
				panic(err)
			}
		}

		// Nested objects are converted recursively.
		if a.NestedType != nil {
			attr.NestedType = protoObjectToConfigSchema(a.NestedType)
		}

		object.Attributes[a.Name] = attr
	}

	return object
}
|
||||
|
||||
// sortedKeys returns the lexically sorted keys from the given map. This is
// used to make sure schema conversions are deterministic. This panics if the
// map keys are not strings.
func sortedKeys(m interface{}) []string {
	mv := reflect.ValueOf(m)

	keys := make([]string, 0, mv.Len())
	for _, k := range mv.MapKeys() {
		keys = append(keys, k.Interface().(string))
	}

	sort.Strings(keys)
	return keys
}
|
||||
|
||||
func configschemaObjectToProto(b *configschema.Object) *proto.Schema_Object {
|
||||
var nesting proto.Schema_Object_NestingMode
|
||||
switch b.Nesting {
|
||||
case configschema.NestingSingle:
|
||||
nesting = proto.Schema_Object_SINGLE
|
||||
case configschema.NestingList:
|
||||
nesting = proto.Schema_Object_LIST
|
||||
case configschema.NestingSet:
|
||||
nesting = proto.Schema_Object_SET
|
||||
case configschema.NestingMap:
|
||||
nesting = proto.Schema_Object_MAP
|
||||
default:
|
||||
nesting = proto.Schema_Object_INVALID
|
||||
}
|
||||
|
||||
attributes := make([]*proto.Schema_Attribute, len(b.Attributes))
|
||||
|
||||
for _, name := range sortedKeys(b.Attributes) {
|
||||
a := b.Attributes[name]
|
||||
|
||||
attr := &proto.Schema_Attribute{
|
||||
Name: name,
|
||||
Description: a.Description,
|
||||
DescriptionKind: protoStringKind(a.DescriptionKind),
|
||||
Optional: a.Optional,
|
||||
Computed: a.Computed,
|
||||
Required: a.Required,
|
||||
Sensitive: a.Sensitive,
|
||||
Deprecated: a.Deprecated,
|
||||
}
|
||||
|
||||
if a.Type != cty.NilType {
|
||||
ty, err := json.Marshal(a.Type)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
attr.Type = ty
|
||||
}
|
||||
|
||||
if a.NestedType != nil {
|
||||
attr.NestedType = configschemaObjectToProto(a.NestedType)
|
||||
}
|
||||
|
||||
attributes = append(attributes, attr)
|
||||
}
|
||||
|
||||
return &proto.Schema_Object{
|
||||
Attributes: attributes,
|
||||
Nesting: nesting,
|
||||
MinItems: int64(b.MinItems),
|
||||
MaxItems: int64(b.MaxItems),
|
||||
}
|
||||
}
|
|
@ -0,0 +1,568 @@
|
|||
package convert
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
"github.com/hashicorp/terraform/configs/configschema"
|
||||
proto "github.com/hashicorp/terraform/internal/tfplugin6"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// Shared go-cmp options for the schema conversion tests:
// equateEmpty treats nil and empty slices/maps as equal, while
// typeComparer/valueComparer compare cty types and values structurally.
var (
	equateEmpty = cmpopts.EquateEmpty()
	typeComparer = cmp.Comparer(cty.Type.Equals)
	valueComparer = cmp.Comparer(cty.Value.RawEquals)
)
|
||||
|
||||
// TestConvertSchemaBlocks verifies that ProtoToConfigSchema converts
// wire-format schema blocks — attributes (including nested object types) and
// nested block types — into the expected *configschema.Block values.
func TestConvertSchemaBlocks(t *testing.T) {
	tests := map[string]struct {
		Block *proto.Schema_Block
		Want *configschema.Block
	}{
		// Plain and object-typed attributes, including deep nesting and
		// collection nesting modes with item bounds.
		"attributes": {
			&proto.Schema_Block{
				Attributes: []*proto.Schema_Attribute{
					{
						Name: "computed",
						Type: []byte(`["list","bool"]`),
						Computed: true,
					},
					{
						Name: "optional",
						Type: []byte(`"string"`),
						Optional: true,
					},
					{
						Name: "optional_computed",
						Type: []byte(`["map","bool"]`),
						Optional: true,
						Computed: true,
					},
					{
						Name: "required",
						Type: []byte(`"number"`),
						Required: true,
					},
					{
						Name: "nested_type",
						NestedType: &proto.Schema_Object{
							Nesting: proto.Schema_Object_SINGLE,
							Attributes: []*proto.Schema_Attribute{
								{
									Name: "computed",
									Type: []byte(`["list","bool"]`),
									Computed: true,
								},
								{
									Name: "optional",
									Type: []byte(`"string"`),
									Optional: true,
								},
								{
									Name: "optional_computed",
									Type: []byte(`["map","bool"]`),
									Optional: true,
									Computed: true,
								},
								{
									Name: "required",
									Type: []byte(`"number"`),
									Required: true,
								},
							},
						},
						Required: true,
					},
					{
						Name: "deeply_nested_type",
						NestedType: &proto.Schema_Object{
							Nesting: proto.Schema_Object_SINGLE,
							Attributes: []*proto.Schema_Attribute{
								{
									Name: "first_level",
									NestedType: &proto.Schema_Object{
										Nesting: proto.Schema_Object_SINGLE,
										Attributes: []*proto.Schema_Attribute{
											{
												Name: "computed",
												Type: []byte(`["list","bool"]`),
												Computed: true,
											},
											{
												Name: "optional",
												Type: []byte(`"string"`),
												Optional: true,
											},
											{
												Name: "optional_computed",
												Type: []byte(`["map","bool"]`),
												Optional: true,
												Computed: true,
											},
											{
												Name: "required",
												Type: []byte(`"number"`),
												Required: true,
											},
										},
									},
									Computed: true,
								},
							},
						},
						Required: true,
					},
					{
						Name: "nested_list",
						NestedType: &proto.Schema_Object{
							Nesting: proto.Schema_Object_LIST,
							Attributes: []*proto.Schema_Attribute{
								{
									Name: "required",
									Type: []byte(`"string"`),
									Computed: true,
								},
							},
							MinItems: 3,
						},
						Required: true,
					},
					{
						Name: "nested_set",
						NestedType: &proto.Schema_Object{
							Nesting: proto.Schema_Object_SET,
							Attributes: []*proto.Schema_Attribute{
								{
									Name: "required",
									Type: []byte(`"string"`),
									Computed: true,
								},
							},
						},
						Required: true,
					},
					{
						Name: "nested_map",
						NestedType: &proto.Schema_Object{
							Nesting: proto.Schema_Object_MAP,
							Attributes: []*proto.Schema_Attribute{
								{
									Name: "required",
									Type: []byte(`"string"`),
									Computed: true,
								},
							},
						},
						Required: true,
					},
				},
			},
			&configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"computed": {
						Type: cty.List(cty.Bool),
						Computed: true,
					},
					"optional": {
						Type: cty.String,
						Optional: true,
					},
					"optional_computed": {
						Type: cty.Map(cty.Bool),
						Optional: true,
						Computed: true,
					},
					"required": {
						Type: cty.Number,
						Required: true,
					},
					"nested_type": {
						NestedType: &configschema.Object{
							Attributes: map[string]*configschema.Attribute{
								"computed": {
									Type: cty.List(cty.Bool),
									Computed: true,
								},
								"optional": {
									Type: cty.String,
									Optional: true,
								},
								"optional_computed": {
									Type: cty.Map(cty.Bool),
									Optional: true,
									Computed: true,
								},
								"required": {
									Type: cty.Number,
									Required: true,
								},
							},
							Nesting: configschema.NestingSingle,
						},
						Required: true,
					},
					"deeply_nested_type": {
						NestedType: &configschema.Object{
							Attributes: map[string]*configschema.Attribute{
								"first_level": {
									NestedType: &configschema.Object{
										Nesting: configschema.NestingSingle,
										Attributes: map[string]*configschema.Attribute{
											"computed": {
												Type: cty.List(cty.Bool),
												Computed: true,
											},
											"optional": {
												Type: cty.String,
												Optional: true,
											},
											"optional_computed": {
												Type: cty.Map(cty.Bool),
												Optional: true,
												Computed: true,
											},
											"required": {
												Type: cty.Number,
												Required: true,
											},
										},
									},
									Computed: true,
								},
							},
							Nesting: configschema.NestingSingle,
						},
						Required: true,
					},
					"nested_list": {
						NestedType: &configschema.Object{
							Nesting: configschema.NestingList,
							Attributes: map[string]*configschema.Attribute{
								"required": {
									Type: cty.String,
									Computed: true,
								},
							},
							MinItems: 3,
						},
						Required: true,
					},
					"nested_map": {
						NestedType: &configschema.Object{
							Nesting: configschema.NestingMap,
							Attributes: map[string]*configschema.Attribute{
								"required": {
									Type: cty.String,
									Computed: true,
								},
							},
						},
						Required: true,
					},
					"nested_set": {
						NestedType: &configschema.Object{
							Nesting: configschema.NestingSet,
							Attributes: map[string]*configschema.Attribute{
								"required": {
									Type: cty.String,
									Computed: true,
								},
							},
						},
						Required: true,
					},
				},
			},
		},
		// One nested block per nesting mode.
		"blocks": {
			&proto.Schema_Block{
				BlockTypes: []*proto.Schema_NestedBlock{
					{
						TypeName: "list",
						Nesting: proto.Schema_NestedBlock_LIST,
						Block: &proto.Schema_Block{},
					},
					{
						TypeName: "map",
						Nesting: proto.Schema_NestedBlock_MAP,
						Block: &proto.Schema_Block{},
					},
					{
						TypeName: "set",
						Nesting: proto.Schema_NestedBlock_SET,
						Block: &proto.Schema_Block{},
					},
					{
						TypeName: "single",
						Nesting: proto.Schema_NestedBlock_SINGLE,
						Block: &proto.Schema_Block{
							Attributes: []*proto.Schema_Attribute{
								{
									Name: "foo",
									Type: []byte(`"dynamic"`),
									Required: true,
								},
							},
						},
					},
				},
			},
			&configschema.Block{
				BlockTypes: map[string]*configschema.NestedBlock{
					"list": &configschema.NestedBlock{
						Nesting: configschema.NestingList,
					},
					"map": &configschema.NestedBlock{
						Nesting: configschema.NestingMap,
					},
					"set": &configschema.NestedBlock{
						Nesting: configschema.NestingSet,
					},
					"single": &configschema.NestedBlock{
						Nesting: configschema.NestingSingle,
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"foo": {
									Type: cty.DynamicPseudoType,
									Required: true,
								},
							},
						},
					},
				},
			},
		},
		// Blocks nested three levels deep.
		"deep block nesting": {
			&proto.Schema_Block{
				BlockTypes: []*proto.Schema_NestedBlock{
					{
						TypeName: "single",
						Nesting: proto.Schema_NestedBlock_SINGLE,
						Block: &proto.Schema_Block{
							BlockTypes: []*proto.Schema_NestedBlock{
								{
									TypeName: "list",
									Nesting: proto.Schema_NestedBlock_LIST,
									Block: &proto.Schema_Block{
										BlockTypes: []*proto.Schema_NestedBlock{
											{
												TypeName: "set",
												Nesting: proto.Schema_NestedBlock_SET,
												Block: &proto.Schema_Block{},
											},
										},
									},
								},
							},
						},
					},
				},
			},
			&configschema.Block{
				BlockTypes: map[string]*configschema.NestedBlock{
					"single": &configschema.NestedBlock{
						Nesting: configschema.NestingSingle,
						Block: configschema.Block{
							BlockTypes: map[string]*configschema.NestedBlock{
								"list": &configschema.NestedBlock{
									Nesting: configschema.NestingList,
									Block: configschema.Block{
										BlockTypes: map[string]*configschema.NestedBlock{
											"set": &configschema.NestedBlock{
												Nesting: configschema.NestingSet,
											},
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}

	for name, tc := range tests {
		t.Run(name, func(t *testing.T) {
			converted := ProtoToConfigSchema(tc.Block)
			if !cmp.Equal(converted, tc.Want, typeComparer, valueComparer, equateEmpty) {
				t.Fatal(cmp.Diff(converted, tc.Want, typeComparer, valueComparer, equateEmpty))
			}
		})
	}
}
|
||||
|
||||
// Test that we can convert configschema blocks to their protobuf
// representation. Each case pairs the expected proto.Schema_Block (Want)
// with the configschema.Block input given to ConfigSchemaToProto.
func TestConvertProtoSchemaBlocks(t *testing.T) {
	tests := map[string]struct {
		Want  *proto.Schema_Block
		Block *configschema.Block
	}{
		"attributes": {
			&proto.Schema_Block{
				Attributes: []*proto.Schema_Attribute{
					{
						Name:     "computed",
						Type:     []byte(`["list","bool"]`),
						Computed: true,
					},
					{
						Name:     "optional",
						Type:     []byte(`"string"`),
						Optional: true,
					},
					{
						Name:     "optional_computed",
						Type:     []byte(`["map","bool"]`),
						Optional: true,
						Computed: true,
					},
					{
						Name:     "required",
						Type:     []byte(`"number"`),
						Required: true,
					},
				},
			},
			&configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"computed": {
						Type:     cty.List(cty.Bool),
						Computed: true,
					},
					"optional": {
						Type:     cty.String,
						Optional: true,
					},
					"optional_computed": {
						Type:     cty.Map(cty.Bool),
						Optional: true,
						Computed: true,
					},
					"required": {
						Type:     cty.Number,
						Required: true,
					},
				},
			},
		},
		"blocks": {
			&proto.Schema_Block{
				BlockTypes: []*proto.Schema_NestedBlock{
					{
						TypeName: "list",
						Nesting:  proto.Schema_NestedBlock_LIST,
						Block:    &proto.Schema_Block{},
					},
					{
						TypeName: "map",
						Nesting:  proto.Schema_NestedBlock_MAP,
						Block:    &proto.Schema_Block{},
					},
					{
						TypeName: "set",
						Nesting:  proto.Schema_NestedBlock_SET,
						Block:    &proto.Schema_Block{},
					},
					{
						TypeName: "single",
						Nesting:  proto.Schema_NestedBlock_SINGLE,
						Block: &proto.Schema_Block{
							Attributes: []*proto.Schema_Attribute{
								{
									Name:     "foo",
									Type:     []byte(`"dynamic"`),
									Required: true,
								},
							},
						},
					},
				},
			},
			&configschema.Block{
				BlockTypes: map[string]*configschema.NestedBlock{
					"list": &configschema.NestedBlock{
						Nesting: configschema.NestingList,
					},
					"map": &configschema.NestedBlock{
						Nesting: configschema.NestingMap,
					},
					"set": &configschema.NestedBlock{
						Nesting: configschema.NestingSet,
					},
					"single": &configschema.NestedBlock{
						Nesting: configschema.NestingSingle,
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"foo": {
									Type:     cty.DynamicPseudoType,
									Required: true,
								},
							},
						},
					},
				},
			},
		},
		"deep block nesting": {
			&proto.Schema_Block{
				BlockTypes: []*proto.Schema_NestedBlock{
					{
						TypeName: "single",
						Nesting:  proto.Schema_NestedBlock_SINGLE,
						Block: &proto.Schema_Block{
							BlockTypes: []*proto.Schema_NestedBlock{
								{
									TypeName: "list",
									Nesting:  proto.Schema_NestedBlock_LIST,
									Block: &proto.Schema_Block{
										BlockTypes: []*proto.Schema_NestedBlock{
											{
												TypeName: "set",
												Nesting:  proto.Schema_NestedBlock_SET,
												Block:    &proto.Schema_Block{},
											},
										},
									},
								},
							},
						},
					},
				},
			},
			&configschema.Block{
				BlockTypes: map[string]*configschema.NestedBlock{
					"single": &configschema.NestedBlock{
						Nesting: configschema.NestingSingle,
						Block: configschema.Block{
							BlockTypes: map[string]*configschema.NestedBlock{
								"list": &configschema.NestedBlock{
									Nesting: configschema.NestingList,
									Block: configschema.Block{
										BlockTypes: map[string]*configschema.NestedBlock{
											"set": &configschema.NestedBlock{
												Nesting: configschema.NestingSet,
											},
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}

	for name, tc := range tests {
		t.Run(name, func(t *testing.T) {
			converted := ConfigSchemaToProto(tc.Block)
			if !cmp.Equal(converted, tc.Want, typeComparer, equateEmpty, ignoreUnexported) {
				t.Fatal(cmp.Diff(converted, tc.Want, typeComparer, equateEmpty, ignoreUnexported))
			}
		})
	}
}
|
|
@ -0,0 +1,9 @@
|
|||
package plugin6
|
||||
|
||||
// plugin6 builds on types in package plugin to include support for plugin
|
||||
// protocol v6. The main gRPC functions used by Terraform (and initialized in
|
||||
// init.go), such as Serve, are in the plugin package. The version of those
|
||||
// functions in this package are used by various mocks and in tests.
|
||||
|
||||
// When provider protocol v5 is deprecated, some functions may need to be moved
|
||||
// here, or the existing functions updated, before removing the plugin package.
|
|
@ -0,0 +1,74 @@
|
|||
package plugin6
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"runtime"
|
||||
|
||||
"github.com/hashicorp/terraform/tfdiags"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// grpcErr extracts some known error types and formats them into better
|
||||
// representations for core. This must only be called from plugin methods.
|
||||
// Since we don't use RPC status errors for the plugin protocol, these do not
|
||||
// contain any useful details, and we can return some text that at least
|
||||
// indicates the plugin call and possible error condition.
|
||||
func grpcErr(err error) (diags tfdiags.Diagnostics) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// extract the method name from the caller.
|
||||
pc, _, _, ok := runtime.Caller(1)
|
||||
if !ok {
|
||||
logger.Error("unknown grpc call", "error", err)
|
||||
return diags.Append(err)
|
||||
}
|
||||
|
||||
f := runtime.FuncForPC(pc)
|
||||
|
||||
// Function names will contain the full import path. Take the last
|
||||
// segment, which will let users know which method was being called.
|
||||
_, requestName := path.Split(f.Name())
|
||||
|
||||
// Here we can at least correlate the error in the logs to a particular binary.
|
||||
logger.Error(requestName, "error", err)
|
||||
|
||||
// TODO: while this expands the error codes into somewhat better messages,
|
||||
// this still does not easily link the error to an actual user-recognizable
|
||||
// plugin. The grpc plugin does not know its configured name, and the
|
||||
// errors are in a list of diagnostics, making it hard for the caller to
|
||||
// annotate the returned errors.
|
||||
switch status.Code(err) {
|
||||
case codes.Unavailable:
|
||||
// This case is when the plugin has stopped running for some reason,
|
||||
// and is usually the result of a crash.
|
||||
diags = diags.Append(tfdiags.Sourceless(
|
||||
tfdiags.Error,
|
||||
"Plugin did not respond",
|
||||
fmt.Sprintf("The plugin encountered an error, and failed to respond to the %s call. "+
|
||||
"The plugin logs may contain more details.", requestName),
|
||||
))
|
||||
case codes.Canceled:
|
||||
diags = diags.Append(tfdiags.Sourceless(
|
||||
tfdiags.Error,
|
||||
"Request cancelled",
|
||||
fmt.Sprintf("The %s request was cancelled.", requestName),
|
||||
))
|
||||
case codes.Unimplemented:
|
||||
diags = diags.Append(tfdiags.Sourceless(
|
||||
tfdiags.Error,
|
||||
"Unsupported plugin method",
|
||||
fmt.Sprintf("The %s method is not supported by this plugin.", requestName),
|
||||
))
|
||||
default:
|
||||
diags = diags.Append(tfdiags.Sourceless(
|
||||
tfdiags.Error,
|
||||
"Plugin error",
|
||||
fmt.Sprintf("The plugin returned an unexpected error from %s: %v", requestName, err),
|
||||
))
|
||||
}
|
||||
return
|
||||
}
|
|
@ -0,0 +1,617 @@
|
|||
package plugin6
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
|
||||
plugin "github.com/hashicorp/go-plugin"
|
||||
"github.com/hashicorp/terraform/internal/logging"
|
||||
proto6 "github.com/hashicorp/terraform/internal/tfplugin6"
|
||||
"github.com/hashicorp/terraform/plugin6/convert"
|
||||
"github.com/hashicorp/terraform/providers"
|
||||
ctyjson "github.com/zclconf/go-cty/cty/json"
|
||||
"github.com/zclconf/go-cty/cty/msgpack"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// logger is the package-level logger shared by the plugin6 RPC wrappers.
var logger = logging.HCLogger()
|
||||
|
||||
// GRPCProviderPlugin implements plugin.GRPCPlugin for the go-plugin package.
type GRPCProviderPlugin struct {
	plugin.Plugin
	// GRPCProvider returns the provider server implementation registered
	// when this plugin is served.
	GRPCProvider func() proto6.ProviderServer
}
|
||||
|
||||
// GRPCClient implements plugin.GRPCPlugin, returning the GRPCProvider core
// uses to call the provider over the given client connection.
func (p *GRPCProviderPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
	return &GRPCProvider{
		client: proto6.NewProviderClient(c),
		ctx:    ctx,
	}, nil
}
|
||||
|
||||
// GRPCServer implements plugin.GRPCPlugin, registering the configured
// provider implementation with the plugin's grpc.Server.
func (p *GRPCProviderPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {
	proto6.RegisterProviderServer(s, p.GRPCProvider())
	return nil
}
|
||||
|
||||
// GRPCProvider handles the client, or core side of the plugin rpc connection.
// The GRPCProvider methods are mostly a translation layer between the
// terraform provider types and the grpc proto types, directly converting
// between the two.
type GRPCProvider struct {
	// PluginClient provides a reference to the plugin.Client which controls the plugin process.
	// This allows the GRPCProvider a way to shutdown the plugin process.
	PluginClient *plugin.Client

	// TestServer contains a grpc.Server to close when the GRPCProvider is being
	// used in an end to end test of a provider.
	TestServer *grpc.Server

	// Proto client used to make the grpc service calls.
	client proto6.ProviderClient

	// this context is created by the plugin package, and is canceled when the
	// plugin process ends.
	ctx context.Context

	// schemas caches the schema for this provider, used to properly
	// serialize the state for requests; mu guards access to it.
	mu      sync.Mutex
	schemas providers.GetSchemaResponse
}
|
||||
|
||||
// New returns a GRPCProvider wrapping the given proto6 provider client. The
// ctx should be the context created by the plugin package, which is canceled
// when the plugin process ends.
func New(client proto6.ProviderClient, ctx context.Context) GRPCProvider {
	return GRPCProvider{
		client: client,
		ctx:    ctx,
	}
}
|
||||
|
||||
// getSchema is used internally to get the saved provider schema. The schema
// should have already been fetched from the provider, but we have to
// synchronize access to avoid being called concurrently with GetSchema.
// Panics if the fallback fetch itself returns error diagnostics.
func (p *GRPCProvider) getSchema() providers.GetSchemaResponse {
	p.mu.Lock()
	// unlock inline in case GetSchema needs to be called
	if p.schemas.Provider.Block != nil {
		p.mu.Unlock()
		return p.schemas
	}
	p.mu.Unlock()

	// the schema should have been fetched already, but give it another shot
	// just in case things are being called out of order. This may happen for
	// tests.
	schemas := p.GetSchema()
	if schemas.Diagnostics.HasErrors() {
		panic(schemas.Diagnostics.Err())
	}

	return schemas
}
|
||||
|
||||
// getResourceSchema is a helper to extract the schema for a resource, and
|
||||
// panics if the schema is not available.
|
||||
func (p *GRPCProvider) getResourceSchema(name string) providers.Schema {
|
||||
schema := p.getSchema()
|
||||
resSchema, ok := schema.ResourceTypes[name]
|
||||
if !ok {
|
||||
panic("unknown resource type " + name)
|
||||
}
|
||||
return resSchema
|
||||
}
|
||||
|
||||
// getDatasourceSchema is a helper to extract the schema for a datasource, and
// panics if that schema is not available.
func (p *GRPCProvider) getDatasourceSchema(name string) providers.Schema {
	schema := p.getSchema()
	dataSchema, ok := schema.DataSources[name]
	if !ok {
		panic("unknown data source " + name)
	}
	return dataSchema
}
|
||||
|
||||
// getProviderMetaSchema is a helper to extract the schema for the meta info
// defined for a provider.
func (p *GRPCProvider) getProviderMetaSchema() providers.Schema {
	schema := p.getSchema()
	return schema.ProviderMeta
}
|
||||
|
||||
// GetSchema fetches the full provider schema from the plugin on the first
// call and serves it from the cache thereafter. Access is synchronized with
// getSchema via p.mu.
func (p *GRPCProvider) GetSchema() (resp providers.GetSchemaResponse) {
	logger.Trace("GRPCProvider.v6: GetSchema")
	p.mu.Lock()
	defer p.mu.Unlock()

	// Serve the cached schema when we already have it.
	if p.schemas.Provider.Block != nil {
		return p.schemas
	}

	resp.ResourceTypes = make(map[string]providers.Schema)
	resp.DataSources = make(map[string]providers.Schema)

	// Some providers may generate quite large schemas, and the internal default
	// grpc response size limit is 4MB. 64MB should cover most any use case, and
	// if we get providers nearing that we may want to consider a finer-grained
	// API to fetch individual resource schemas.
	// Note: this option is marked as EXPERIMENTAL in the grpc API.
	const maxRecvSize = 64 << 20
	protoResp, err := p.client.GetProviderSchema(p.ctx, new(proto6.GetProviderSchema_Request), grpc.MaxRecvMsgSizeCallOption{MaxRecvMsgSize: maxRecvSize})
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err))
		return resp
	}

	resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))

	if protoResp.Provider == nil {
		resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provider schema"))
		return resp
	}

	resp.Provider = convert.ProtoToProviderSchema(protoResp.Provider)
	// provider_meta is optional; only convert it when present.
	if protoResp.ProviderMeta == nil {
		logger.Debug("No provider meta schema returned")
	} else {
		resp.ProviderMeta = convert.ProtoToProviderSchema(protoResp.ProviderMeta)
	}

	for name, res := range protoResp.ResourceSchemas {
		resp.ResourceTypes[name] = convert.ProtoToProviderSchema(res)
	}

	for name, data := range protoResp.DataSourceSchemas {
		resp.DataSources[name] = convert.ProtoToProviderSchema(data)
	}

	// Cache the converted response for subsequent calls.
	p.schemas = resp

	return resp
}
|
||||
|
||||
func (p *GRPCProvider) ValidateProviderConfig(r providers.PrepareProviderConfigRequest) (resp providers.PrepareProviderConfigResponse) {
|
||||
logger.Trace("GRPCProvider.v6: ValidateProviderConfig")
|
||||
|
||||
schema := p.getSchema()
|
||||
ty := schema.Provider.Block.ImpliedType()
|
||||
|
||||
mp, err := msgpack.Marshal(r.Config, ty)
|
||||
if err != nil {
|
||||
resp.Diagnostics = resp.Diagnostics.Append(err)
|
||||
return resp
|
||||
}
|
||||
|
||||
protoReq := &proto6.ValidateProviderConfig_Request{
|
||||
Config: &proto6.DynamicValue{Msgpack: mp},
|
||||
}
|
||||
|
||||
protoResp, err := p.client.ValidateProviderConfig(p.ctx, protoReq)
|
||||
if err != nil {
|
||||
resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err))
|
||||
return resp
|
||||
}
|
||||
|
||||
resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
|
||||
return resp
|
||||
}
|
||||
|
||||
func (p *GRPCProvider) ValidateResourceTypeConfig(r providers.ValidateResourceTypeConfigRequest) (resp providers.ValidateResourceTypeConfigResponse) {
|
||||
logger.Trace("GRPCProvider.v6: ValidateResourceTypeConfig")
|
||||
resourceSchema := p.getResourceSchema(r.TypeName)
|
||||
|
||||
mp, err := msgpack.Marshal(r.Config, resourceSchema.Block.ImpliedType())
|
||||
if err != nil {
|
||||
resp.Diagnostics = resp.Diagnostics.Append(err)
|
||||
return resp
|
||||
}
|
||||
|
||||
protoReq := &proto6.ValidateResourceConfig_Request{
|
||||
TypeName: r.TypeName,
|
||||
Config: &proto6.DynamicValue{Msgpack: mp},
|
||||
}
|
||||
|
||||
protoResp, err := p.client.ValidateResourceConfig(p.ctx, protoReq)
|
||||
if err != nil {
|
||||
resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err))
|
||||
return resp
|
||||
}
|
||||
|
||||
resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
|
||||
return resp
|
||||
}
|
||||
|
||||
func (p *GRPCProvider) ValidateDataSourceConfig(r providers.ValidateDataSourceConfigRequest) (resp providers.ValidateDataSourceConfigResponse) {
|
||||
logger.Trace("GRPCProvider.v6: ValidateDataSourceConfig")
|
||||
|
||||
dataSchema := p.getDatasourceSchema(r.TypeName)
|
||||
|
||||
mp, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType())
|
||||
if err != nil {
|
||||
resp.Diagnostics = resp.Diagnostics.Append(err)
|
||||
return resp
|
||||
}
|
||||
|
||||
protoReq := &proto6.ValidateDataSourceConfig_Request{
|
||||
TypeName: r.TypeName,
|
||||
Config: &proto6.DynamicValue{Msgpack: mp},
|
||||
}
|
||||
|
||||
protoResp, err := p.client.ValidateDataSourceConfig(p.ctx, protoReq)
|
||||
if err != nil {
|
||||
resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err))
|
||||
return resp
|
||||
}
|
||||
resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
|
||||
return resp
|
||||
}
|
||||
|
||||
// UpgradeResourceState asks the provider to upgrade a raw state (JSON or
// flatmap) from an earlier schema version to the current one, returning the
// upgraded state value (null of the correct type when nothing is returned).
func (p *GRPCProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) {
	logger.Trace("GRPCProvider.v6: UpgradeResourceState")

	resSchema := p.getResourceSchema(r.TypeName)

	protoReq := &proto6.UpgradeResourceState_Request{
		TypeName: r.TypeName,
		Version:  int64(r.Version),
		RawState: &proto6.RawState{
			Json:    r.RawStateJSON,
			Flatmap: r.RawStateFlatmap,
		},
	}

	protoResp, err := p.client.UpgradeResourceState(p.ctx, protoReq)
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err))
		return resp
	}
	resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))

	// Default to a null state of the correct implied type in case the
	// provider returned no upgraded state.
	ty := resSchema.Block.ImpliedType()
	resp.UpgradedState = cty.NullVal(ty)
	if protoResp.UpgradedState == nil {
		return resp
	}

	state, err := decodeDynamicValue(protoResp.UpgradedState, ty)
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(err)
		return resp
	}
	resp.UpgradedState = state

	return resp
}
|
||||
|
||||
func (p *GRPCProvider) Configure(r providers.ConfigureRequest) (resp providers.ConfigureResponse) {
|
||||
logger.Trace("GRPCProvider.v6: Configure")
|
||||
|
||||
schema := p.getSchema()
|
||||
|
||||
var mp []byte
|
||||
|
||||
// we don't have anything to marshal if there's no config
|
||||
mp, err := msgpack.Marshal(r.Config, schema.Provider.Block.ImpliedType())
|
||||
if err != nil {
|
||||
resp.Diagnostics = resp.Diagnostics.Append(err)
|
||||
return resp
|
||||
}
|
||||
|
||||
protoReq := &proto6.ConfigureProvider_Request{
|
||||
TerraformVersion: r.TerraformVersion,
|
||||
Config: &proto6.DynamicValue{
|
||||
Msgpack: mp,
|
||||
},
|
||||
}
|
||||
|
||||
protoResp, err := p.client.ConfigureProvider(p.ctx, protoReq)
|
||||
if err != nil {
|
||||
resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err))
|
||||
return resp
|
||||
}
|
||||
resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
|
||||
return resp
|
||||
}
|
||||
|
||||
func (p *GRPCProvider) Stop() error {
|
||||
logger.Trace("GRPCProvider.v6: Stop")
|
||||
|
||||
resp, err := p.client.StopProvider(p.ctx, new(proto6.StopProvider_Request))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.Error != "" {
|
||||
return errors.New(resp.Error)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReadResource refreshes an existing resource by asking the provider to read
// its current state, returning the decoded new state and private data.
func (p *GRPCProvider) ReadResource(r providers.ReadResourceRequest) (resp providers.ReadResourceResponse) {
	logger.Trace("GRPCProvider.v6: ReadResource")

	resSchema := p.getResourceSchema(r.TypeName)
	metaSchema := p.getProviderMetaSchema()

	mp, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType())
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(err)
		return resp
	}

	protoReq := &proto6.ReadResource_Request{
		TypeName:     r.TypeName,
		CurrentState: &proto6.DynamicValue{Msgpack: mp},
		Private:      r.Private,
	}

	// Only send provider_meta when the provider declares a schema for it.
	if metaSchema.Block != nil {
		metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType())
		if err != nil {
			resp.Diagnostics = resp.Diagnostics.Append(err)
			return resp
		}
		protoReq.ProviderMeta = &proto6.DynamicValue{Msgpack: metaMP}
	}

	protoResp, err := p.client.ReadResource(p.ctx, protoReq)
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err))
		return resp
	}
	resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))

	state, err := decodeDynamicValue(protoResp.NewState, resSchema.Block.ImpliedType())
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(err)
		return resp
	}
	resp.NewState = state
	resp.Private = protoResp.Private

	return resp
}
|
||||
|
||||
func (p *GRPCProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
|
||||
logger.Trace("GRPCProvider.v6: PlanResourceChange")
|
||||
|
||||
resSchema := p.getResourceSchema(r.TypeName)
|
||||
metaSchema := p.getProviderMetaSchema()
|
||||
|
||||
priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType())
|
||||
if err != nil {
|
||||
resp.Diagnostics = resp.Diagnostics.Append(err)
|
||||
return resp
|
||||
}
|
||||
|
||||
configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType())
|
||||
if err != nil {
|
||||
resp.Diagnostics = resp.Diagnostics.Append(err)
|
||||
return resp
|
||||
}
|
||||
|
||||
propMP, err := msgpack.Marshal(r.ProposedNewState, resSchema.Block.ImpliedType())
|
||||
if err != nil {
|
||||
resp.Diagnostics = resp.Diagnostics.Append(err)
|
||||
return resp
|
||||
}
|
||||
|
||||
protoReq := &proto6.PlanResourceChange_Request{
|
||||
TypeName: r.TypeName,
|
||||
PriorState: &proto6.DynamicValue{Msgpack: priorMP},
|
||||
Config: &proto6.DynamicValue{Msgpack: configMP},
|
||||
ProposedNewState: &proto6.DynamicValue{Msgpack: propMP},
|
||||
PriorPrivate: r.PriorPrivate,
|
||||
}
|
||||
|
||||
if metaSchema.Block != nil {
|
||||
metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType())
|
||||
if err != nil {
|
||||
resp.Diagnostics = resp.Diagnostics.Append(err)
|
||||
return resp
|
||||
}
|
||||
protoReq.ProviderMeta = &proto6.DynamicValue{Msgpack: metaMP}
|
||||
}
|
||||
|
||||
protoResp, err := p.client.PlanResourceChange(p.ctx, protoReq)
|
||||
if err != nil {
|
||||
resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err))
|
||||
return resp
|
||||
}
|
||||
resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
|
||||
|
||||
state, err := decodeDynamicValue(protoResp.PlannedState, resSchema.Block.ImpliedType())
|
||||
if err != nil {
|
||||
resp.Diagnostics = resp.Diagnostics.Append(err)
|
||||
return resp
|
||||
}
|
||||
resp.PlannedState = state
|
||||
|
||||
for _, p := range protoResp.RequiresReplace {
|
||||
resp.RequiresReplace = append(resp.RequiresReplace, convert.AttributePathToPath(p))
|
||||
}
|
||||
|
||||
resp.PlannedPrivate = protoResp.PlannedPrivate
|
||||
|
||||
return resp
|
||||
}
|
||||
|
||||
// ApplyResourceChange applies a planned change by sending the prior, planned,
// and config values to the provider, returning the new state and private data.
func (p *GRPCProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
	logger.Trace("GRPCProvider.v6: ApplyResourceChange")

	resSchema := p.getResourceSchema(r.TypeName)
	metaSchema := p.getProviderMetaSchema()

	priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType())
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(err)
		return resp
	}
	plannedMP, err := msgpack.Marshal(r.PlannedState, resSchema.Block.ImpliedType())
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(err)
		return resp
	}
	configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType())
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(err)
		return resp
	}

	protoReq := &proto6.ApplyResourceChange_Request{
		TypeName:       r.TypeName,
		PriorState:     &proto6.DynamicValue{Msgpack: priorMP},
		PlannedState:   &proto6.DynamicValue{Msgpack: plannedMP},
		Config:         &proto6.DynamicValue{Msgpack: configMP},
		PlannedPrivate: r.PlannedPrivate,
	}

	// Only send provider_meta when the provider declares a schema for it.
	if metaSchema.Block != nil {
		metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType())
		if err != nil {
			resp.Diagnostics = resp.Diagnostics.Append(err)
			return resp
		}
		protoReq.ProviderMeta = &proto6.DynamicValue{Msgpack: metaMP}
	}

	protoResp, err := p.client.ApplyResourceChange(p.ctx, protoReq)
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err))
		return resp
	}
	resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))

	resp.Private = protoResp.Private

	state, err := decodeDynamicValue(protoResp.NewState, resSchema.Block.ImpliedType())
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(err)
		return resp
	}
	resp.NewState = state

	return resp
}
|
||||
|
||||
// ImportResourceState asks the provider to import an existing object by ID,
// decoding each imported resource's state against its own schema.
func (p *GRPCProvider) ImportResourceState(r providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) {
	logger.Trace("GRPCProvider.v6: ImportResourceState")

	protoReq := &proto6.ImportResourceState_Request{
		TypeName: r.TypeName,
		Id:       r.ID,
	}

	protoResp, err := p.client.ImportResourceState(p.ctx, protoReq)
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err))
		return resp
	}
	resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))

	for _, imported := range protoResp.ImportedResources {
		resource := providers.ImportedResource{
			TypeName: imported.TypeName,
			Private:  imported.Private,
		}

		// An import can return resources of types other than the one
		// requested, so the schema is looked up per imported resource.
		resSchema := p.getResourceSchema(resource.TypeName)
		state, err := decodeDynamicValue(imported.State, resSchema.Block.ImpliedType())
		if err != nil {
			resp.Diagnostics = resp.Diagnostics.Append(err)
			return resp
		}
		resource.State = state
		resp.ImportedResources = append(resp.ImportedResources, resource)
	}

	return resp
}
|
||||
|
||||
// ReadDataSource evaluates a data source read by sending its config to the
// provider and decoding the resulting state.
func (p *GRPCProvider) ReadDataSource(r providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) {
	logger.Trace("GRPCProvider.v6: ReadDataSource")

	dataSchema := p.getDatasourceSchema(r.TypeName)
	metaSchema := p.getProviderMetaSchema()

	config, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType())
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(err)
		return resp
	}

	protoReq := &proto6.ReadDataSource_Request{
		TypeName: r.TypeName,
		Config: &proto6.DynamicValue{
			Msgpack: config,
		},
	}

	// Only send provider_meta when the provider declares a schema for it.
	if metaSchema.Block != nil {
		metaMP, err := msgpack.Marshal(r.ProviderMeta, metaSchema.Block.ImpliedType())
		if err != nil {
			resp.Diagnostics = resp.Diagnostics.Append(err)
			return resp
		}
		protoReq.ProviderMeta = &proto6.DynamicValue{Msgpack: metaMP}
	}

	protoResp, err := p.client.ReadDataSource(p.ctx, protoReq)
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err))
		return resp
	}
	resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))

	state, err := decodeDynamicValue(protoResp.State, dataSchema.Block.ImpliedType())
	if err != nil {
		resp.Diagnostics = resp.Diagnostics.Append(err)
		return resp
	}
	resp.State = state

	return resp
}
|
||||
|
||||
// Close shuts the provider down. Closing the grpc connection is final, and
// terraform will call it at the end of every phase.
func (p *GRPCProvider) Close() error {
	logger.Trace("GRPCProvider.v6: Close")

	// Make sure to stop the server if we're not running within go-plugin.
	if p.TestServer != nil {
		p.TestServer.Stop()
	}

	// Check this since it's not automatically inserted during plugin creation.
	// It's currently only inserted by the command package, because that is
	// where the factory is built and is the only point with access to the
	// plugin.Client.
	if p.PluginClient == nil {
		logger.Debug("provider has no plugin.Client")
		return nil
	}

	p.PluginClient.Kill()
	return nil
}
|
||||
|
||||
// Decode a DynamicValue from either the JSON or MsgPack encoding.
|
||||
func decodeDynamicValue(v *proto6.DynamicValue, ty cty.Type) (cty.Value, error) {
|
||||
// always return a valid value
|
||||
var err error
|
||||
res := cty.NullVal(ty)
|
||||
if v == nil {
|
||||
return res, nil
|
||||
}
|
||||
|
||||
switch {
|
||||
case len(v.Msgpack) > 0:
|
||||
res, err = msgpack.Unmarshal(v.Msgpack, ty)
|
||||
case len(v.Json) > 0:
|
||||
res, err = ctyjson.Unmarshal(v.Json, ty)
|
||||
}
|
||||
return res, err
|
||||
}
|
|
@ -0,0 +1,68 @@
|
|||
package plugin6
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/go-plugin"
|
||||
proto "github.com/hashicorp/terraform/internal/tfplugin6"
|
||||
)
|
||||
|
||||
const (
	// The constants below are the names of the plugins that can be dispensed
	// from the plugin server.
	ProviderPluginName    = "provider"
	ProvisionerPluginName = "provisioner"

	// DefaultProtocolVersion is the protocol version assumed for legacy clients that don't specify
	// a particular version during their handshake. This is the version used when Terraform 0.10
	// and 0.11 launch plugins that were built with support for both versions 4 and 5, and must
	// stay unchanged at 4 until we intentionally build plugins that are not compatible with 0.10 and
	// 0.11.
	DefaultProtocolVersion = 4
)
|
||||
|
||||
// Handshake is the HandshakeConfig used to configure clients and servers.
var Handshake = plugin.HandshakeConfig{
	// The ProtocolVersion is the version that must match between TF core
	// and TF plugins. This should be bumped whenever a change happens in
	// one or the other that makes it so that they can't safely communicate.
	// This could be adding a new interface value, it could be how
	// helper/schema computes diffs, etc.
	ProtocolVersion: DefaultProtocolVersion,

	// The magic cookie values should NEVER be changed.
	MagicCookieKey:   "TF_PLUGIN_MAGIC_COOKIE",
	MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2",
}
|
||||
|
||||
// GRPCProviderFunc returns the proto.ProviderServer implementation to be
// served over the plugin protocol.
type GRPCProviderFunc func() proto.ProviderServer
|
||||
|
||||
// ServeOpts are the configurations to serve a plugin.
type ServeOpts struct {
	// GRPCProviderFunc supplies the provider implementation; when set it is
	// wrapped in a GRPCProviderPlugin and added to the served plugin set.
	GRPCProviderFunc GRPCProviderFunc
}
|
||||
|
||||
// Serve serves a plugin. This function never returns and should be the final
// function called in the main function of the plugin.
func Serve(opts *ServeOpts) {
	plugin.Serve(&plugin.ServeConfig{
		HandshakeConfig:  Handshake,
		VersionedPlugins: pluginSet(opts),
		GRPCServer:       plugin.DefaultGRPCServer,
	})
}
|
||||
|
||||
func pluginSet(opts *ServeOpts) map[int]plugin.PluginSet {
|
||||
plugins := map[int]plugin.PluginSet{}
|
||||
|
||||
// add the new protocol versions if they're configured
|
||||
if opts.GRPCProviderFunc != nil {
|
||||
plugins[5] = plugin.PluginSet{}
|
||||
if opts.GRPCProviderFunc != nil {
|
||||
plugins[6]["provider"] = &GRPCProviderPlugin{
|
||||
GRPCProvider: opts.GRPCProviderFunc,
|
||||
}
|
||||
}
|
||||
}
|
||||
return plugins
|
||||
}
|
Loading…
Reference in New Issue