core: Rewrite EvalApplyProvisioners for new provisioner API

This commit is contained in:
Martin Atkins 2018-09-14 11:40:37 -07:00
parent 859b384558
commit b6e31be09c
1 changed file with 93 additions and 109 deletions

View File

@ -5,12 +5,14 @@ import (
"log" "log"
"github.com/hashicorp/go-multierror" "github.com/hashicorp/go-multierror"
"github.com/hashicorp/hcl2/hcl"
"github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/plans" "github.com/hashicorp/terraform/plans"
"github.com/hashicorp/terraform/providers" "github.com/hashicorp/terraform/providers"
"github.com/hashicorp/terraform/provisioners"
"github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/states"
"github.com/hashicorp/terraform/tfdiags" "github.com/hashicorp/terraform/tfdiags"
) )
@ -380,24 +382,21 @@ func (n *EvalApplyProvisioners) filterProvisioners() []*configs.Provisioner {
} }
func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisioner) error { func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisioner) error {
return fmt.Errorf("EvalApplyProvisioners.apply not yet updated for new types") var diags tfdiags.Diagnostics
/*
instanceAddr := n.Addr instanceAddr := n.Addr
absAddr := instanceAddr.Absolute(ctx.Path()) absAddr := instanceAddr.Absolute(ctx.Path())
state := *n.State
// The hook API still uses the legacy InstanceInfo type, so we need to shim it. // If there's a connection block defined directly inside the resource block
legacyInfo := NewInstanceInfo(n.Addr.Absolute(ctx.Path())) // then it'll serve as a base connection configuration for all of the
// provisioners.
// Store the original connection info, restore later var baseConn hcl.Body
origConnInfo := state.Ephemeral.ConnInfo if n.ResourceConfig.Managed != nil && n.ResourceConfig.Managed.Connection != nil {
defer func() { baseConn = n.ResourceConfig.Managed.Connection.Config
state.Ephemeral.ConnInfo = origConnInfo }
}()
var diags tfdiags.Diagnostics
for _, prov := range provs { for _, prov := range provs {
log.Printf("[TRACE] EvalApplyProvisioners: provisioning %s with %q", absAddr, prov.Type)
// Get the provisioner // Get the provisioner
provisioner := ctx.Provisioner(prov.Type) provisioner := ctx.Provisioner(prov.Type)
schema := ctx.ProvisionerSchema(prov.Type) schema := ctx.ProvisionerSchema(prov.Type)
@ -408,48 +407,35 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisio
config, _, configDiags := ctx.EvaluateBlock(prov.Config, schema, instanceAddr, keyData) config, _, configDiags := ctx.EvaluateBlock(prov.Config, schema, instanceAddr, keyData)
diags = diags.Append(configDiags) diags = diags.Append(configDiags)
// A provisioner may not have a connection block // If the provisioner block contains a connection block of its own then
// it can override the base connection configuration, if any.
var localConn hcl.Body
if prov.Connection != nil { if prov.Connection != nil {
connInfo, _, connInfoDiags := ctx.EvaluateBlock(prov.Connection.Config, connectionBlockSupersetSchema, instanceAddr, keyData) localConn = prov.Connection.Config
}
var connBody hcl.Body
switch {
case baseConn != nil && localConn != nil:
// Our standard merging logic applies here, similar to what we do
// with _override.tf configuration files: arguments from the
// base connection block will be masked by any arguments of the
// same name in the local connection block.
connBody = configs.MergeBodies(baseConn, localConn)
case baseConn != nil:
connBody = baseConn
case localConn != nil:
connBody = localConn
default: // both are nil, by elimination
connBody = hcl.EmptyBody()
}
connInfo, _, connInfoDiags := ctx.EvaluateBlock(connBody, connectionBlockSupersetSchema, instanceAddr, keyData)
diags = diags.Append(connInfoDiags) diags = diags.Append(connInfoDiags)
if diags.HasErrors() {
if configDiags.HasErrors() || connInfoDiags.HasErrors() { // "on failure continue" setting only applies to failures of the
continue // provisioner itself, not to invalid configuration.
} return diags.Err()
// Merge the connection information, and also lower everything to strings
// for compatibility with the communicator API.
overlay := make(map[string]string)
if origConnInfo != nil {
for k, v := range origConnInfo {
overlay[k] = v
}
}
for it := connInfo.ElementIterator(); it.Next(); {
kv, vv := it.Element()
var k, v string
// there are no unset or null values in a connection block, and
// everything needs to map to a string.
if vv.IsNull() {
continue
}
err := gocty.FromCtyValue(kv, &k)
if err != nil {
// Should never happen, because connectionBlockSupersetSchema requires all primitives
panic(err)
}
err = gocty.FromCtyValue(vv, &v)
if err != nil {
// Should never happen, because connectionBlockSupersetSchema requires all primitives
panic(err)
}
overlay[k] = v
}
state.Ephemeral.ConnInfo = overlay
} }
{ {
@ -470,27 +456,26 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisio
}) })
} }
// The provisioner API still uses our legacy ResourceConfig type, so
// we need to shim it.
legacyRC := NewResourceConfigShimmed(config, schema)
// Invoke the Provisioner
output := CallbackUIOutput{OutputFn: outputFn} output := CallbackUIOutput{OutputFn: outputFn}
applyErr := provisioner.Apply(&output, state, legacyRC) resp := provisioner.ProvisionResource(provisioners.ProvisionResourceRequest{
Config: config,
Connection: connInfo,
UIOutput: &output,
})
applyDiags := resp.Diagnostics.InConfigBody(prov.Config)
// Call post hook // Call post hook
hookErr := ctx.Hook(func(h Hook) (HookAction, error) { hookErr := ctx.Hook(func(h Hook) (HookAction, error) {
return h.PostProvisionInstanceStep(absAddr, prov.Type, applyErr) return h.PostProvisionInstanceStep(absAddr, prov.Type, applyDiags.Err())
}) })
// Handle the error before we deal with the hook if diags.HasErrors() {
if applyErr != nil {
// Determine failure behavior
switch prov.OnFailure { switch prov.OnFailure {
case configs.ProvisionerOnFailureContinue: case configs.ProvisionerOnFailureContinue:
log.Printf("[INFO] apply %s [%s]: error during provision, but continuing as requested in configuration", n.Addr, prov.Type) log.Printf("[WARN] Errors while provisioning %s with %q, but continuing as requested in configuration", n.Addr, prov.Type)
case configs.ProvisionerOnFailureFail: default:
return applyErr diags = diags.Append(applyDiags)
return diags.Err()
} }
} }
@ -501,5 +486,4 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisio
} }
return diags.ErrWithWarnings() return diags.ErrWithWarnings()
*/
} }