vendor: github.com/hashicorp/terraform/...@v0.10.0
parent b4c1ab2d12
commit 24d5b13cac
@@ -0,0 +1,202 @@

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@@ -0,0 +1,3 @@
AWS SDK for Go
Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Copyright 2014-2015 Stripe, Inc.
File diff suppressed because it is too large
@@ -0,0 +1,33 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

// Package iot provides the client and types for making API
// requests to AWS IoT.
//
// AWS IoT provides secure, bi-directional communication between Internet-connected
// things (such as sensors, actuators, embedded devices, or smart appliances)
// and the AWS cloud. You can discover your custom IoT-Data endpoint to communicate
// with, configure rules for data processing and integration with other services,
// organize resources associated with each thing (Thing Registry), configure
// logging, and create and manage policies and credentials to authenticate things.
//
// For more information about how AWS IoT works, see the Developer Guide (http://docs.aws.amazon.com/iot/latest/developerguide/aws-iot-how-it-works.html).
//
// See iot package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/iot/
//
// Using the Client
//
// To contact AWS IoT with the SDK use the New function to create
// a new service client. With that client you can make API requests to the service.
// These clients are safe to use concurrently.
//
// See the SDK's documentation for more information on how to use the SDK.
// https://docs.aws.amazon.com/sdk-for-go/api/
//
// See aws.Config documentation for more information on configuring SDK clients.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
//
// See the AWS IoT client IoT for more
// information on creating a client for this service.
// https://docs.aws.amazon.com/sdk-for-go/api/service/iot/#New
package iot
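
The package documentation above covers client construction and concurrency. A minimal, hedged sketch of that flow follows (not part of this commit; the region and the ListThings call are illustrative assumptions):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iot"
)

func main() {
	// Credentials and region normally come from the environment or shared
	// config; the region here is a placeholder.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))

	// New returns a client that is safe for concurrent use, as documented above.
	svc := iot.New(sess)

	// ListThings is one of the generated API operations on the client.
	out, err := svc.ListThings(&iot.ListThingsInput{})
	if err != nil {
		fmt.Println("ListThings failed:", err)
		return
	}
	fmt.Println("things found:", len(out.Things))
}
```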
@@ -0,0 +1,131 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package iot

const (

	// ErrCodeCertificateConflictException for service response error code
	// "CertificateConflictException".
	//
	// Unable to verify the CA certificate used to sign the device certificate you
	// are attempting to register. This happens when you have registered more
	// than one CA certificate that has the same subject field and public key.
	ErrCodeCertificateConflictException = "CertificateConflictException"

	// ErrCodeCertificateStateException for service response error code
	// "CertificateStateException".
	//
	// The certificate operation is not allowed.
	ErrCodeCertificateStateException = "CertificateStateException"

	// ErrCodeCertificateValidationException for service response error code
	// "CertificateValidationException".
	//
	// The certificate is invalid.
	ErrCodeCertificateValidationException = "CertificateValidationException"

	// ErrCodeDeleteConflictException for service response error code
	// "DeleteConflictException".
	//
	// You can't delete the resource because it is attached to one or more resources.
	ErrCodeDeleteConflictException = "DeleteConflictException"

	// ErrCodeInternalException for service response error code
	// "InternalException".
	//
	// An unexpected error has occurred.
	ErrCodeInternalException = "InternalException"

	// ErrCodeInternalFailureException for service response error code
	// "InternalFailureException".
	//
	// An unexpected error has occurred.
	ErrCodeInternalFailureException = "InternalFailureException"

	// ErrCodeInvalidRequestException for service response error code
	// "InvalidRequestException".
	//
	// The request is not valid.
	ErrCodeInvalidRequestException = "InvalidRequestException"

	// ErrCodeLimitExceededException for service response error code
	// "LimitExceededException".
	//
	// The number of attached entities exceeds the limit.
	ErrCodeLimitExceededException = "LimitExceededException"

	// ErrCodeMalformedPolicyException for service response error code
	// "MalformedPolicyException".
	//
	// The policy documentation is not valid.
	ErrCodeMalformedPolicyException = "MalformedPolicyException"

	// ErrCodeRegistrationCodeValidationException for service response error code
	// "RegistrationCodeValidationException".
	//
	// The registration code is invalid.
	ErrCodeRegistrationCodeValidationException = "RegistrationCodeValidationException"

	// ErrCodeResourceAlreadyExistsException for service response error code
	// "ResourceAlreadyExistsException".
	//
	// The resource already exists.
	ErrCodeResourceAlreadyExistsException = "ResourceAlreadyExistsException"

	// ErrCodeResourceNotFoundException for service response error code
	// "ResourceNotFoundException".
	//
	// The specified resource does not exist.
	ErrCodeResourceNotFoundException = "ResourceNotFoundException"

	// ErrCodeServiceUnavailableException for service response error code
	// "ServiceUnavailableException".
	//
	// The service is temporarily unavailable.
	ErrCodeServiceUnavailableException = "ServiceUnavailableException"

	// ErrCodeSqlParseException for service response error code
	// "SqlParseException".
	//
	// The Rule-SQL expression can't be parsed correctly.
	ErrCodeSqlParseException = "SqlParseException"

	// ErrCodeThrottlingException for service response error code
	// "ThrottlingException".
	//
	// The rate exceeds the limit.
	ErrCodeThrottlingException = "ThrottlingException"

	// ErrCodeTransferAlreadyCompletedException for service response error code
	// "TransferAlreadyCompletedException".
	//
	// You can't revert the certificate transfer because the transfer is already
	// complete.
	ErrCodeTransferAlreadyCompletedException = "TransferAlreadyCompletedException"

	// ErrCodeTransferConflictException for service response error code
	// "TransferConflictException".
	//
	// You can't transfer the certificate because authorization policies are still
	// attached.
	ErrCodeTransferConflictException = "TransferConflictException"

	// ErrCodeUnauthorizedException for service response error code
	// "UnauthorizedException".
	//
	// You are not authorized to perform this operation.
	ErrCodeUnauthorizedException = "UnauthorizedException"

	// ErrCodeVersionConflictException for service response error code
	// "VersionConflictException".
	//
	// An exception thrown when the version of a thing passed to a command is different
	// than the version specified with the --version parameter.
	ErrCodeVersionConflictException = "VersionConflictException"

	// ErrCodeVersionsLimitExceededException for service response error code
	// "VersionsLimitExceededException".
	//
	// The number of policy versions exceeds the limit.
	ErrCodeVersionsLimitExceededException = "VersionsLimitExceededException"
)
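
These generated ErrCode constants are meant to be compared against the code carried by an API error. A hedged sketch of that pattern using the SDK's awserr package (the helper and the simulated error are illustrative, not from this diff):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/iot"
)

// isNotFound reports whether err carries the ResourceNotFoundException code.
func isNotFound(err error) bool {
	if aerr, ok := err.(awserr.Error); ok {
		return aerr.Code() == iot.ErrCodeResourceNotFoundException
	}
	return false
}

func main() {
	// Simulate the kind of error an API call could return.
	err := awserr.New(iot.ErrCodeResourceNotFoundException, "thing does not exist", nil)
	fmt.Println(isNotFound(err)) // true
}
```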
@@ -0,0 +1,96 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package iot

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/client/metadata"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/signer/v4"
	"github.com/aws/aws-sdk-go/private/protocol/restjson"
)

// IoT provides the API operation methods for making requests to
// AWS IoT. See this package's package overview docs
// for details on the service.
//
// IoT methods are safe to use concurrently. It is not safe to
// modify any of the struct's properties though.
type IoT struct {
	*client.Client
}

// Used for custom client initialization logic
var initClient func(*client.Client)

// Used for custom request initialization logic
var initRequest func(*request.Request)

// Service information constants
const (
	ServiceName = "iot"       // Service endpoint prefix API calls made to.
	EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
)

// New creates a new instance of the IoT client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
//     // Create an IoT client from just a session.
//     svc := iot.New(mySession)
//
//     // Create an IoT client with additional configuration
//     svc := iot.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *IoT {
	c := p.ClientConfig(EndpointsID, cfgs...)
	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
}

// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *IoT {
	if len(signingName) == 0 {
		signingName = "execute-api"
	}
	svc := &IoT{
		Client: client.New(
			cfg,
			metadata.ClientInfo{
				ServiceName:   ServiceName,
				SigningName:   signingName,
				SigningRegion: signingRegion,
				Endpoint:      endpoint,
				APIVersion:    "2015-05-28",
			},
			handlers,
		),
	}

	// Handlers
	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
	svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
	svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
	svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
	svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler)

	// Run custom client initialization if present
	if initClient != nil {
		initClient(svc.Client)
	}

	return svc
}

// newRequest creates a new request for an IoT operation and runs any
// custom request initialization.
func (c *IoT) newRequest(op *request.Operation, params, data interface{}) *request.Request {
	req := c.NewRequest(op, params, data)

	// Run custom request initialization if present
	if initRequest != nil {
		initRequest(req)
	}

	return req
}
@@ -0,0 +1,22 @@
The MIT License

Copyright (c) 2014 Benedikt Lang <github at benediktlang.de>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@@ -0,0 +1,194 @@
semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master)
======

semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`.

Usage
-----
```bash
$ go get github.com/blang/semver
```
Note: Always vendor your dependencies or fix on a specific version tag.

```go
import "github.com/blang/semver"

v1, err := semver.Make("1.0.0-beta")
v2, err := semver.Make("2.0.0-beta")
v1.Compare(v2)
```

Also check the [GoDocs](http://godoc.org/github.com/blang/semver).

Why should I use this lib?
-----

- Fully spec compatible
- No reflection
- No regex
- Fully tested (Coverage >99%)
- Readable parsing/validation errors
- Fast (See [Benchmarks](#benchmarks))
- Only Stdlib
- Uses values instead of pointers
- Many features, see below

Features
-----

- Parsing and validation at all levels
- Comparator-like comparisons
- Compare Helper Methods
- InPlace manipulation
- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1`
- Wildcards `>=1.x`, `<=2.5.x`
- Sortable (implements sort.Interface)
- database/sql compatible (sql.Scanner/Valuer)
- encoding/json compatible (json.Marshaler/Unmarshaler)

Ranges
------

A `Range` is a set of conditions which specify which versions satisfy the range.

A condition is composed of an operator and a version. The supported operators are:

- `<1.0.0` Less than `1.0.0`
- `<=1.0.0` Less than or equal to `1.0.0`
- `>1.0.0` Greater than `1.0.0`
- `>=1.0.0` Greater than or equal to `1.0.0`
- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0`
- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`.

Note that spaces between the operator and the version will be gracefully tolerated.

A `Range` can combine multiple conditions separated by spaces:

Ranges can be linked by logical AND:

- `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0`
- `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2`

Ranges can also be linked by logical OR:

- `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x`

AND has a higher precedence than OR. It's not possible to use brackets.

Ranges can be combined by both AND and OR:

- `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`

Range usage:

```go
v, err := semver.Parse("1.2.3")
validRange, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0")
if validRange(v) {
	// valid
}

```
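
As a small supplement (not part of the upstream README), the wildcard and OR syntax described above can be combined in a single range; the version strings are arbitrary examples:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// ">=1.2.x" expands to ">=1.2.0" and "<3.x" to "<3.0.0" under the wildcard rules.
	r := semver.MustParseRange(">=1.2.x <3.x || >=4.0.0")

	for _, s := range []string{"1.2.0", "2.9.9", "3.1.0", "4.0.0"} {
		fmt.Printf("%s matches: %v\n", s, r(semver.MustParse(s)))
	}
}
```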

Example
-----

Have a look at full examples in [examples/main.go](examples/main.go)

```go
import "github.com/blang/semver"

v, err := semver.Make("0.0.1-alpha.preview+123.github")
fmt.Printf("Major: %d\n", v.Major)
fmt.Printf("Minor: %d\n", v.Minor)
fmt.Printf("Patch: %d\n", v.Patch)
fmt.Printf("Pre: %s\n", v.Pre)
fmt.Printf("Build: %s\n", v.Build)

// Prerelease versions array
if len(v.Pre) > 0 {
	fmt.Println("Prerelease versions:")
	for i, pre := range v.Pre {
		fmt.Printf("%d: %q\n", i, pre)
	}
}

// Build meta data array
if len(v.Build) > 0 {
	fmt.Println("Build meta data:")
	for i, build := range v.Build {
		fmt.Printf("%d: %q\n", i, build)
	}
}

v001, err := semver.Make("0.0.1")
// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE
v001.GT(v) == true
v.LT(v001) == true
v.GTE(v) == true
v.LTE(v) == true

// Or use v.Compare(v2) for comparisons (-1, 0, 1):
v001.Compare(v) == 1
v.Compare(v001) == -1
v.Compare(v) == 0

// Manipulate Version in place:
v.Pre[0], err = semver.NewPRVersion("beta")
if err != nil {
	fmt.Printf("Error parsing pre release version: %q", err)
}

fmt.Println("\nValidate versions:")
v.Build[0] = "?"

err = v.Validate()
if err != nil {
	fmt.Printf("Validation failed: %s\n", err)
}
```

Benchmarks
-----

```
BenchmarkParseSimple-4 5000000 390 ns/op 48 B/op 1 allocs/op
BenchmarkParseComplex-4 1000000 1813 ns/op 256 B/op 7 allocs/op
BenchmarkParseAverage-4 1000000 1171 ns/op 163 B/op 4 allocs/op
BenchmarkStringSimple-4 20000000 119 ns/op 16 B/op 1 allocs/op
BenchmarkStringLarger-4 10000000 206 ns/op 32 B/op 2 allocs/op
BenchmarkStringComplex-4 5000000 324 ns/op 80 B/op 3 allocs/op
BenchmarkStringAverage-4 5000000 273 ns/op 53 B/op 2 allocs/op
BenchmarkValidateSimple-4 200000000 9.33 ns/op 0 B/op 0 allocs/op
BenchmarkValidateComplex-4 3000000 469 ns/op 0 B/op 0 allocs/op
BenchmarkValidateAverage-4 5000000 256 ns/op 0 B/op 0 allocs/op
BenchmarkCompareSimple-4 100000000 11.8 ns/op 0 B/op 0 allocs/op
BenchmarkCompareComplex-4 50000000 30.8 ns/op 0 B/op 0 allocs/op
BenchmarkCompareAverage-4 30000000 41.5 ns/op 0 B/op 0 allocs/op
BenchmarkSort-4 3000000 419 ns/op 256 B/op 2 allocs/op
BenchmarkRangeParseSimple-4 2000000 850 ns/op 192 B/op 5 allocs/op
BenchmarkRangeParseAverage-4 1000000 1677 ns/op 400 B/op 10 allocs/op
BenchmarkRangeParseComplex-4 300000 5214 ns/op 1440 B/op 30 allocs/op
BenchmarkRangeMatchSimple-4 50000000 25.6 ns/op 0 B/op 0 allocs/op
BenchmarkRangeMatchAverage-4 30000000 56.4 ns/op 0 B/op 0 allocs/op
BenchmarkRangeMatchComplex-4 10000000 153 ns/op 0 B/op 0 allocs/op
```

See benchmark cases at [semver_test.go](semver_test.go)

Motivation
-----

I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex, which I don't like.

Contribution
-----

Feel free to make a pull request. For bigger changes create an issue first to discuss it.

License
-----

See [LICENSE](LICENSE) file.
@@ -0,0 +1,23 @@
package semver

import (
	"encoding/json"
)

// MarshalJSON implements the encoding/json.Marshaler interface.
func (v Version) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.String())
}

// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
func (v *Version) UnmarshalJSON(data []byte) (err error) {
	var versionString string

	if err = json.Unmarshal(data, &versionString); err != nil {
		return
	}

	*v, err = Parse(versionString)

	return
}
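
A hedged round-trip sketch for the Marshaler/Unmarshaler implementations above (the version string is an arbitrary example):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/blang/semver"
)

func main() {
	v := semver.MustParse("1.2.3-beta.1+build.42")

	// MarshalJSON encodes the version as its string form.
	data, err := json.Marshal(v)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // "1.2.3-beta.1+build.42"

	// UnmarshalJSON parses the string back into a Version.
	var out semver.Version
	if err := json.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Equals(v)) // true
}
```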
@@ -0,0 +1,17 @@
{
  "author": "blang",
  "bugs": {
    "URL": "https://github.com/blang/semver/issues",
    "url": "https://github.com/blang/semver/issues"
  },
  "gx": {
    "dvcsimport": "github.com/blang/semver"
  },
  "gxVersion": "0.10.0",
  "language": "go",
  "license": "MIT",
  "name": "semver",
  "releaseCmd": "git commit -a -m \"gx publish $VERSION\"",
  "version": "3.5.1"
}
@@ -0,0 +1,416 @@
package semver

import (
	"fmt"
	"strconv"
	"strings"
	"unicode"
)

type wildcardType int

const (
	noneWildcard  wildcardType = iota
	majorWildcard wildcardType = 1
	minorWildcard wildcardType = 2
	patchWildcard wildcardType = 3
)

func wildcardTypefromInt(i int) wildcardType {
	switch i {
	case 1:
		return majorWildcard
	case 2:
		return minorWildcard
	case 3:
		return patchWildcard
	default:
		return noneWildcard
	}
}

type comparator func(Version, Version) bool

var (
	compEQ comparator = func(v1 Version, v2 Version) bool {
		return v1.Compare(v2) == 0
	}
	compNE = func(v1 Version, v2 Version) bool {
		return v1.Compare(v2) != 0
	}
	compGT = func(v1 Version, v2 Version) bool {
		return v1.Compare(v2) == 1
	}
	compGE = func(v1 Version, v2 Version) bool {
		return v1.Compare(v2) >= 0
	}
	compLT = func(v1 Version, v2 Version) bool {
		return v1.Compare(v2) == -1
	}
	compLE = func(v1 Version, v2 Version) bool {
		return v1.Compare(v2) <= 0
	}
)

type versionRange struct {
	v Version
	c comparator
}

// rangeFunc creates a Range from the given versionRange.
func (vr *versionRange) rangeFunc() Range {
	return Range(func(v Version) bool {
		return vr.c(v, vr.v)
	})
}

// Range represents a range of versions.
// A Range can be used to check if a Version satisfies it:
//
//     range, err := semver.ParseRange(">1.0.0 <2.0.0")
//     range(semver.MustParse("1.1.1")) // returns true
type Range func(Version) bool

// OR combines the existing Range with another Range using logical OR.
func (rf Range) OR(f Range) Range {
	return Range(func(v Version) bool {
		return rf(v) || f(v)
	})
}

// AND combines the existing Range with another Range using logical AND.
func (rf Range) AND(f Range) Range {
	return Range(func(v Version) bool {
		return rf(v) && f(v)
	})
}

// ParseRange parses a range and returns a Range.
// If the range could not be parsed an error is returned.
//
// Valid ranges are:
// - "<1.0.0"
// - "<=1.0.0"
// - ">1.0.0"
// - ">=1.0.0"
// - "1.0.0", "=1.0.0", "==1.0.0"
// - "!1.0.0", "!=1.0.0"
//
// A Range can consist of multiple ranges separated by space:
// Ranges can be linked by logical AND:
// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0"
// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2
//
// Ranges can also be linked by logical OR:
// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x"
//
// AND has a higher precedence than OR. It's not possible to use brackets.
//
// Ranges can be combined by both AND and OR
//
// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
func ParseRange(s string) (Range, error) {
	parts := splitAndTrim(s)
	orParts, err := splitORParts(parts)
	if err != nil {
		return nil, err
	}
	expandedParts, err := expandWildcardVersion(orParts)
	if err != nil {
		return nil, err
	}
	var orFn Range
	for _, p := range expandedParts {
		var andFn Range
		for _, ap := range p {
			opStr, vStr, err := splitComparatorVersion(ap)
			if err != nil {
				return nil, err
			}
			vr, err := buildVersionRange(opStr, vStr)
			if err != nil {
				return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err)
			}
			rf := vr.rangeFunc()

			// Set function
			if andFn == nil {
				andFn = rf
			} else { // Combine with existing function
				andFn = andFn.AND(rf)
			}
		}
		if orFn == nil {
			orFn = andFn
		} else {
			orFn = orFn.OR(andFn)
		}

	}
	return orFn, nil
}

// splitORParts splits the already cleaned parts by '||'.
// Checks for invalid positions of the operator and returns an
// error if found.
func splitORParts(parts []string) ([][]string, error) {
	var ORparts [][]string
	last := 0
	for i, p := range parts {
		if p == "||" {
			if i == 0 {
				return nil, fmt.Errorf("First element in range is '||'")
			}
			ORparts = append(ORparts, parts[last:i])
			last = i + 1
		}
	}
	if last == len(parts) {
		return nil, fmt.Errorf("Last element in range is '||'")
	}
	ORparts = append(ORparts, parts[last:])
	return ORparts, nil
}

// buildVersionRange takes an operator string and a version string
// and builds a versionRange, otherwise an error.
func buildVersionRange(opStr, vStr string) (*versionRange, error) {
	c := parseComparator(opStr)
	if c == nil {
		return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, ""))
	}
	v, err := Parse(vStr)
	if err != nil {
		return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err)
	}

	return &versionRange{
		v: v,
		c: c,
	}, nil

}

// inArray checks if a byte is contained in an array of bytes
func inArray(s byte, list []byte) bool {
	for _, el := range list {
		if el == s {
			return true
		}
	}
	return false
}

// splitAndTrim splits a range string by spaces and cleans whitespaces
func splitAndTrim(s string) (result []string) {
	last := 0
	var lastChar byte
	excludeFromSplit := []byte{'>', '<', '='}
	for i := 0; i < len(s); i++ {
		if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) {
			if last < i-1 {
				result = append(result, s[last:i])
			}
			last = i + 1
		} else if s[i] != ' ' {
			lastChar = s[i]
		}
	}
	if last < len(s)-1 {
		result = append(result, s[last:])
	}

	for i, v := range result {
		result[i] = strings.Replace(v, " ", "", -1)
	}

	// parts := strings.Split(s, " ")
	// for _, x := range parts {
	// 	if s := strings.TrimSpace(x); len(s) != 0 {
	// 		result = append(result, s)
	// 	}
	// }
	return
}

// splitComparatorVersion splits the comparator from the version.
// Input must be free of leading or trailing spaces.
func splitComparatorVersion(s string) (string, string, error) {
	i := strings.IndexFunc(s, unicode.IsDigit)
	if i == -1 {
		return "", "", fmt.Errorf("Could not get version from string: %q", s)
	}
	return strings.TrimSpace(s[0:i]), s[i:], nil
}

// getWildcardType will return the type of wildcard that the
// passed version contains
func getWildcardType(vStr string) wildcardType {
	parts := strings.Split(vStr, ".")
	nparts := len(parts)
	wildcard := parts[nparts-1]

	possibleWildcardType := wildcardTypefromInt(nparts)
	if wildcard == "x" {
		return possibleWildcardType
	}

	return noneWildcard
}

// createVersionFromWildcard will convert a wildcard version
// into a regular version, replacing 'x's with '0's, handling
// special cases like '1.x.x' and '1.x'
func createVersionFromWildcard(vStr string) string {
	// handle 1.x.x
	vStr2 := strings.Replace(vStr, ".x.x", ".x", 1)
	vStr2 = strings.Replace(vStr2, ".x", ".0", 1)
	parts := strings.Split(vStr2, ".")

	// handle 1.x
	if len(parts) == 2 {
		return vStr2 + ".0"
	}

	return vStr2
}

// incrementMajorVersion will increment the major version
// of the passed version
func incrementMajorVersion(vStr string) (string, error) {
	parts := strings.Split(vStr, ".")
	i, err := strconv.Atoi(parts[0])
	if err != nil {
		return "", err
	}
	parts[0] = strconv.Itoa(i + 1)

	return strings.Join(parts, "."), nil
}

// incrementMinorVersion will increment the minor version
// of the passed version
func incrementMinorVersion(vStr string) (string, error) {
	parts := strings.Split(vStr, ".")
	i, err := strconv.Atoi(parts[1])
	if err != nil {
		return "", err
	}
	parts[1] = strconv.Itoa(i + 1)

	return strings.Join(parts, "."), nil
}

// expandWildcardVersion will expand wildcards inside versions
// following these rules:
//
// * when dealing with patch wildcards:
//   >= 1.2.x will become >= 1.2.0
//   <= 1.2.x will become < 1.3.0
//   > 1.2.x will become >= 1.3.0
//   < 1.2.x will become < 1.2.0
//   != 1.2.x will become < 1.2.0 >= 1.3.0
//
// * when dealing with minor wildcards:
//   >= 1.x will become >= 1.0.0
//   <= 1.x will become < 2.0.0
//   > 1.x will become >= 2.0.0
//   < 1.0 will become < 1.0.0
//   != 1.x will become < 1.0.0 >= 2.0.0
//
// * when dealing with wildcards without
//   version operator:
//   1.2.x will become >= 1.2.0 < 1.3.0
//   1.x will become >= 1.0.0 < 2.0.0
func expandWildcardVersion(parts [][]string) ([][]string, error) {
	var expandedParts [][]string
	for _, p := range parts {
		var newParts []string
		for _, ap := range p {
			if strings.Index(ap, "x") != -1 {
				opStr, vStr, err := splitComparatorVersion(ap)
				if err != nil {
					return nil, err
				}

				versionWildcardType := getWildcardType(vStr)
				flatVersion := createVersionFromWildcard(vStr)

				var resultOperator string
				var shouldIncrementVersion bool
				switch opStr {
				case ">":
					resultOperator = ">="
					shouldIncrementVersion = true
				case ">=":
					resultOperator = ">="
				case "<":
					resultOperator = "<"
				case "<=":
					resultOperator = "<"
					shouldIncrementVersion = true
				case "", "=", "==":
					newParts = append(newParts, ">="+flatVersion)
					resultOperator = "<"
					shouldIncrementVersion = true
				case "!=", "!":
					newParts = append(newParts, "<"+flatVersion)
					resultOperator = ">="
					shouldIncrementVersion = true
				}

				var resultVersion string
				if shouldIncrementVersion {
					switch versionWildcardType {
					case patchWildcard:
						resultVersion, _ = incrementMinorVersion(flatVersion)
					case minorWildcard:
						resultVersion, _ = incrementMajorVersion(flatVersion)
					}
				} else {
					resultVersion = flatVersion
				}

				ap = resultOperator + resultVersion
			}
			newParts = append(newParts, ap)
		}
		expandedParts = append(expandedParts, newParts)
	}

	return expandedParts, nil
}

func parseComparator(s string) comparator {
	switch s {
	case "==":
		fallthrough
	case "":
		fallthrough
	case "=":
		return compEQ
	case ">":
		return compGT
	case ">=":
		return compGE
	case "<":
		return compLT
	case "<=":
		return compLE
	case "!":
		fallthrough
	case "!=":
		return compNE
	}

	return nil
}

// MustParseRange is like ParseRange but panics if the range cannot be parsed.
func MustParseRange(s string) Range {
	r, err := ParseRange(s)
	if err != nil {
		panic(`semver: ParseRange(` + s + `): ` + err.Error())
	}
	return r
}
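
A brief sketch (not from the vendored file) of composing Range values with the exported AND/OR helpers defined above; the ranges chosen are arbitrary:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	atLeastOne := semver.MustParseRange(">=1.0.0")
	notTwo := semver.MustParseRange("!2.0.0")

	// Equivalent to the single range ">=1.0.0 !2.0.0".
	combined := atLeastOne.AND(notTwo)

	fmt.Println(combined(semver.MustParse("1.5.0"))) // true
	fmt.Println(combined(semver.MustParse("2.0.0"))) // false
}
```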
@@ -0,0 +1,418 @@
package semver

import (
	"errors"
	"fmt"
	"strconv"
	"strings"
)

const (
	numbers  string = "0123456789"
	alphas          = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"
	alphanum        = alphas + numbers
)

// SpecVersion is the latest fully supported spec version of semver
var SpecVersion = Version{
	Major: 2,
	Minor: 0,
	Patch: 0,
}

// Version represents a semver compatible version
type Version struct {
	Major uint64
	Minor uint64
	Patch uint64
	Pre   []PRVersion
	Build []string // No precedence
}

// Version to string
func (v Version) String() string {
	b := make([]byte, 0, 5)
	b = strconv.AppendUint(b, v.Major, 10)
	b = append(b, '.')
	b = strconv.AppendUint(b, v.Minor, 10)
	b = append(b, '.')
	b = strconv.AppendUint(b, v.Patch, 10)

	if len(v.Pre) > 0 {
		b = append(b, '-')
		b = append(b, v.Pre[0].String()...)

		for _, pre := range v.Pre[1:] {
			b = append(b, '.')
			b = append(b, pre.String()...)
		}
	}

	if len(v.Build) > 0 {
		b = append(b, '+')
		b = append(b, v.Build[0]...)

		for _, build := range v.Build[1:] {
			b = append(b, '.')
			b = append(b, build...)
		}
	}

	return string(b)
}

// Equals checks if v is equal to o.
func (v Version) Equals(o Version) bool {
	return (v.Compare(o) == 0)
}

// EQ checks if v is equal to o.
func (v Version) EQ(o Version) bool {
	return (v.Compare(o) == 0)
}

// NE checks if v is not equal to o.
func (v Version) NE(o Version) bool {
	return (v.Compare(o) != 0)
}

// GT checks if v is greater than o.
func (v Version) GT(o Version) bool {
	return (v.Compare(o) == 1)
}

// GTE checks if v is greater than or equal to o.
func (v Version) GTE(o Version) bool {
	return (v.Compare(o) >= 0)
}

// GE checks if v is greater than or equal to o.
func (v Version) GE(o Version) bool {
	return (v.Compare(o) >= 0)
}

// LT checks if v is less than o.
func (v Version) LT(o Version) bool {
	return (v.Compare(o) == -1)
}

// LTE checks if v is less than or equal to o.
func (v Version) LTE(o Version) bool {
	return (v.Compare(o) <= 0)
}

// LE checks if v is less than or equal to o.
func (v Version) LE(o Version) bool {
	return (v.Compare(o) <= 0)
}

// Compare compares Versions v to o:
// -1 == v is less than o
// 0 == v is equal to o
// 1 == v is greater than o
func (v Version) Compare(o Version) int {
	if v.Major != o.Major {
		if v.Major > o.Major {
			return 1
		}
		return -1
	}
	if v.Minor != o.Minor {
		if v.Minor > o.Minor {
			return 1
		}
		return -1
	}
	if v.Patch != o.Patch {
		if v.Patch > o.Patch {
			return 1
		}
		return -1
	}

	// Quick comparison if a version has no prerelease versions
	if len(v.Pre) == 0 && len(o.Pre) == 0 {
		return 0
	} else if len(v.Pre) == 0 && len(o.Pre) > 0 {
		return 1
	} else if len(v.Pre) > 0 && len(o.Pre) == 0 {
		return -1
	}

	i := 0
	for ; i < len(v.Pre) && i < len(o.Pre); i++ {
		if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 {
			continue
		} else if comp == 1 {
			return 1
		} else {
			return -1
		}
	}

	// If all prerelease identifiers are equal but one version has more of them, that one is greater
	if i == len(v.Pre) && i == len(o.Pre) {
		return 0
	} else if i == len(v.Pre) && i < len(o.Pre) {
		return -1
	} else {
		return 1
	}

}

// Validate validates v and returns an error in case of an invalid version
func (v Version) Validate() error {
	// Major, Minor, Patch already validated using uint64

	for _, pre := range v.Pre {
		if !pre.IsNum { // Numeric prerelease versions already uint64
			if len(pre.VersionStr) == 0 {
				return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr)
			}
			if !containsOnly(pre.VersionStr, alphanum) {
				return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr)
			}
		}
	}

	for _, build := range v.Build {
		if len(build) == 0 {
			return fmt.Errorf("Build meta data can not be empty %q", build)
		}
		if !containsOnly(build, alphanum) {
			return fmt.Errorf("Invalid character(s) found in build meta data %q", build)
		}
	}

	return nil
}

// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error
func New(s string) (vp *Version, err error) {
	v, err := Parse(s)
	vp = &v
	return
}

// Make is an alias for Parse, parses version string and returns a validated Version or error
func Make(s string) (Version, error) {
	return Parse(s)
}

// ParseTolerant allows for certain version specifications that do not strictly adhere to semver
// specs to be parsed by this library. It does so by normalizing versions before passing them to
// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions
// with only major and minor components specified
func ParseTolerant(s string) (Version, error) {
	s = strings.TrimSpace(s)
	s = strings.TrimPrefix(s, "v")

	// Split into major.minor.(patch+pr+meta)
	parts := strings.SplitN(s, ".", 3)
	if len(parts) < 3 {
		if strings.ContainsAny(parts[len(parts)-1], "+-") {
			return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data")
		}
		for len(parts) < 3 {
			parts = append(parts, "0")
		}
		s = strings.Join(parts, ".")
	}

	return Parse(s)
}

// Parse parses version string and returns a validated Version or error
func Parse(s string) (Version, error) {
	if len(s) == 0 {
		return Version{}, errors.New("Version string empty")
	}

	// Split into major.minor.(patch+pr+meta)
	parts := strings.SplitN(s, ".", 3)
	if len(parts) != 3 {
		return Version{}, errors.New("No Major.Minor.Patch elements found")
	}

	// Major
	if !containsOnly(parts[0], numbers) {
		return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0])
	}
	if hasLeadingZeroes(parts[0]) {
		return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0])
	}
	major, err := strconv.ParseUint(parts[0], 10, 64)
	if err != nil {
		return Version{}, err
	}

	// Minor
	if !containsOnly(parts[1], numbers) {
		return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1])
	}
	if hasLeadingZeroes(parts[1]) {
		return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1])
	}
	minor, err := strconv.ParseUint(parts[1], 10, 64)
	if err != nil {
		return Version{}, err
	}

	v := Version{}
	v.Major = major
	v.Minor = minor

	var build, prerelease []string
	patchStr := parts[2]

	if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 {
		build = strings.Split(patchStr[buildIndex+1:], ".")
		patchStr = patchStr[:buildIndex]
	}

	if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 {
		prerelease = strings.Split(patchStr[preIndex+1:], ".")
		patchStr = patchStr[:preIndex]
	}

	if !containsOnly(patchStr, numbers) {
		return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr)
	}
	if hasLeadingZeroes(patchStr) {
		return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr)
	}
	patch, err := strconv.ParseUint(patchStr, 10, 64)
	if err != nil {
		return Version{}, err
	}

	v.Patch = patch

	// Prerelease
	for _, prstr := range prerelease {
		parsedPR, err := NewPRVersion(prstr)
		if err != nil {
			return Version{}, err
		}
		v.Pre = append(v.Pre, parsedPR)
	}

	// Build meta data
	for _, str := range build {
		if len(str) == 0 {
			return Version{}, errors.New("Build meta data is empty")
		}
		if !containsOnly(str, alphanum) {
			return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str)
		}
		v.Build = append(v.Build, str)
	}

	return v, nil
}

// MustParse is like Parse but panics if the version cannot be parsed.
func MustParse(s string) Version {
	v, err := Parse(s)
	if err != nil {
		panic(`semver: Parse(` + s + `): ` + err.Error())
	}
	return v
}

// PRVersion represents a PreRelease Version
type PRVersion struct {
	VersionStr string
	VersionNum uint64
	IsNum      bool
}

// NewPRVersion creates a new valid prerelease version
func NewPRVersion(s string) (PRVersion, error) {
	if len(s) == 0 {
		return PRVersion{}, errors.New("Prerelease is empty")
	}
	v := PRVersion{}
	if containsOnly(s, numbers) {
		if hasLeadingZeroes(s) {
			return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s)
		}
		num, err := strconv.ParseUint(s, 10, 64)

		// Might never be hit, but just in case
		if err != nil {
			return PRVersion{}, err
		}
		v.VersionNum = num
		v.IsNum = true
	} else if containsOnly(s, alphanum) {
		v.VersionStr = s
		v.IsNum = false
	} else {
		return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s)
	}
	return v, nil
}

// IsNumeric checks if prerelease-version is numeric
func (v PRVersion) IsNumeric() bool {
	return v.IsNum
}

// Compare compares two PreRelease Versions v and o:
// -1 == v is less than o
// 0 == v is equal to o
// 1 == v is greater than o
func (v PRVersion) Compare(o PRVersion) int {
	if v.IsNum && !o.IsNum {
		return -1
	} else if !v.IsNum && o.IsNum {
		return 1
	} else if v.IsNum && o.IsNum {
		if v.VersionNum == o.VersionNum {
			return 0
		} else if v.VersionNum > o.VersionNum {
			return 1
		} else {
			return -1
		}
	} else { // both are Alphas
		if v.VersionStr == o.VersionStr {
			return 0
		} else if v.VersionStr > o.VersionStr {
			return 1
		} else {
			return -1
		}
	}
}

// PreRelease version to string
func (v PRVersion) String() string {
	if v.IsNum {
		return strconv.FormatUint(v.VersionNum, 10)
	}
	return v.VersionStr
}

func containsOnly(s string, set string) bool {
	return strings.IndexFunc(s, func(r rune) bool {
		return !strings.ContainsRune(set, r)
	}) == -1
}

func hasLeadingZeroes(s string) bool {
	return len(s) > 1 && s[0] == '0'
}

// NewBuildVersion creates a new valid build version
func NewBuildVersion(s string) (string, error) {
	if len(s) == 0 {
		return "", errors.New("Buildversion is empty")
	}
	if !containsOnly(s, alphanum) {
		return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s)
	}
	return s, nil
}
|
|
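A minimal usage sketch for the parsing API above. The import path is an assumption: upstream this library is github.com/blang/semver, and inside this commit it is vendored under Terraform's own vendor tree.

package main

import (
	"fmt"

	"github.com/blang/semver" // assumed import path for the vendored library
)

func main() {
	// Parse returns an error for invalid input; MustParse panics instead.
	v, err := semver.Parse("1.2.3-beta.2+build.20170801")
	if err != nil {
		panic(err)
	}

	// Prerelease identifiers become PRVersion values; build metadata is
	// kept as raw alphanumeric strings.
	fmt.Println(v.Major, v.Minor, v.Patch) // 1 2 3
	for _, pr := range v.Pre {
		fmt.Println("prerelease:", pr.String(), "numeric:", pr.IsNumeric())
	}
	fmt.Println("build:", v.Build)

	// MustParse is convenient for fixed, known-good version literals.
	fixed := semver.MustParse("0.10.0")
	fmt.Println(fixed.String())
}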
@ -0,0 +1,28 @@
package semver

import (
	"sort"
)

// Versions represents multiple versions.
type Versions []Version

// Len returns length of version collection
func (s Versions) Len() int {
	return len(s)
}

// Swap swaps two versions inside the collection by its indices
func (s Versions) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

// Less checks if version at index i is less than version at index j
func (s Versions) Less(i, j int) bool {
	return s[i].LT(s[j])
}

// Sort sorts a slice of versions
func Sort(versions []Version) {
	sort.Sort(Versions(versions))
}
|
|
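A short sketch of sorting with the Versions collection defined above; the import path is the same assumption as before.

package main

import (
	"fmt"

	"github.com/blang/semver" // assumed import path
)

func main() {
	vs := []semver.Version{
		semver.MustParse("1.10.0"),
		semver.MustParse("1.2.0"),
		semver.MustParse("1.2.0-rc.1"),
	}

	// Sort wraps sort.Sort(Versions(vs)); Less is based on Version.LT, so
	// a prerelease orders before its final release and numeric components
	// compare numerically rather than lexically.
	semver.Sort(vs)

	for _, v := range vs {
		fmt.Println(v) // 1.2.0-rc.1, 1.2.0, 1.10.0
	}
}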
@ -0,0 +1,30 @@
package semver

import (
	"database/sql/driver"
	"fmt"
)

// Scan implements the database/sql.Scanner interface.
func (v *Version) Scan(src interface{}) (err error) {
	var str string
	switch src := src.(type) {
	case string:
		str = src
	case []byte:
		str = string(src)
	default:
		return fmt.Errorf("Version.Scan: cannot convert %T to string.", src)
	}

	if t, err := Parse(str); err == nil {
		*v = t
	}

	return
}

// Value implements the database/sql/driver.Valuer interface.
func (v Version) Value() (driver.Value, error) {
	return v.String(), nil
}
|
|
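A sketch of the database/sql integration above, exercised directly rather than through a real driver, so no database connection or schema is assumed.

package main

import (
	"fmt"

	"github.com/blang/semver" // assumed import path
)

func main() {
	// Scan accepts string or []byte, which is what most SQL drivers hand
	// back for a VARCHAR column.
	var v semver.Version
	if err := v.Scan([]byte("0.10.0")); err != nil {
		panic(err)
	}

	// Value round-trips the version back to its canonical string form,
	// suitable for writing to the same column.
	val, err := v.Value()
	if err != nil {
		panic(err)
	}
	fmt.Printf("scanned %s, stored as %v\n", v, val)
}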
@ -121,9 +121,11 @@ type Operation struct {
|
|||
|
||||
// The options below are more self-explanatory and affect the runtime
|
||||
// behavior of the operation.
|
||||
Destroy bool
|
||||
Targets []string
|
||||
Variables map[string]interface{}
|
||||
Destroy bool
|
||||
Targets []string
|
||||
Variables map[string]interface{}
|
||||
AutoApprove bool
|
||||
DestroyForce bool
|
||||
|
||||
// Input/output/control options.
|
||||
UIIn terraform.UIInput
|
||||
|
@ -136,8 +138,9 @@ type Operation struct {
|
|||
// The duration to retry obtaining a State lock.
|
||||
StateLockTimeout time.Duration
|
||||
|
||||
// Environment is the named state that should be loaded from the Backend.
|
||||
Environment string
|
||||
// Workspace is the name of the workspace that this operation should run
|
||||
// in, which controls which named state is used.
|
||||
Workspace string
|
||||
}
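For context, a sketch of how a caller might populate the fields added in this hunk. The field names come from the diff itself; the literal construction and values are illustrative only, since in Terraform these are normally set by the CLI commands.

package main

import "github.com/hashicorp/terraform/backend"

func main() {
	// Illustrative only: how a command might fill in the new options.
	op := &backend.Operation{
		Workspace: "staging", // replaces the old Environment field

		Destroy:      false,
		Targets:      []string{"aws_instance.web"},
		AutoApprove:  true,  // skip the interactive approval prompt
		DestroyForce: false, // only consulted for destroy operations
	}
	_ = op
}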
|
||||
|
||||
// RunningOperation is the result of starting an operation.
|
||||
|
|
|
@ -13,6 +13,7 @@ import (
|
|||
backendconsul "github.com/hashicorp/terraform/backend/remote-state/consul"
|
||||
backendinmem "github.com/hashicorp/terraform/backend/remote-state/inmem"
|
||||
backendS3 "github.com/hashicorp/terraform/backend/remote-state/s3"
|
||||
backendSwift "github.com/hashicorp/terraform/backend/remote-state/swift"
|
||||
)
|
||||
|
||||
// backends is the list of available backends. This is a global variable
|
||||
|
@ -37,6 +38,7 @@ func init() {
|
|||
"local": func() backend.Backend { return &backendlocal.Local{} },
|
||||
"consul": func() backend.Backend { return backendconsul.New() },
|
||||
"inmem": func() backend.Backend { return backendinmem.New() },
|
||||
"swift": func() backend.Backend { return backendSwift.New() },
|
||||
"s3": func() backend.Backend { return backendS3.New() },
|
||||
}
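A sketch of how a factory map like the one in this init block is typically consumed: look the name up, call the zero-argument factory, then configure the returned backend. The newBackend helper is hypothetical; the backendlocal import path is assumed from the package name used above.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/backend"
	backendlocal "github.com/hashicorp/terraform/backend/local" // assumed path
	backendinmem "github.com/hashicorp/terraform/backend/remote-state/inmem"
)

// newBackend is a hypothetical helper mirroring the init() map above.
func newBackend(name string) (backend.Backend, error) {
	factories := map[string]func() backend.Backend{
		"local": func() backend.Backend { return &backendlocal.Local{} },
		"inmem": func() backend.Backend { return backendinmem.New() },
	}
	f, ok := factories[name]
	if !ok {
		return nil, fmt.Errorf("unknown backend type %q", name)
	}
	return f(), nil
}

func main() {
	b, err := newBackend("inmem")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", b)
}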
|
||||
|
||||
|
|
|
@ -20,8 +20,8 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
DefaultEnvDir = "terraform.tfstate.d"
|
||||
DefaultEnvFile = "environment"
|
||||
DefaultWorkspaceDir = "terraform.tfstate.d"
|
||||
DefaultWorkspaceFile = "environment"
|
||||
DefaultStateFilename = "terraform.tfstate"
|
||||
DefaultDataDir = ".terraform"
|
||||
DefaultBackupExtension = ".backup"
|
||||
|
@ -36,8 +36,8 @@ type Local struct {
|
|||
CLI cli.Ui
|
||||
CLIColor *colorstring.Colorize
|
||||
|
||||
// The State* paths are set from the CLI options, and may be left blank to
|
||||
// use the defaults. If the actual paths for the local backend state are
|
||||
// The State* paths are set from the backend config, and may be left blank
|
||||
// to use the defaults. If the actual paths for the local backend state are
|
||||
// needed, use the StatePaths method.
|
||||
//
|
||||
// StatePath is the local path where state is read from.
|
||||
|
@ -48,12 +48,12 @@ type Local struct {
|
|||
// StateBackupPath is the local path where a backup file will be written.
|
||||
// Set this to "-" to disable state backup.
|
||||
//
|
||||
// StateEnvPath is the path to the folder containing environments. This
|
||||
// defaults to DefaultEnvDir if not set.
|
||||
StatePath string
|
||||
StateOutPath string
|
||||
StateBackupPath string
|
||||
StateEnvDir string
|
||||
// StateWorkspaceDir is the path to the folder containing data for
|
||||
// non-default workspaces. This defaults to DefaultWorkspaceDir if not set.
|
||||
StatePath string
|
||||
StateOutPath string
|
||||
StateBackupPath string
|
||||
StateWorkspaceDir string
|
||||
|
||||
// We only want to create a single instance of a local state, so store them
|
||||
// here as they're loaded.
|
||||
|
@ -127,7 +127,7 @@ func (b *Local) States() ([]string, error) {
|
|||
// the listing always start with "default"
|
||||
envs := []string{backend.DefaultStateName}
|
||||
|
||||
entries, err := ioutil.ReadDir(b.stateEnvDir())
|
||||
entries, err := ioutil.ReadDir(b.stateWorkspaceDir())
|
||||
// no error if there's no envs configured
|
||||
if os.IsNotExist(err) {
|
||||
return envs, nil
|
||||
|
@ -166,7 +166,7 @@ func (b *Local) DeleteState(name string) error {
|
|||
}
|
||||
|
||||
delete(b.states, name)
|
||||
return os.RemoveAll(filepath.Join(b.stateEnvDir(), name))
|
||||
return os.RemoveAll(filepath.Join(b.stateWorkspaceDir(), name))
|
||||
}
|
||||
|
||||
func (b *Local) State(name string) (state.State, error) {
|
||||
|
@ -292,11 +292,20 @@ func (b *Local) init() {
|
|||
Default: "",
|
||||
},
|
||||
|
||||
"environment_dir": &schema.Schema{
|
||||
"workspace_dir": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "",
|
||||
},
|
||||
|
||||
"environment_dir": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "",
|
||||
ConflictsWith: []string{"workspace_dir"},
|
||||
|
||||
Deprecated: "workspace_dir should be used instead, with the same meaning",
|
||||
},
|
||||
},
|
||||
|
||||
ConfigureFunc: b.schemaConfigure,
|
||||
|
@ -318,10 +327,18 @@ func (b *Local) schemaConfigure(ctx context.Context) error {
|
|||
b.StateOutPath = path
|
||||
}
|
||||
|
||||
if raw, ok := d.GetOk("workspace_dir"); ok {
|
||||
path := raw.(string)
|
||||
if path != "" {
|
||||
b.StateWorkspaceDir = path
|
||||
}
|
||||
}
|
||||
|
||||
// Legacy name, which ConflictsWith workspace_dir
|
||||
if raw, ok := d.GetOk("environment_dir"); ok {
|
||||
path := raw.(string)
|
||||
if path != "" {
|
||||
b.StateEnvDir = path
|
||||
b.StateWorkspaceDir = path
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -344,7 +361,7 @@ func (b *Local) StatePaths(name string) (string, string, string) {
|
|||
statePath = DefaultStateFilename
|
||||
}
|
||||
} else {
|
||||
statePath = filepath.Join(b.stateEnvDir(), name, DefaultStateFilename)
|
||||
statePath = filepath.Join(b.stateWorkspaceDir(), name, DefaultStateFilename)
|
||||
}
|
||||
|
||||
if stateOutPath == "" {
|
||||
|
@ -367,7 +384,7 @@ func (b *Local) createState(name string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
stateDir := filepath.Join(b.stateEnvDir(), name)
|
||||
stateDir := filepath.Join(b.stateWorkspaceDir(), name)
|
||||
s, err := os.Stat(stateDir)
|
||||
if err == nil && s.IsDir() {
|
||||
// no need to check for os.IsNotExist, since that is covered by os.MkdirAll
|
||||
|
@ -383,30 +400,33 @@ func (b *Local) createState(name string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// stateEnvDir returns the directory where state environments are stored.
|
||||
func (b *Local) stateEnvDir() string {
|
||||
if b.StateEnvDir != "" {
|
||||
return b.StateEnvDir
|
||||
// stateWorkspaceDir returns the directory where state environments are stored.
|
||||
func (b *Local) stateWorkspaceDir() string {
|
||||
if b.StateWorkspaceDir != "" {
|
||||
return b.StateWorkspaceDir
|
||||
}
|
||||
|
||||
return DefaultEnvDir
|
||||
return DefaultWorkspaceDir
|
||||
}
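A small standalone sketch of the path layout that stateWorkspaceDir and StatePaths produce for a non-default workspace, using the defaults from this file. The constants are copied here so the example runs on its own; the real code also handles StateOutPath and backups.

package main

import (
	"fmt"
	"path/filepath"
)

// Defaults copied from the local backend for illustration.
const (
	DefaultWorkspaceDir  = "terraform.tfstate.d"
	DefaultStateFilename = "terraform.tfstate"
)

// statePath mirrors the branch in StatePaths: the default workspace keeps
// the legacy flat layout, every other workspace gets its own directory.
func statePath(workspaceDir, name string) string {
	if name == "" || name == "default" {
		return DefaultStateFilename
	}
	if workspaceDir == "" {
		workspaceDir = DefaultWorkspaceDir
	}
	return filepath.Join(workspaceDir, name, DefaultStateFilename)
}

func main() {
	fmt.Println(statePath("", "default")) // terraform.tfstate
	fmt.Println(statePath("", "staging")) // terraform.tfstate.d/staging/terraform.tfstate
}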
|
||||
|
||||
// currentStateName returns the name of the current named state as set in the
|
||||
// configuration files.
|
||||
// If there are no configured environments, currentStateName returns "default"
|
||||
func (b *Local) currentStateName() (string, error) {
|
||||
contents, err := ioutil.ReadFile(filepath.Join(DefaultDataDir, DefaultEnvFile))
|
||||
if os.IsNotExist(err) {
|
||||
return backend.DefaultStateName, nil
|
||||
}
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if fromFile := strings.TrimSpace(string(contents)); fromFile != "" {
|
||||
return fromFile, nil
|
||||
}
|
||||
|
||||
return backend.DefaultStateName, nil
|
||||
func (b *Local) pluginInitRequired(providerErr *terraform.ResourceProviderError) {
|
||||
b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
|
||||
strings.TrimSpace(errPluginInit)+"\n",
|
||||
providerErr)))
|
||||
}
|
||||
|
||||
// this relies on multierror to format the plugin errors below the copy
|
||||
const errPluginInit = `
|
||||
[reset][bold][yellow]Plugin reinitialization required. Please run "terraform init".[reset]
|
||||
[yellow]Reason: Could not satisfy plugin requirements.
|
||||
|
||||
Plugins are external binaries that Terraform uses to access and manipulate
|
||||
resources. The configuration provided requires plugins which can't be located,
|
||||
don't satisfy the version constraints, or are otherwise incompatible.
|
||||
|
||||
[reset][red]%s
|
||||
|
||||
[reset][yellow]Terraform automatically discovers provider requirements from your
|
||||
configuration, including providers used in child modules. To see the
|
||||
requirements and constraints from each module, run "terraform providers".
|
||||
`
|
||||
|
|
|
@ -12,6 +12,7 @@ import (
|
|||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/command/clistate"
|
||||
"github.com/hashicorp/terraform/command/format"
|
||||
"github.com/hashicorp/terraform/config/module"
|
||||
"github.com/hashicorp/terraform/state"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
|
@ -89,10 +90,73 @@ func (b *Local) opApply(
|
|||
|
||||
// Perform the plan
|
||||
log.Printf("[INFO] backend/local: apply calling Plan")
|
||||
if _, err := tfCtx.Plan(); err != nil {
|
||||
plan, err := tfCtx.Plan()
|
||||
if err != nil {
|
||||
runningOp.Err = errwrap.Wrapf("Error running plan: {{err}}", err)
|
||||
return
|
||||
}
|
||||
|
||||
trivialPlan := plan.Diff == nil || plan.Diff.Empty()
|
||||
hasUI := op.UIOut != nil && op.UIIn != nil
|
||||
if hasUI && ((op.Destroy && !op.DestroyForce) ||
|
||||
(!op.Destroy && !op.AutoApprove && !trivialPlan)) {
|
||||
var desc, query string
|
||||
if op.Destroy {
|
||||
// Default destroy message
|
||||
desc = "Terraform will delete all your managed infrastructure, as shown above.\n" +
|
||||
"There is no undo. Only 'yes' will be accepted to confirm."
|
||||
|
||||
// If targets are specified, list those to user
|
||||
if op.Targets != nil {
|
||||
var descBuffer bytes.Buffer
|
||||
descBuffer.WriteString("Terraform will delete the following infrastructure:\n")
|
||||
for _, target := range op.Targets {
|
||||
descBuffer.WriteString("\t")
|
||||
descBuffer.WriteString(target)
|
||||
descBuffer.WriteString("\n")
|
||||
}
|
||||
descBuffer.WriteString("There is no undo. Only 'yes' will be accepted to confirm")
|
||||
desc = descBuffer.String()
|
||||
}
|
||||
query = "Do you really want to destroy?"
|
||||
} else {
|
||||
desc = "Terraform will apply the changes described above.\n" +
|
||||
"Only 'yes' will be accepted to approve."
|
||||
query = "Do you want to apply these changes?"
|
||||
}
|
||||
|
||||
if !trivialPlan {
|
||||
// Display the plan of what we are going to apply/destroy.
|
||||
if op.Destroy {
|
||||
op.UIOut.Output("\n" + strings.TrimSpace(approveDestroyPlanHeader) + "\n")
|
||||
} else {
|
||||
op.UIOut.Output("\n" + strings.TrimSpace(approvePlanHeader) + "\n")
|
||||
}
|
||||
op.UIOut.Output(format.Plan(&format.PlanOpts{
|
||||
Plan: plan,
|
||||
Color: b.Colorize(),
|
||||
ModuleDepth: -1,
|
||||
}))
|
||||
}
|
||||
|
||||
v, err := op.UIIn.Input(&terraform.InputOpts{
|
||||
Id: "approve",
|
||||
Query: query,
|
||||
Description: desc,
|
||||
})
|
||||
if err != nil {
|
||||
runningOp.Err = errwrap.Wrapf("Error asking for approval: {{err}}", err)
|
||||
return
|
||||
}
|
||||
if v != "yes" {
|
||||
if op.Destroy {
|
||||
runningOp.Err = errors.New("Destroy cancelled.")
|
||||
} else {
|
||||
runningOp.Err = errors.New("Apply cancelled.")
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Setup our hook for continuous state updates
|
||||
|
@ -186,7 +250,8 @@ func (b *Local) opApply(
|
|||
countHook.Removed)))
|
||||
}
|
||||
|
||||
if countHook.Added > 0 || countHook.Changed > 0 {
|
||||
// only show the state file help message if the state is local.
|
||||
if (countHook.Added > 0 || countHook.Changed > 0) && b.StateOutPath != "" {
|
||||
b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
|
||||
"[reset]\n"+
|
||||
"The state of your infrastructure has been saved to the path\n"+
|
||||
|
@ -288,3 +353,17 @@ Terraform encountered an error attempting to save the state before canceling
|
|||
the current operation. Once the operation is complete another attempt will be
|
||||
made to save the final state.
|
||||
`
|
||||
|
||||
const approvePlanHeader = `
|
||||
The Terraform execution plan has been generated and is shown below.
|
||||
Resources are shown in alphabetical order for quick scanning. Green resources
|
||||
will be created (or destroyed and then created if an existing resource
|
||||
exists), yellow resources are being changed in-place, and red resources
|
||||
will be destroyed. Cyan entries are data sources to be read.
|
||||
`
|
||||
|
||||
const approveDestroyPlanHeader = `
|
||||
The Terraform destroy plan has been generated and is shown below.
|
||||
Resources are shown in alphabetical order for quick scanning.
|
||||
Resources shown in red will be destroyed.
|
||||
`
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package local
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
|
@ -23,7 +24,7 @@ func (b *Local) Context(op *backend.Operation) (*terraform.Context, state.State,
|
|||
|
||||
func (b *Local) context(op *backend.Operation) (*terraform.Context, state.State, error) {
|
||||
// Get the state.
|
||||
s, err := b.State(op.Environment)
|
||||
s, err := b.State(op.Workspace)
|
||||
if err != nil {
|
||||
return nil, nil, errwrap.Wrapf("Error loading state: {{err}}", err)
|
||||
}
|
||||
|
@ -48,6 +49,10 @@ func (b *Local) context(op *backend.Operation) (*terraform.Context, state.State,
|
|||
}
|
||||
|
||||
// Load our state
|
||||
// By the time we get here, the backend creation code in "command" took
|
||||
// care of making s.State() return a state compatible with our plan,
|
||||
// if any, so we can safely pass this value in both the plan context
|
||||
// and new context cases below.
|
||||
opts.State = s.State()
|
||||
|
||||
// Build the context
|
||||
|
@ -57,6 +62,15 @@ func (b *Local) context(op *backend.Operation) (*terraform.Context, state.State,
|
|||
} else {
|
||||
tfCtx, err = terraform.NewContext(&opts)
|
||||
}
|
||||
|
||||
// any errors resolving plugins returns this
|
||||
if rpe, ok := err.(*terraform.ResourceProviderError); ok {
|
||||
b.pluginInitRequired(rpe)
|
||||
// we wrote the full UI error here, so return a generic error for flow
|
||||
// control in the command.
|
||||
return nil, nil, errors.New("error satisfying plugin requirements")
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
|
|
@ -18,11 +18,11 @@ import (
|
|||
func TestLocal(t *testing.T) *Local {
|
||||
tempDir := testTempDir(t)
|
||||
return &Local{
|
||||
StatePath: filepath.Join(tempDir, "state.tfstate"),
|
||||
StateOutPath: filepath.Join(tempDir, "state.tfstate"),
|
||||
StateBackupPath: filepath.Join(tempDir, "state.tfstate.bak"),
|
||||
StateEnvDir: filepath.Join(tempDir, "state.tfstate.d"),
|
||||
ContextOpts: &terraform.ContextOpts{},
|
||||
StatePath: filepath.Join(tempDir, "state.tfstate"),
|
||||
StateOutPath: filepath.Join(tempDir, "state.tfstate"),
|
||||
StateBackupPath: filepath.Join(tempDir, "state.tfstate.bak"),
|
||||
StateWorkspaceDir: filepath.Join(tempDir, "state.tfstate.d"),
|
||||
ContextOpts: &terraform.ContextOpts{},
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -47,14 +47,13 @@ func TestLocalProvider(t *testing.T, b *Local, name string) *terraform.MockResou
|
|||
if b.ContextOpts == nil {
|
||||
b.ContextOpts = &terraform.ContextOpts{}
|
||||
}
|
||||
if b.ContextOpts.Providers == nil {
|
||||
b.ContextOpts.Providers = make(map[string]terraform.ResourceProviderFactory)
|
||||
}
|
||||
|
||||
// Setup our provider
|
||||
b.ContextOpts.Providers[name] = func() (terraform.ResourceProvider, error) {
|
||||
return p, nil
|
||||
}
|
||||
b.ContextOpts.ProviderResolver = terraform.ResourceProviderResolverFixed(
|
||||
map[string]terraform.ResourceProviderFactory{
|
||||
name: terraform.ResourceProviderFactoryFixed(p),
|
||||
},
|
||||
)
|
||||
|
||||
return p
|
||||
}
|
||||
|
|
|
@ -3,6 +3,7 @@ package consul
|
|||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
|
@ -20,6 +21,15 @@ import (
|
|||
const (
|
||||
lockSuffix = "/.lock"
|
||||
lockInfoSuffix = "/.lockinfo"
|
||||
|
||||
// The Session TTL associated with this lock.
|
||||
lockSessionTTL = "15s"
|
||||
|
||||
// the delay time from when a session is lost to when the
|
||||
// lock is released by the server
|
||||
lockDelay = 5 * time.Second
|
||||
// interval between attempts to reacquire a lost lock
|
||||
lockReacquireInterval = 2 * time.Second
|
||||
)
|
||||
|
||||
// RemoteClient is a remote client that stores data in Consul.
|
||||
|
@ -44,9 +54,15 @@ type RemoteClient struct {
|
|||
|
||||
info *state.LockInfo
|
||||
|
||||
// cancel the goroutine which is monitoring the lock.
|
||||
monitorCancel chan struct{}
|
||||
monitorDone chan struct{}
|
||||
// cancel our goroutine which is monitoring the lock to automatically
|
||||
// reacquire it when possible.
|
||||
monitorCancel context.CancelFunc
|
||||
monitorWG sync.WaitGroup
|
||||
|
||||
// sessionCancel cancels the Context use for session.RenewPeriodic, and is
|
||||
// called when unlocking, or before creating a new lock if the lock is
|
||||
// lost.
|
||||
sessionCancel context.CancelFunc
|
||||
}
|
||||
|
||||
func (c *RemoteClient) Get() (*remote.Payload, error) {
|
||||
|
@ -202,25 +218,41 @@ func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) {
|
|||
return c.lock()
|
||||
}
|
||||
|
||||
// called after a lock is acquired
|
||||
var testLockHook func()
|
||||
|
||||
// the lock implementation.
|
||||
// Only to be called while holding Client.mu
|
||||
func (c *RemoteClient) lock() (string, error) {
|
||||
if c.consulLock == nil {
|
||||
opts := &consulapi.LockOptions{
|
||||
Key: c.Path + lockSuffix,
|
||||
// only wait briefly, so terraform has the choice to fail fast or
|
||||
// retry as needed.
|
||||
LockWaitTime: time.Second,
|
||||
LockTryOnce: true,
|
||||
}
|
||||
// We create a new session here, so it can be canceled when the lock is
|
||||
// lost or unlocked.
|
||||
lockSession, err := c.createSession()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
lock, err := c.Client.LockOpts(opts)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
opts := &consulapi.LockOptions{
|
||||
Key: c.Path + lockSuffix,
|
||||
Session: lockSession,
|
||||
|
||||
c.consulLock = lock
|
||||
// only wait briefly, so terraform has the choice to fail fast or
|
||||
// retry as needed.
|
||||
LockWaitTime: time.Second,
|
||||
LockTryOnce: true,
|
||||
|
||||
// Don't let the lock monitor give up right away, as it's possible the
|
||||
// session is still OK. While the session is refreshed at a rate of
|
||||
// TTL/2, the lock monitor is an idle blocking request and is more
|
||||
// susceptible to being closed by a lower network layer.
|
||||
MonitorRetries: 5,
|
||||
//
|
||||
// The delay between lock monitor retries.
|
||||
// While the session has a 15s TTL plus a 5s wait period on a lost
|
||||
// lock, if we can't get our lock back in 10+ seconds something is
|
||||
// wrong so we're going to drop the session and start over.
|
||||
MonitorRetryTime: 2 * time.Second,
|
||||
}
|
||||
|
||||
c.consulLock, err = c.Client.LockOpts(opts)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
lockErr := &state.LockError{}
|
||||
|
@ -239,6 +271,7 @@ func (c *RemoteClient) lock() (string, error) {
|
|||
}
|
||||
|
||||
lockErr.Info = lockInfo
|
||||
|
||||
return "", lockErr
|
||||
}
|
||||
|
||||
|
@ -257,16 +290,22 @@ func (c *RemoteClient) lock() (string, error) {
|
|||
// If we lose the lock to due communication issues with the consul agent,
|
||||
// attempt to immediately reacquire the lock. Put will verify the integrity
|
||||
// of the state by using a CAS operation.
|
||||
c.monitorCancel = make(chan struct{})
|
||||
c.monitorDone = make(chan struct{})
|
||||
go func(cancel, done chan struct{}) {
|
||||
defer func() {
|
||||
close(done)
|
||||
}()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
c.monitorCancel = cancel
|
||||
c.monitorWG.Add(1)
|
||||
go func() {
|
||||
defer c.monitorWG.Done()
|
||||
select {
|
||||
case <-c.lockCh:
|
||||
log.Println("[ERROR] lost consul lock")
|
||||
for {
|
||||
c.mu.Lock()
|
||||
// We lost our lock, so we need to cancel the session too.
|
||||
// The CancelFunc is only replaced while holding Client.mu, so
|
||||
// this is safe to call here. This will be replaced by the
|
||||
// lock() call below.
|
||||
c.sessionCancel()
|
||||
|
||||
c.consulLock = nil
|
||||
_, err := c.lock()
|
||||
c.mu.Unlock()
|
||||
|
@ -276,11 +315,11 @@ func (c *RemoteClient) lock() (string, error) {
|
|||
// terraform is running. There may be changes in progress,
|
||||
// so there's no use in aborting. Either we eventually
|
||||
// reacquire the lock, or a Put will fail on a CAS.
|
||||
log.Printf("[ERROR] attempting to reacquire lock: %s", err)
|
||||
time.Sleep(time.Second)
|
||||
log.Printf("[ERROR] could not reacquire lock: %s", err)
|
||||
time.Sleep(lockReacquireInterval)
|
||||
|
||||
select {
|
||||
case <-cancel:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
@ -292,10 +331,10 @@ func (c *RemoteClient) lock() (string, error) {
|
|||
return
|
||||
}
|
||||
|
||||
case <-cancel:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}(c.monitorCancel, c.monitorDone)
|
||||
}()
|
||||
|
||||
if testLockHook != nil {
|
||||
testLockHook()
|
||||
|
@ -304,6 +343,42 @@ func (c *RemoteClient) lock() (string, error) {
|
|||
return c.info.ID, nil
|
||||
}
|
||||
|
||||
// called after a lock is acquired
|
||||
var testLockHook func()
|
||||
|
||||
func (c *RemoteClient) createSession() (string, error) {
|
||||
// create the context first. Even if the session creation fails, we assume
|
||||
// that the CancelFunc is always callable.
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
c.sessionCancel = cancel
|
||||
|
||||
session := c.Client.Session()
|
||||
se := &consulapi.SessionEntry{
|
||||
Name: consulapi.DefaultLockSessionName,
|
||||
TTL: lockSessionTTL,
|
||||
LockDelay: lockDelay,
|
||||
}
|
||||
|
||||
id, _, err := session.Create(se, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
log.Println("[INFO] created consul lock session", id)
|
||||
|
||||
// keep the session renewed
|
||||
// we need an adapter to convert the session Done() channel to a
|
||||
// non-directional channel to satisfy the RenewPeriodic signature.
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
close(done)
|
||||
}()
|
||||
go session.RenewPeriodic(lockSessionTTL, id, nil, done)
|
||||
|
||||
return id, nil
|
||||
}
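A standalone sketch of the session pattern used in createSession above: create a Consul session with a TTL and lock delay, then renew it until a context is cancelled. It assumes a reachable Consul agent at the default address; the session name is arbitrary and error handling is trimmed.

package main

import (
	"context"
	"log"
	"time"

	consulapi "github.com/hashicorp/consul/api"
)

func main() {
	client, err := consulapi.NewClient(consulapi.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// TTL-based session: it expires unless renewed, and the lock delay
	// keeps the lock unavailable briefly after the session is lost.
	se := &consulapi.SessionEntry{
		Name:      "terraform-state-lock-example",
		TTL:       "15s",
		LockDelay: 5 * time.Second,
	}
	id, _, err := client.Session().Create(se, nil)
	if err != nil {
		log.Fatal(err)
	}

	// RenewPeriodic keeps the session alive until doneCh is closed, which
	// is what the backend drives from its sessionCancel CancelFunc.
	ctx, cancel := context.WithCancel(context.Background())
	doneCh := make(chan struct{})
	go func() {
		<-ctx.Done()
		close(doneCh)
	}()
	go client.Session().RenewPeriodic("15s", id, nil, doneCh)

	// ... acquire a lock against this session and do work ...
	time.Sleep(2 * time.Second)
	cancel()
	_, _ = client.Session().Destroy(id, nil)
}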
|
||||
|
||||
func (c *RemoteClient) Unlock(id string) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
@ -315,17 +390,27 @@ func (c *RemoteClient) Unlock(id string) error {
|
|||
return c.unlock(id)
|
||||
}
|
||||
|
||||
// the unlock implementation.
|
||||
// Only to be called while holding Client.mu
|
||||
func (c *RemoteClient) unlock(id string) error {
|
||||
// cancel our monitoring goroutine
|
||||
if c.monitorCancel != nil {
|
||||
close(c.monitorCancel)
|
||||
}
|
||||
|
||||
// this doesn't use the lock id, because the lock is tied to the consul client.
|
||||
if c.consulLock == nil || c.lockCh == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// cancel our monitoring goroutine
|
||||
c.monitorCancel()
|
||||
|
||||
defer func() {
|
||||
c.consulLock = nil
|
||||
|
||||
// The consul session is only used for this single lock, so cancel it
|
||||
// after we unlock.
|
||||
// The session is only created and replaced holding Client.mu, so the
|
||||
// CancelFunc must be non-nil.
|
||||
c.sessionCancel()
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-c.lockCh:
|
||||
return errors.New("consul lock was lost")
|
||||
|
@ -344,9 +429,9 @@ func (c *RemoteClient) unlock(id string) error {
|
|||
errs = multierror.Append(errs, err)
|
||||
}
|
||||
|
||||
// the monitoring goroutine may be in a select on this chan, so we need to
|
||||
// the monitoring goroutine may be in a select on the lockCh, so we need to
|
||||
// wait for it to return before changing the value.
|
||||
<-c.monitorDone
|
||||
c.monitorWG.Wait()
|
||||
c.lockCh = nil
|
||||
|
||||
// This is only cleanup, and will fail if the lock was immediately taken by
|
||||
|
|
|
@ -8,7 +8,7 @@ import (
|
|||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
|
||||
terraformAWS "github.com/hashicorp/terraform/builtin/providers/aws"
|
||||
terraformAWS "github.com/terraform-providers/terraform-provider-aws/aws"
|
||||
)
|
||||
|
||||
// New creates a new backend for S3 remote state.
|
||||
|
@ -139,6 +139,13 @@ func New() backend.Backend {
|
|||
Description: "The permissions applied when assuming a role.",
|
||||
Default: "",
|
||||
},
|
||||
|
||||
"workspace_key_prefix": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "The prefix applied to the non-default state path inside the bucket",
|
||||
Default: "env:",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -160,6 +167,7 @@ type Backend struct {
|
|||
acl string
|
||||
kmsKeyID string
|
||||
ddbTable string
|
||||
workspaceKeyPrefix string
|
||||
}
|
||||
|
||||
func (b *Backend) configure(ctx context.Context) error {
|
||||
|
@ -175,6 +183,7 @@ func (b *Backend) configure(ctx context.Context) error {
|
|||
b.serverSideEncryption = data.Get("encrypt").(bool)
|
||||
b.acl = data.Get("acl").(string)
|
||||
b.kmsKeyID = data.Get("kms_key_id").(string)
|
||||
b.workspaceKeyPrefix = data.Get("workspace_key_prefix").(string)
|
||||
|
||||
b.ddbTable = data.Get("dynamodb_table").(string)
|
||||
if b.ddbTable == "" {
|
||||
|
|
vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend_state.go (generated, vendored)
|
@ -14,16 +14,10 @@ import (
|
|||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
const (
|
||||
// This will be used as directory name, the odd looking colon is simply to
|
||||
// reduce the chance of name conflicts with existing objects.
|
||||
keyEnvPrefix = "env:"
|
||||
)
|
||||
|
||||
func (b *Backend) States() ([]string, error) {
|
||||
params := &s3.ListObjectsInput{
|
||||
Bucket: &b.bucketName,
|
||||
Prefix: aws.String(keyEnvPrefix + "/"),
|
||||
Prefix: aws.String(b.workspaceKeyPrefix + "/"),
|
||||
}
|
||||
|
||||
resp, err := b.s3Client.ListObjects(params)
|
||||
|
@ -53,7 +47,7 @@ func (b *Backend) keyEnv(key string) string {
|
|||
}
|
||||
|
||||
// shouldn't happen since we listed by prefix
|
||||
if parts[0] != keyEnvPrefix {
|
||||
if parts[0] != b.workspaceKeyPrefix {
|
||||
return ""
|
||||
}
|
||||
|
||||
|
@ -70,20 +64,16 @@ func (b *Backend) DeleteState(name string) error {
|
|||
return fmt.Errorf("can't delete default state")
|
||||
}
|
||||
|
||||
params := &s3.DeleteObjectInput{
|
||||
Bucket: &b.bucketName,
|
||||
Key: aws.String(b.path(name)),
|
||||
}
|
||||
|
||||
_, err := b.s3Client.DeleteObject(params)
|
||||
client, err := b.remoteClient(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return client.Delete()
|
||||
}
|
||||
|
||||
func (b *Backend) State(name string) (state.State, error) {
|
||||
// get a remote client configured for this state
|
||||
func (b *Backend) remoteClient(name string) (*RemoteClient, error) {
|
||||
if name == "" {
|
||||
return nil, errors.New("missing state name")
|
||||
}
|
||||
|
@ -99,8 +89,16 @@ func (b *Backend) State(name string) (state.State, error) {
|
|||
ddbTable: b.ddbTable,
|
||||
}
|
||||
|
||||
stateMgr := &remote.State{Client: client}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func (b *Backend) State(name string) (state.State, error) {
|
||||
client, err := b.remoteClient(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stateMgr := &remote.State{Client: client}
|
||||
// Check to see if this state already exists.
|
||||
// If we're trying to force-unlock a state, we can't take the lock before
|
||||
// fetching the state. If the state doesn't exist, we have to assume this
|
||||
|
@ -179,7 +177,7 @@ func (b *Backend) path(name string) string {
|
|||
return b.keyName
|
||||
}
|
||||
|
||||
return strings.Join([]string{keyEnvPrefix, name, b.keyName}, "/")
|
||||
return strings.Join([]string{b.workspaceKeyPrefix, name, b.keyName}, "/")
|
||||
}
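A tiny sketch of the object keys the path method above produces now that the prefix is configurable via workspace_key_prefix; the default of "env:" preserves the previous hard-coded keyEnvPrefix layout. The helper is a stand-in for the real method, which checks backend.DefaultStateName rather than the literal "default".

package main

import (
	"fmt"
	"strings"
)

// s3Key mirrors Backend.path: the default workspace uses the bare key,
// every other workspace is nested under the (configurable) prefix.
func s3Key(prefix, keyName, workspace string) string {
	if workspace == "default" {
		return keyName
	}
	return strings.Join([]string{prefix, workspace, keyName}, "/")
}

func main() {
	fmt.Println(s3Key("env:", "network/terraform.tfstate", "default"))
	// network/terraform.tfstate
	fmt.Println(s3Key("env:", "network/terraform.tfstate", "staging"))
	// env:/staging/network/terraform.tfstate
	fmt.Println(s3Key("workspaces", "network/terraform.tfstate", "staging"))
	// workspaces/staging/network/terraform.tfstate
}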
|
||||
|
||||
const errStateUnlock = `
|
||||
|
|
vendor/github.com/hashicorp/terraform/backend/remote-state/swift/backend.go (new file; generated, vendored)
|
@ -0,0 +1,325 @@
|
|||
package swift
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gophercloud/gophercloud"
|
||||
"github.com/gophercloud/gophercloud/openstack"
|
||||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
tf_openstack "github.com/terraform-providers/terraform-provider-openstack/openstack"
|
||||
)
|
||||
|
||||
// New creates a new backend for Swift remote state.
|
||||
func New() backend.Backend {
|
||||
s := &schema.Backend{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"auth_url": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc("OS_AUTH_URL", nil),
|
||||
Description: descriptions["auth_url"],
|
||||
},
|
||||
|
||||
"user_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc("OS_USER_ID", ""),
|
||||
Description: descriptions["user_name"],
|
||||
},
|
||||
|
||||
"user_name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc("OS_USERNAME", ""),
|
||||
Description: descriptions["user_name"],
|
||||
},
|
||||
|
||||
"tenant_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
DefaultFunc: schema.MultiEnvDefaultFunc([]string{
|
||||
"OS_TENANT_ID",
|
||||
"OS_PROJECT_ID",
|
||||
}, ""),
|
||||
Description: descriptions["tenant_id"],
|
||||
},
|
||||
|
||||
"tenant_name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
DefaultFunc: schema.MultiEnvDefaultFunc([]string{
|
||||
"OS_TENANT_NAME",
|
||||
"OS_PROJECT_NAME",
|
||||
}, ""),
|
||||
Description: descriptions["tenant_name"],
|
||||
},
|
||||
|
||||
"password": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Sensitive: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc("OS_PASSWORD", ""),
|
||||
Description: descriptions["password"],
|
||||
},
|
||||
|
||||
"token": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc("OS_AUTH_TOKEN", ""),
|
||||
Description: descriptions["token"],
|
||||
},
|
||||
|
||||
"domain_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
DefaultFunc: schema.MultiEnvDefaultFunc([]string{
|
||||
"OS_USER_DOMAIN_ID",
|
||||
"OS_PROJECT_DOMAIN_ID",
|
||||
"OS_DOMAIN_ID",
|
||||
}, ""),
|
||||
Description: descriptions["domain_id"],
|
||||
},
|
||||
|
||||
"domain_name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
DefaultFunc: schema.MultiEnvDefaultFunc([]string{
|
||||
"OS_USER_DOMAIN_NAME",
|
||||
"OS_PROJECT_DOMAIN_NAME",
|
||||
"OS_DOMAIN_NAME",
|
||||
"OS_DEFAULT_DOMAIN",
|
||||
}, ""),
|
||||
Description: descriptions["domain_name"],
|
||||
},
|
||||
|
||||
"region_name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""),
|
||||
Description: descriptions["region_name"],
|
||||
},
|
||||
|
||||
"insecure": &schema.Schema{
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc("OS_INSECURE", ""),
|
||||
Description: descriptions["insecure"],
|
||||
},
|
||||
|
||||
"endpoint_type": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc("OS_ENDPOINT_TYPE", ""),
|
||||
},
|
||||
|
||||
"cacert_file": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc("OS_CACERT", ""),
|
||||
Description: descriptions["cacert_file"],
|
||||
},
|
||||
|
||||
"cert": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc("OS_CERT", ""),
|
||||
Description: descriptions["cert"],
|
||||
},
|
||||
|
||||
"key": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc("OS_KEY", ""),
|
||||
Description: descriptions["key"],
|
||||
},
|
||||
|
||||
"path": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: descriptions["path"],
|
||||
Deprecated: "Use container instead",
|
||||
ConflictsWith: []string{"container"},
|
||||
},
|
||||
|
||||
"container": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: descriptions["container"],
|
||||
},
|
||||
|
||||
"archive_path": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: descriptions["archive_path"],
|
||||
Deprecated: "Use archive_container instead",
|
||||
ConflictsWith: []string{"archive_container"},
|
||||
},
|
||||
|
||||
"archive_container": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: descriptions["archive_container"],
|
||||
},
|
||||
|
||||
"expire_after": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: descriptions["expire_after"],
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result := &Backend{Backend: s}
|
||||
result.Backend.ConfigureFunc = result.configure
|
||||
return result
|
||||
}
|
||||
|
||||
var descriptions map[string]string
|
||||
|
||||
func init() {
|
||||
descriptions = map[string]string{
|
||||
"auth_url": "The Identity authentication URL.",
|
||||
|
||||
"user_name": "Username to login with.",
|
||||
|
||||
"user_id": "User ID to login with.",
|
||||
|
||||
"tenant_id": "The ID of the Tenant (Identity v2) or Project (Identity v3)\n" +
|
||||
"to login with.",
|
||||
|
||||
"tenant_name": "The name of the Tenant (Identity v2) or Project (Identity v3)\n" +
|
||||
"to login with.",
|
||||
|
||||
"password": "Password to login with.",
|
||||
|
||||
"token": "Authentication token to use as an alternative to username/password.",
|
||||
|
||||
"domain_id": "The ID of the Domain to scope to (Identity v3).",
|
||||
|
||||
"domain_name": "The name of the Domain to scope to (Identity v3).",
|
||||
|
||||
"region_name": "The name of the Region to use.",
|
||||
|
||||
"insecure": "Trust self-signed certificates.",
|
||||
|
||||
"cacert_file": "A Custom CA certificate.",
|
||||
|
||||
"endpoint_type": "The catalog endpoint type to use.",
|
||||
|
||||
"cert": "A client certificate to authenticate with.",
|
||||
|
||||
"key": "A client private key to authenticate with.",
|
||||
|
||||
"path": "Swift container path to use.",
|
||||
|
||||
"container": "Swift container to create",
|
||||
|
||||
"archive_path": "Swift container path to archive state to.",
|
||||
|
||||
"archive_container": "Swift container to archive state to.",
|
||||
|
||||
"expire_after": "Archive object expiry duration.",
|
||||
}
|
||||
}
|
||||
|
||||
type Backend struct {
|
||||
*schema.Backend
|
||||
|
||||
// Fields below are set from configure
|
||||
client *gophercloud.ServiceClient
|
||||
archive bool
|
||||
archiveContainer string
|
||||
expireSecs int
|
||||
container string
|
||||
}
|
||||
|
||||
func (b *Backend) configure(ctx context.Context) error {
|
||||
if b.client != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Grab the resource data
|
||||
data := schema.FromContextBackendConfig(ctx)
|
||||
|
||||
config := &tf_openstack.Config{
|
||||
CACertFile: data.Get("cacert_file").(string),
|
||||
ClientCertFile: data.Get("cert").(string),
|
||||
ClientKeyFile: data.Get("key").(string),
|
||||
DomainID: data.Get("domain_id").(string),
|
||||
DomainName: data.Get("domain_name").(string),
|
||||
EndpointType: data.Get("endpoint_type").(string),
|
||||
IdentityEndpoint: data.Get("auth_url").(string),
|
||||
Insecure: data.Get("insecure").(bool),
|
||||
Password: data.Get("password").(string),
|
||||
Token: data.Get("token").(string),
|
||||
TenantID: data.Get("tenant_id").(string),
|
||||
TenantName: data.Get("tenant_name").(string),
|
||||
Username: data.Get("user_name").(string),
|
||||
UserID: data.Get("user_id").(string),
|
||||
}
|
||||
|
||||
if err := config.LoadAndValidate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Assign Container
|
||||
b.container = data.Get("container").(string)
|
||||
if b.container == "" {
|
||||
// Check deprecated field
|
||||
b.container = data.Get("path").(string)
|
||||
}
|
||||
|
||||
// Enable object archiving?
|
||||
if archiveContainer, ok := data.GetOk("archive_container"); ok {
|
||||
log.Printf("[DEBUG] Archive_container set, enabling object versioning")
|
||||
b.archive = true
|
||||
b.archiveContainer = archiveContainer.(string)
|
||||
} else if archivePath, ok := data.GetOk("archive_path"); ok {
|
||||
log.Printf("[DEBUG] Archive_path set, enabling object versioning")
|
||||
b.archive = true
|
||||
b.archiveContainer = archivePath.(string)
|
||||
}
|
||||
|
||||
// Enable object expiry?
|
||||
if expireRaw, ok := data.GetOk("expire_after"); ok {
|
||||
expire := expireRaw.(string)
|
||||
log.Printf("[DEBUG] Requested that remote state expires after %s", expire)
|
||||
|
||||
if strings.HasSuffix(expire, "d") {
|
||||
log.Printf("[DEBUG] Got a days expire after duration. Converting to hours")
|
||||
days, err := strconv.Atoi(expire[:len(expire)-1])
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error converting expire_after value %s to int: %s", expire, err)
|
||||
}
|
||||
|
||||
expire = fmt.Sprintf("%dh", days*24)
|
||||
log.Printf("[DEBUG] Expire after %s hours", expire)
|
||||
}
|
||||
|
||||
expireDur, err := time.ParseDuration(expire)
|
||||
if err != nil {
|
||||
log.Printf("[DEBUG] Error parsing duration %s: %s", expire, err)
|
||||
return fmt.Errorf("Error parsing expire_after duration '%s': %s", expire, err)
|
||||
}
|
||||
log.Printf("[DEBUG] Seconds duration = %d", int(expireDur.Seconds()))
|
||||
b.expireSecs = int(expireDur.Seconds())
|
||||
}
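A self-contained sketch of the expire_after handling above: a trailing "d" is converted to hours because time.ParseDuration has no day unit, and the result is stored as whole seconds for Swift object expiry.

package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

// expireSeconds mirrors the backend's expire_after parsing.
func expireSeconds(expire string) (int, error) {
	if strings.HasSuffix(expire, "d") {
		days, err := strconv.Atoi(expire[:len(expire)-1])
		if err != nil {
			return 0, fmt.Errorf("error converting expire_after value %s to int: %s", expire, err)
		}
		// time.ParseDuration understands "h" but not "d".
		expire = fmt.Sprintf("%dh", days*24)
	}

	d, err := time.ParseDuration(expire)
	if err != nil {
		return 0, fmt.Errorf("error parsing expire_after duration '%s': %s", expire, err)
	}
	return int(d.Seconds()), nil
}

func main() {
	for _, in := range []string{"90m", "24h", "3d"} {
		secs, err := expireSeconds(in)
		fmt.Println(in, "->", secs, "seconds, err:", err) // e.g. "3d" -> 259200 seconds
	}
}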
|
||||
|
||||
objClient, err := openstack.NewObjectStorageV1(config.OsClient, gophercloud.EndpointOpts{
|
||||
Region: data.Get("region_name").(string),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.client = objClient
|
||||
|
||||
return nil
|
||||
}
|
vendor/github.com/hashicorp/terraform/backend/remote-state/swift/backend_state.go (new file; generated, vendored)
|
@ -0,0 +1,31 @@
|
|||
package swift
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/state"
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
)
|
||||
|
||||
func (b *Backend) States() ([]string, error) {
|
||||
return nil, backend.ErrNamedStatesNotSupported
|
||||
}
|
||||
|
||||
func (b *Backend) DeleteState(name string) error {
|
||||
return backend.ErrNamedStatesNotSupported
|
||||
}
|
||||
|
||||
func (b *Backend) State(name string) (state.State, error) {
|
||||
if name != backend.DefaultStateName {
|
||||
return nil, backend.ErrNamedStatesNotSupported
|
||||
}
|
||||
|
||||
client := &RemoteClient{
|
||||
client: b.client,
|
||||
container: b.container,
|
||||
archive: b.archive,
|
||||
archiveContainer: b.archiveContainer,
|
||||
expireSecs: b.expireSecs,
|
||||
}
|
||||
|
||||
return &remote.State{Client: client}, nil
|
||||
}
|
vendor/github.com/hashicorp/terraform/backend/remote-state/swift/client.go (new file; generated, vendored)
|
@ -0,0 +1,115 @@
|
|||
package swift
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/gophercloud/gophercloud"
|
||||
"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers"
|
||||
"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects"
|
||||
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
)
|
||||
|
||||
const (
|
||||
TFSTATE_NAME = "tfstate.tf"
|
||||
TFSTATE_LOCK_NAME = "tfstate.lock"
|
||||
)
|
||||
|
||||
// RemoteClient implements the Client interface for an Openstack Swift server.
|
||||
type RemoteClient struct {
|
||||
client *gophercloud.ServiceClient
|
||||
container string
|
||||
archive bool
|
||||
archiveContainer string
|
||||
expireSecs int
|
||||
}
|
||||
|
||||
func (c *RemoteClient) Get() (*remote.Payload, error) {
|
||||
log.Printf("[DEBUG] Getting object %s in container %s", TFSTATE_NAME, c.container)
|
||||
result := objects.Download(c.client, c.container, TFSTATE_NAME, nil)
|
||||
|
||||
// Extract any errors from result
|
||||
_, err := result.Extract()
|
||||
|
||||
// 404 response is to be expected if the object doesn't already exist!
|
||||
if _, ok := err.(gophercloud.ErrDefault404); ok {
|
||||
log.Println("[DEBUG] Object doesn't exist to download.")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
bytes, err := result.ExtractContent()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hash := md5.Sum(bytes)
|
||||
payload := &remote.Payload{
|
||||
Data: bytes,
|
||||
MD5: hash[:md5.Size],
|
||||
}
|
||||
|
||||
return payload, nil
|
||||
}
|
||||
|
||||
func (c *RemoteClient) Put(data []byte) error {
|
||||
if err := c.ensureContainerExists(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Putting object %s in container %s", TFSTATE_NAME, c.container)
|
||||
reader := bytes.NewReader(data)
|
||||
createOpts := objects.CreateOpts{
|
||||
Content: reader,
|
||||
}
|
||||
|
||||
if c.expireSecs != 0 {
|
||||
log.Printf("[DEBUG] ExpireSecs = %d", c.expireSecs)
|
||||
createOpts.DeleteAfter = c.expireSecs
|
||||
}
|
||||
|
||||
result := objects.Create(c.client, c.container, TFSTATE_NAME, createOpts)
|
||||
|
||||
return result.Err
|
||||
}
|
||||
|
||||
func (c *RemoteClient) Delete() error {
|
||||
log.Printf("[DEBUG] Deleting object %s in container %s", TFSTATE_NAME, c.container)
|
||||
result := objects.Delete(c.client, c.container, TFSTATE_NAME, nil)
|
||||
return result.Err
|
||||
}
|
||||
|
||||
func (c *RemoteClient) ensureContainerExists() error {
|
||||
containerOpts := &containers.CreateOpts{}
|
||||
|
||||
if c.archive {
|
||||
log.Printf("[DEBUG] Creating archive container %s", c.archiveContainer)
|
||||
result := containers.Create(c.client, c.archiveContainer, nil)
|
||||
if result.Err != nil {
|
||||
log.Printf("[DEBUG] Error creating archive container %s: %s", c.archiveContainer, result.Err)
|
||||
return result.Err
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Enabling Versioning on container %s", c.container)
|
||||
containerOpts.VersionsLocation = c.archiveContainer
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Creating container %s", c.container)
|
||||
result := containers.Create(c.client, c.container, containerOpts)
|
||||
if result.Err != nil {
|
||||
return result.Err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func multiEnv(ks []string) string {
|
||||
for _, k := range ks {
|
||||
if v := os.Getenv(k); v != "" {
|
||||
return v
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
|
@ -92,11 +92,20 @@ func testBackendStates(t *testing.T, b Backend) {
|
|||
// start with a fresh state, and record the lineage being
|
||||
// written to "bar"
|
||||
barState := terraform.NewState()
|
||||
|
||||
// creating the named state may have created a lineage, so use that if it exists.
|
||||
if s := bar.State(); s != nil && s.Lineage != "" {
|
||||
barState.Lineage = s.Lineage
|
||||
}
|
||||
barLineage := barState.Lineage
|
||||
|
||||
// the foo lineage should be distinct from bar, and unchanged after
|
||||
// modifying bar
|
||||
fooState := terraform.NewState()
|
||||
// creating the named state may have created a lineage, so use that if it exists.
|
||||
if s := foo.State(); s != nil && s.Lineage != "" {
|
||||
fooState.Lineage = s.Lineage
|
||||
}
|
||||
fooLineage := fooState.Lineage
|
||||
|
||||
// write a known state to foo
|
||||
|
@ -187,6 +196,24 @@ func testBackendStates(t *testing.T, b Backend) {
|
|||
t.Fatal("expected error")
|
||||
}
|
||||
|
||||
// Create and delete the foo state again.
|
||||
// Make sure that there are no leftover artifacts from a deleted state
|
||||
// preventing re-creation.
|
||||
foo, err = b.State("foo")
|
||||
if err != nil {
|
||||
t.Fatalf("error: %s", err)
|
||||
}
|
||||
if err := foo.RefreshState(); err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
if v := foo.State(); v.HasResources() {
|
||||
t.Fatalf("should be empty: %s", v)
|
||||
}
|
||||
// and delete it again
|
||||
if err := b.DeleteState("foo"); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
// Verify deletion
|
||||
{
|
||||
states, err := b.States()
|
||||
|
|
|
@ -1,14 +0,0 @@
|
|||
package aws
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
||||
func isAWSErr(err error, code string, message string) bool {
|
||||
if err, ok := err.(awserr.Error); ok {
|
||||
return err.Code() == code && strings.Contains(err.Message(), message)
|
||||
}
|
||||
return false
|
||||
}
|
vendor/github.com/hashicorp/terraform/builtin/providers/aws/data_source_aws_iam_role.go (generated, vendored)
|
@ -1,67 +0,0 @@
|
|||
package aws
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/hashicorp/errwrap"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
func dataSourceAwsIAMRole() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Read: dataSourceAwsIAMRoleRead,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"arn": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"assume_role_policy_document": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"path": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"role_id": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"role_name": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func dataSourceAwsIAMRoleRead(d *schema.ResourceData, meta interface{}) error {
|
||||
iamconn := meta.(*AWSClient).iamconn
|
||||
|
||||
roleName := d.Get("role_name").(string)
|
||||
|
||||
req := &iam.GetRoleInput{
|
||||
RoleName: aws.String(roleName),
|
||||
}
|
||||
|
||||
resp, err := iamconn.GetRole(req)
|
||||
if err != nil {
|
||||
return errwrap.Wrapf("Error getting roles: {{err}}", err)
|
||||
}
|
||||
if resp == nil {
|
||||
return fmt.Errorf("no IAM role found")
|
||||
}
|
||||
|
||||
role := resp.Role
|
||||
|
||||
d.SetId(*role.RoleId)
|
||||
d.Set("arn", role.Arn)
|
||||
d.Set("assume_role_policy_document", role.AssumeRolePolicyDocument)
|
||||
d.Set("path", role.Path)
|
||||
d.Set("role_id", role.RoleId)
|
||||
|
||||
return nil
|
||||
}
|
|
@ -6,6 +6,7 @@ import (
|
|||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/terraform/config"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"github.com/mitchellh/colorstring"
|
||||
)
|
||||
|
@ -58,18 +59,27 @@ func formatPlanModuleExpand(
|
|||
return
|
||||
}
|
||||
|
||||
var moduleName string
|
||||
var modulePath []string
|
||||
if !m.IsRoot() {
|
||||
moduleName = fmt.Sprintf("module.%s", strings.Join(m.Path[1:], "."))
|
||||
modulePath = m.Path[1:]
|
||||
}
|
||||
|
||||
// We want to output the resources in sorted order to make things
|
||||
// easier to scan through, so get all the resource names and sort them.
|
||||
names := make([]string, 0, len(m.Resources))
|
||||
for name, _ := range m.Resources {
|
||||
addrs := map[string]*terraform.ResourceAddress{}
|
||||
for name := range m.Resources {
|
||||
names = append(names, name)
|
||||
var err error
|
||||
addrs[name], err = terraform.ParseResourceAddressForInstanceDiff(modulePath, name)
|
||||
if err != nil {
|
||||
// should never happen; indicates invalid diff
|
||||
panic("invalid resource address in diff")
|
||||
}
|
||||
}
|
||||
sort.Strings(names)
|
||||
sort.Slice(names, func(i, j int) bool {
|
||||
return addrs[names[i]].Less(addrs[names[j]])
|
||||
})
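The change above replaces plain string sorting with sort.Slice over parsed resource addresses. A generic sketch of that pattern follows: parse each key once, then compare the parsed form. The addr type and parseAddr are hypothetical stand-ins, since terraform.ResourceAddress and ParseResourceAddressForInstanceDiff live inside the terraform package.

package main

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

// addr is a hypothetical stand-in for terraform.ResourceAddress.
type addr struct {
	name  string
	index int
}

func parseAddr(s string) addr {
	// "web.10" -> {web 10}; a bare name gets index 0.
	parts := strings.SplitN(s, ".", 2)
	a := addr{name: parts[0]}
	if len(parts) == 2 {
		a.index, _ = strconv.Atoi(parts[1])
	}
	return a
}

func (a addr) less(o addr) bool {
	if a.name != o.name {
		return a.name < o.name
	}
	return a.index < o.index
}

func main() {
	names := []string{"web.10", "web.2", "db.1"}

	// Parse each key once, then sort the original slice by the parsed
	// form, so "web.2" orders before "web.10" (unlike string sorting).
	addrs := map[string]addr{}
	for _, n := range names {
		addrs[n] = parseAddr(n)
	}
	sort.Slice(names, func(i, j int) bool {
		return addrs[names[i]].less(addrs[names[j]])
	})
	fmt.Println(names) // [db.1 web.2 web.10]
}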
|
||||
|
||||
// Go through each sorted name and start building the output
|
||||
for _, name := range names {
|
||||
|
@ -78,25 +88,23 @@ func formatPlanModuleExpand(
|
|||
continue
|
||||
}
|
||||
|
||||
dataSource := strings.HasPrefix(name, "data.")
|
||||
|
||||
if moduleName != "" {
|
||||
name = moduleName + "." + name
|
||||
}
|
||||
addr := addrs[name]
|
||||
addrStr := addr.String()
|
||||
dataSource := addr.Mode == config.DataResourceMode
|
||||
|
||||
// Determine the color for the text (green for adding, yellow
|
||||
// for change, red for delete), and symbol, and output the
|
||||
// resource header.
|
||||
color := "yellow"
|
||||
symbol := "~"
|
||||
symbol := " ~"
|
||||
oldValues := true
|
||||
switch rdiff.ChangeType() {
|
||||
case terraform.DiffDestroyCreate:
|
||||
color = "green"
|
||||
symbol = "-/+"
|
||||
color = "yellow"
|
||||
symbol = "[red]-[reset]/[green]+[reset][yellow]"
|
||||
case terraform.DiffCreate:
|
||||
color = "green"
|
||||
symbol = "+"
|
||||
symbol = " +"
|
||||
oldValues = false
|
||||
|
||||
// If we're "creating" a data resource then we'll present it
|
||||
|
@ -106,12 +114,12 @@ func formatPlanModuleExpand(
|
|||
// to work with, so we need to cheat and exploit knowledge of the
|
||||
// naming scheme for data resources.
|
||||
if dataSource {
|
||||
symbol = "<="
|
||||
symbol = " <="
|
||||
color = "cyan"
|
||||
}
|
||||
case terraform.DiffDestroy:
|
||||
color = "red"
|
||||
symbol = "-"
|
||||
symbol = " -"
|
||||
}
|
||||
|
||||
var extraAttr []string
|
||||
|
@ -125,10 +133,13 @@ func formatPlanModuleExpand(
|
|||
if len(extraAttr) > 0 {
|
||||
extraStr = fmt.Sprintf(" (%s)", strings.Join(extraAttr, ", "))
|
||||
}
|
||||
if rdiff.ChangeType() == terraform.DiffDestroyCreate {
|
||||
extraStr = extraStr + opts.Color.Color(" [red][bold](new resource required)")
|
||||
}
|
||||
|
||||
buf.WriteString(opts.Color.Color(fmt.Sprintf(
|
||||
"[%s]%s %s%s\n",
|
||||
color, symbol, name, extraStr)))
|
||||
color, symbol, addrStr, extraStr)))
|
||||
|
||||
// Get all the attributes that are changing, and sort them. Also
|
||||
// determine the longest key so that we can align them all.
|
||||
|
@ -175,7 +186,7 @@ func formatPlanModuleExpand(
|
|||
u = attrDiff.Old
|
||||
}
|
||||
buf.WriteString(fmt.Sprintf(
|
||||
" %s:%s %#v => %#v%s\n",
|
||||
" %s:%s %#v => %#v%s\n",
|
||||
attrK,
|
||||
strings.Repeat(" ", keyLen-len(attrK)),
|
||||
u,
|
||||
|
@ -183,7 +194,7 @@ func formatPlanModuleExpand(
|
|||
updateMsg))
|
||||
} else {
|
||||
buf.WriteString(fmt.Sprintf(
|
||||
" %s:%s %#v%s\n",
|
||||
" %s:%s %#v%s\n",
|
||||
attrK,
|
||||
strings.Repeat(" ", keyLen-len(attrK)),
|
||||
v,
|
||||
|
|
|
@ -12,6 +12,7 @@ import (
|
|||
"github.com/hashicorp/hil"
|
||||
"github.com/hashicorp/hil/ast"
|
||||
"github.com/hashicorp/terraform/helper/hilmapstructure"
|
||||
"github.com/hashicorp/terraform/plugin/discovery"
|
||||
"github.com/mitchellh/reflectwalk"
|
||||
)
|
||||
|
||||
|
@ -64,6 +65,7 @@ type Module struct {
|
|||
type ProviderConfig struct {
|
||||
Name string
|
||||
Alias string
|
||||
Version string
|
||||
RawConfig *RawConfig
|
||||
}
|
||||
|
||||
|
@ -238,6 +240,33 @@ func (r *Resource) Id() string {
|
|||
}
|
||||
}
|
||||
|
||||
// ProviderFullName returns the full name of the provider for this resource,
|
||||
// which may either be specified explicitly using the "provider" meta-argument
|
||||
// or implied by the prefix on the resource type name.
|
||||
func (r *Resource) ProviderFullName() string {
|
||||
return ResourceProviderFullName(r.Type, r.Provider)
|
||||
}
|
||||
|
||||
// ResourceProviderFullName returns the full (dependable) name of the
|
||||
// provider for a hypothetical resource with the given resource type and
|
||||
// explicit provider string. If the explicit provider string is empty then
|
||||
// the provider name is inferred from the resource type name.
|
||||
func ResourceProviderFullName(resourceType, explicitProvider string) string {
|
||||
if explicitProvider != "" {
|
||||
return explicitProvider
|
||||
}
|
||||
|
||||
idx := strings.IndexRune(resourceType, '_')
|
||||
if idx == -1 {
|
||||
// If no underscores, the resource name is assumed to be
|
||||
// also the provider name, e.g. if the provider exposes
|
||||
// only a single resource of each type.
|
||||
return resourceType
|
||||
}
|
||||
|
||||
return resourceType[:idx]
|
||||
}
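A quick sketch of the inference rule in ResourceProviderFullName above: the provider name is the resource type's prefix up to the first underscore unless an explicit provider is given. The helper here just restates the function so it can run standalone.

package main

import (
	"fmt"
	"strings"
)

// providerFullName mirrors config.ResourceProviderFullName.
func providerFullName(resourceType, explicitProvider string) string {
	if explicitProvider != "" {
		return explicitProvider
	}
	idx := strings.IndexRune(resourceType, '_')
	if idx == -1 {
		// No underscore: the type name doubles as the provider name.
		return resourceType
	}
	return resourceType[:idx]
}

func main() {
	fmt.Println(providerFullName("aws_instance", ""))            // aws
	fmt.Println(providerFullName("aws_instance", "aws.west"))    // aws.west
	fmt.Println(providerFullName("template", ""))                // template
	fmt.Println(providerFullName("google_compute_instance", "")) // google
}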
|
||||
|
||||
// Validate does some basic semantic checking of the configuration.
|
||||
func (c *Config) Validate() error {
|
||||
if c == nil {
|
||||
|
@ -349,7 +378,8 @@ func (c *Config) Validate() error {
|
|||
}
|
||||
}
|
||||
|
||||
// Check that providers aren't declared multiple times.
|
||||
// Check that providers aren't declared multiple times and that their
|
||||
// version constraints, where present, are syntactically valid.
|
||||
providerSet := make(map[string]struct{})
|
||||
for _, p := range c.ProviderConfigs {
|
||||
name := p.FullName()
|
||||
|
@ -360,6 +390,16 @@ func (c *Config) Validate() error {
|
|||
continue
|
||||
}
|
||||
|
||||
if p.Version != "" {
|
||||
_, err := discovery.ConstraintStr(p.Version).Parse()
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"provider.%s: invalid version constraint %q: %s",
|
||||
name, p.Version, err,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
providerSet[name] = struct{}{}
|
||||
}
|
||||
|
||||
|
|
|
@ -70,6 +70,7 @@ func Funcs() map[string]ast.Function {
|
|||
"coalescelist": interpolationFuncCoalesceList(),
|
||||
"compact": interpolationFuncCompact(),
|
||||
"concat": interpolationFuncConcat(),
|
||||
"contains": interpolationFuncContains(),
|
||||
"dirname": interpolationFuncDirname(),
|
||||
"distinct": interpolationFuncDistinct(),
|
||||
"element": interpolationFuncElement(),
|
||||
|
@ -356,6 +357,22 @@ func interpolationFuncCoalesceList() ast.Function {
|
|||
}
|
||||
}
|
||||
|
||||
// interpolationFuncContains returns true if an element is in the list
|
||||
// and return false otherwise
|
||||
func interpolationFuncContains() ast.Function {
|
||||
return ast.Function{
|
||||
ArgTypes: []ast.Type{ast.TypeList, ast.TypeString},
|
||||
ReturnType: ast.TypeBool,
|
||||
Callback: func(args []interface{}) (interface{}, error) {
|
||||
_, err := interpolationFuncIndex().Callback(args)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
}
|
||||
}
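A hypothetical in-package snippet (the interpolation helpers are unexported) showing how the "contains" callback behaves; it assumes the []ast.Variable list representation that the other list functions in this file use:

package config

import (
	"fmt"

	"github.com/hashicorp/hil/ast"
)

// containsDemo exercises the "contains" callback directly.
func containsDemo() {
	fn := interpolationFuncContains()
	list := []ast.Variable{
		{Type: ast.TypeString, Value: "80"},
		{Type: ast.TypeString, Value: "443"},
	}
	found, _ := fn.Callback([]interface{}{list, "443"})   // index succeeds
	missing, _ := fn.Callback([]interface{}{list, "8080"}) // index errors, so false
	fmt.Println(found, missing) // true false
}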
|
||||
|
||||
// interpolationFuncConcat implements the "concat" function that concatenates
|
||||
// multiple lists.
|
||||
func interpolationFuncConcat() ast.Function {
|
||||
|
|
|
@@ -194,7 +194,7 @@ func dirFiles(dir string) ([]string, []string, error) {
 		// Only care about files that are valid to load
 		name := fi.Name()
 		extValue := ext(name)
-		if extValue == "" || isIgnoredFile(name) {
+		if extValue == "" || IsIgnoredFile(name) {
 			continue
 		}
 

@@ -215,9 +215,9 @@ func dirFiles(dir string) ([]string, []string, error) {
 	return files, overrides, nil
 }
 
-// isIgnoredFile returns true or false depending on whether the
+// IsIgnoredFile returns true or false depending on whether the
 // provided file name is a file that should be ignored.
-func isIgnoredFile(name string) bool {
+func IsIgnoredFile(name string) bool {
 	return strings.HasPrefix(name, ".") || // Unix-like hidden files
 		strings.HasSuffix(name, "~") || // vim
 		strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs
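A quick sketch of the newly exported helper (illustrative, not part of the vendored diff; the file names are made up):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/config"
)

func main() {
	// Hidden files, vim backups and emacs lock files are skipped; ordinary
	// configuration files are not.
	for _, name := range []string{".hidden.tf", "main.tf~", "#main.tf#", "main.tf"} {
		fmt.Printf("%-12s ignored=%v\n", name, config.IsIgnoredFile(name))
	}
}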
|
|
@ -17,6 +17,20 @@ type hclConfigurable struct {
|
|||
Root *ast.File
|
||||
}
|
||||
|
||||
var ReservedResourceFields = []string{
|
||||
"connection",
|
||||
"count",
|
||||
"depends_on",
|
||||
"lifecycle",
|
||||
"provider",
|
||||
"provisioner",
|
||||
}
|
||||
|
||||
var ReservedProviderFields = []string{
|
||||
"alias",
|
||||
"version",
|
||||
}
|
||||
|
||||
func (t *hclConfigurable) Config() (*Config, error) {
|
||||
validKeys := map[string]struct{}{
|
||||
"atlas": struct{}{},
|
||||
|
@ -562,6 +576,7 @@ func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) {
|
|||
}
|
||||
|
||||
delete(config, "alias")
|
||||
delete(config, "version")
|
||||
|
||||
rawConfig, err := NewRawConfig(config)
|
||||
if err != nil {
|
||||
|
@ -583,9 +598,22 @@ func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) {
|
|||
}
|
||||
}
|
||||
|
||||
// If we have a version field then extract it
|
||||
var version string
|
||||
if a := listVal.Filter("version"); len(a.Items) > 0 {
|
||||
err := hcl.DecodeObject(&version, a.Items[0].Val)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"Error reading version for provider[%s]: %s",
|
||||
n,
|
||||
err)
|
||||
}
|
||||
}
|
||||
|
||||
result = append(result, &ProviderConfig{
|
||||
Name: n,
|
||||
Alias: alias,
|
||||
Version: version,
|
||||
RawConfig: rawConfig,
|
||||
})
|
||||
}
|
||||
|
|
|
@ -92,6 +92,25 @@ func (t *Tree) Children() map[string]*Tree {
|
|||
return t.children
|
||||
}
|
||||
|
||||
// DeepEach calls the provided callback for the receiver and then all of
|
||||
// its descendents in the tree, allowing an operation to be performed on
|
||||
// all modules in the tree.
|
||||
//
|
||||
// Parents will be visited before their children but otherwise the order is
|
||||
// not defined.
|
||||
func (t *Tree) DeepEach(cb func(*Tree)) {
|
||||
t.lock.RLock()
|
||||
defer t.lock.RUnlock()
|
||||
t.deepEach(cb)
|
||||
}
|
||||
|
||||
func (t *Tree) deepEach(cb func(*Tree)) {
|
||||
cb(t)
|
||||
for _, c := range t.children {
|
||||
c.deepEach(cb)
|
||||
}
|
||||
}
|
||||
|
||||
// Loaded says whether or not this tree has been loaded or not yet.
|
||||
func (t *Tree) Loaded() bool {
|
||||
t.lock.RLock()
|
||||
|
|
|
@ -0,0 +1,103 @@
|
|||
package config
|
||||
|
||||
import "github.com/blang/semver"
|
||||
|
||||
// ProviderVersionConstraint presents a constraint for a particular
|
||||
// provider, identified by its full name.
|
||||
type ProviderVersionConstraint struct {
|
||||
Constraint string
|
||||
ProviderType string
|
||||
}
|
||||
|
||||
// ProviderVersionConstraints is a map from provider full name to its associated
|
||||
// ProviderVersionConstraint, as produced by Config.RequiredProviders.
|
||||
type ProviderVersionConstraints map[string]ProviderVersionConstraint
|
||||
|
||||
// RequiredProviders returns the ProviderVersionConstraints for this
|
||||
// module.
|
||||
//
|
||||
// This includes both providers that are explicitly requested by provider
|
||||
// blocks and those that are used implicitly by instantiating one of their
|
||||
// resource types. In the latter case, the returned semver Range will
|
||||
// accept any version of the provider.
|
||||
func (c *Config) RequiredProviders() ProviderVersionConstraints {
|
||||
ret := make(ProviderVersionConstraints, len(c.ProviderConfigs))
|
||||
|
||||
configs := c.ProviderConfigsByFullName()
|
||||
|
||||
// In order to find the *implied* dependencies (those without explicit
|
||||
// "provider" blocks) we need to walk over all of the resources and
|
||||
// cross-reference with the provider configs.
|
||||
for _, rc := range c.Resources {
|
||||
providerName := rc.ProviderFullName()
|
||||
var providerType string
|
||||
|
||||
// Default to (effectively) no constraint whatsoever, but we might
|
||||
// override if there's an explicit constraint in config.
|
||||
constraint := ">=0.0.0"
|
||||
|
||||
config, ok := configs[providerName]
|
||||
if ok {
|
||||
if config.Version != "" {
|
||||
constraint = config.Version
|
||||
}
|
||||
providerType = config.Name
|
||||
} else {
|
||||
providerType = providerName
|
||||
}
|
||||
|
||||
ret[providerName] = ProviderVersionConstraint{
|
||||
ProviderType: providerType,
|
||||
Constraint: constraint,
|
||||
}
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
// RequiredRanges returns a semver.Range for each distinct provider type in
|
||||
// the constraint map. If the same provider type appears more than once
|
||||
// (e.g. because aliases are in use) then their respective constraints are
|
||||
// combined such that they must *all* apply.
|
||||
//
|
||||
// The result of this method can be passed to the
|
||||
// PluginMetaSet.ConstrainVersions method within the plugin/discovery
|
||||
// package in order to filter down the available plugins to those which
|
||||
// satisfy the given constraints.
|
||||
//
|
||||
// This function will panic if any of the constraints within cannot be
|
||||
// parsed as semver ranges. This is guaranteed to never happen for a
|
||||
// constraint set that was built from a configuration that passed validation.
|
||||
func (cons ProviderVersionConstraints) RequiredRanges() map[string]semver.Range {
|
||||
ret := make(map[string]semver.Range, len(cons))
|
||||
|
||||
for _, con := range cons {
|
||||
spec := semver.MustParseRange(con.Constraint)
|
||||
if existing, exists := ret[con.ProviderType]; exists {
|
||||
ret[con.ProviderType] = existing.AND(spec)
|
||||
} else {
|
||||
ret[con.ProviderType] = spec
|
||||
}
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
// ProviderConfigsByFullName returns a map from provider full names (as
|
||||
// returned by ProviderConfig.FullName()) to the corresponding provider
|
||||
// configs.
|
||||
//
|
||||
// This function returns no new information than what's already in
|
||||
// c.ProviderConfigs, but returns it in a more convenient shape. If there
|
||||
// is more than one provider config with the same full name then the result
|
||||
// is undefined, but that is guaranteed not to happen for any config that
|
||||
// has passed validation.
|
||||
func (c *Config) ProviderConfigsByFullName() map[string]*ProviderConfig {
|
||||
ret := make(map[string]*ProviderConfig, len(c.ProviderConfigs))
|
||||
|
||||
for _, pc := range c.ProviderConfigs {
|
||||
ret[pc.FullName()] = pc
|
||||
}
|
||||
|
||||
return ret
|
||||
}
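A hedged sketch of how aliased configs of the same provider type are combined by RequiredRanges (illustrative; the provider names and constraints are made up, and the blang/semver range syntax shown above is assumed):

package main

import (
	"fmt"

	"github.com/blang/semver"
	"github.com/hashicorp/terraform/config"
)

func main() {
	// Two configs of the same provider type; RequiredRanges ANDs their
	// constraints per provider *type*.
	cons := config.ProviderVersionConstraints{
		"aws":      {ProviderType: "aws", Constraint: ">=1.0.0"},
		"aws.east": {ProviderType: "aws", Constraint: "<2.0.0"},
	}
	ranges := cons.RequiredRanges()
	fmt.Println(ranges["aws"](semver.MustParse("1.5.0"))) // true: satisfies both
	fmt.Println(ranges["aws"](semver.MustParse("2.1.0"))) // false: fails "<2.0.0"
}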
|
|
@@ -60,6 +60,11 @@ func expandArray(m map[string]string, prefix string) []interface{} {
 		return []interface{}{}
 	}
 
+	// NOTE: "num" is not necessarily accurate, e.g. if a user tampers
+	// with state, so the following code should not crash when given a
+	// number of items more or less than what's given in num. The
+	// num key is mainly just a hint that this is a list or set.
+
 	// The Schema "Set" type stores its values in an array format, but
 	// using numeric hash values instead of ordinal keys. Take the set
 	// of keys regardless of value, and expand them in numeric order.

@@ -101,7 +106,7 @@ func expandArray(m map[string]string, prefix string) []interface{} {
 	}
 	sort.Ints(keysList)
 
-	result := make([]interface{}, num)
+	result := make([]interface{}, len(keysList))
 	for i, key := range keysList {
 		keyString := strconv.Itoa(key)
 		if computed[keyString] {
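A short sketch of the robustness the note above describes (illustrative; the flatmap keys are made up):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/flatmap"
)

func main() {
	// The "#" count hint disagrees with the keys actually present; expansion
	// is sized from the keys found rather than trusting the hint.
	m := map[string]string{
		"ports.#": "3",
		"ports.0": "80",
		"ports.1": "443",
	}
	fmt.Println(flatmap.Expand(m, "ports")) // [80 443]
}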
|
@@ -1,21 +1,17 @@
 package resource
 
 import (
-	"crypto/rand"
 	"fmt"
-	"math/big"
+	"strings"
 	"sync"
+	"time"
 )
 
 const UniqueIdPrefix = `terraform-`
 
-// idCounter is a randomly seeded monotonic counter for generating ordered
-// unique ids. It uses a big.Int so we can easily increment a long numeric
-// string. The max possible hex value here with 12 random bytes is
-// "01000000000000000000000000", so there's no chance of rollover during
-// operation.
+// idCounter is a monotonic counter for generating ordered unique ids.
 var idMutex sync.Mutex
-var idCounter = big.NewInt(0).SetBytes(randomBytes(12))
+var idCounter uint32
 
 // Helper for a resource to generate a unique identifier w/ default prefix
 func UniqueId() string {

@@ -25,15 +21,20 @@ func UniqueId() string {
 // Helper for a resource to generate a unique identifier w/ given prefix
 //
 // After the prefix, the ID consists of an incrementing 26 digit value (to match
-// previous timestamp output).
+// previous timestamp output). After the prefix, the ID consists of a timestamp
+// and an incrementing 8 hex digit value The timestamp means that multiple IDs
+// created with the same prefix will sort in the order of their creation, even
+// across multiple terraform executions, as long as the clock is not turned back
+// between calls, and as long as any given terraform execution generates fewer
+// than 4 billion IDs.
 func PrefixedUniqueId(prefix string) string {
+	// Be precise to 4 digits of fractional seconds, but remove the dot before the
+	// fractional seconds.
+	timestamp := strings.Replace(
+		time.Now().UTC().Format("20060102150405.0000"), ".", "", 1)
+
 	idMutex.Lock()
 	defer idMutex.Unlock()
-	return fmt.Sprintf("%s%026x", prefix, idCounter.Add(idCounter, big.NewInt(1)))
-}
-
-func randomBytes(n int) []byte {
-	b := make([]byte, n)
-	rand.Read(b)
-	return b
+	idCounter++
+	return fmt.Sprintf("%s%s%08x", prefix, timestamp, idCounter)
 }
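A sketch of the ID shape produced by the new implementation (illustrative; the prefix is made up, and the ordering claim assumes the clock is not turned back between calls, as the comment above states):

package main

import (
	"fmt"
	"strings"

	"github.com/hashicorp/terraform/helper/resource"
)

func main() {
	// IDs embed a UTC timestamp plus an 8-hex-digit counter, so IDs generated
	// in sequence with the same prefix sort in creation order.
	a := resource.PrefixedUniqueId("demo-")
	b := resource.PrefixedUniqueId("demo-")
	fmt.Println(strings.HasPrefix(a, "demo-"), a < b) // true true
}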
@@ -383,11 +383,11 @@ func Test(t TestT, c TestCase) {
 		c.PreCheck()
 	}
 
-	ctxProviders, err := testProviderFactories(c)
+	providerResolver, err := testProviderResolver(c)
 	if err != nil {
 		t.Fatal(err)
 	}
-	opts := terraform.ContextOpts{Providers: ctxProviders}
+	opts := terraform.ContextOpts{ProviderResolver: providerResolver}
 
 	// A single state variable to track the lifecycle, starting with no state
 	var state *terraform.State
|
@ -400,15 +400,22 @@ func Test(t TestT, c TestCase) {
|
|||
var err error
|
||||
log.Printf("[WARN] Test: Executing step %d", i)
|
||||
|
||||
// Determine the test mode to execute
|
||||
if step.Config != "" {
|
||||
state, err = testStepConfig(opts, state, step)
|
||||
} else if step.ImportState {
|
||||
state, err = testStepImportState(opts, state, step)
|
||||
} else {
|
||||
if step.Config == "" && !step.ImportState {
|
||||
err = fmt.Errorf(
|
||||
"unknown test mode for step. Please see TestStep docs\n\n%#v",
|
||||
step)
|
||||
} else {
|
||||
if step.ImportState {
|
||||
if step.Config == "" {
|
||||
step.Config = testProviderConfig(c)
|
||||
}
|
||||
|
||||
// Can optionally set step.Config in addition to
|
||||
// step.ImportState, to provide config for the import.
|
||||
state, err = testStepImportState(opts, state, step)
|
||||
} else {
|
||||
state, err = testStepConfig(opts, state, step)
|
||||
}
|
||||
}
|
||||
|
||||
// If there was an error, exit
|
||||
|
@ -496,16 +503,29 @@ func Test(t TestT, c TestCase) {
|
|||
}
|
||||
}
|
||||
|
||||
// testProviderFactories is a helper to build the ResourceProviderFactory map
|
||||
// testProviderConfig takes the list of Providers in a TestCase and returns a
|
||||
// config with only empty provider blocks. This is useful for Import, where no
|
||||
// config is provided, but the providers must be defined.
|
||||
func testProviderConfig(c TestCase) string {
|
||||
var lines []string
|
||||
for p := range c.Providers {
|
||||
lines = append(lines, fmt.Sprintf("provider %q {}\n", p))
|
||||
}
|
||||
|
||||
return strings.Join(lines, "")
|
||||
}
|
||||
|
||||
// testProviderResolver is a helper to build a ResourceProviderResolver
|
||||
// with pre instantiated ResourceProviders, so that we can reset them for the
|
||||
// test, while only calling the factory function once.
|
||||
// Any errors are stored so that they can be returned by the factory in
|
||||
// terraform to match non-test behavior.
|
||||
func testProviderFactories(c TestCase) (map[string]terraform.ResourceProviderFactory, error) {
|
||||
ctxProviders := c.ProviderFactories // make(map[string]terraform.ResourceProviderFactory)
|
||||
func testProviderResolver(c TestCase) (terraform.ResourceProviderResolver, error) {
|
||||
ctxProviders := c.ProviderFactories
|
||||
if ctxProviders == nil {
|
||||
ctxProviders = make(map[string]terraform.ResourceProviderFactory)
|
||||
}
|
||||
|
||||
// add any fixed providers
|
||||
for k, p := range c.Providers {
|
||||
ctxProviders[k] = terraform.ResourceProviderFactoryFixed(p)
|
||||
|
@ -527,7 +547,7 @@ func testProviderFactories(c TestCase) (map[string]terraform.ResourceProviderFac
|
|||
}
|
||||
}
|
||||
|
||||
return ctxProviders, nil
|
||||
return terraform.ResourceProviderResolverFixed(ctxProviders), nil
|
||||
}
|
||||
|
||||
// UnitTest is a helper to force the acceptance testing harness to run in the
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"sync"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/hashicorp/terraform/config"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
|
@ -89,6 +90,13 @@ func (p *Provider) InternalValidate() error {
|
|||
validationErrors = multierror.Append(validationErrors, err)
|
||||
}
|
||||
|
||||
// Provider-specific checks
|
||||
for k, _ := range sm {
|
||||
if isReservedProviderFieldName(k) {
|
||||
return fmt.Errorf("%s is a reserved field name for a provider", k)
|
||||
}
|
||||
}
|
||||
|
||||
for k, r := range p.ResourcesMap {
|
||||
if err := r.InternalValidate(nil, true); err != nil {
|
||||
validationErrors = multierror.Append(validationErrors, fmt.Errorf("resource %s: %s", k, err))
|
||||
|
@ -104,6 +112,15 @@ func (p *Provider) InternalValidate() error {
|
|||
return validationErrors
|
||||
}
|
||||
|
||||
func isReservedProviderFieldName(name string) bool {
|
||||
for _, reservedName := range config.ReservedProviderFields {
|
||||
if name == reservedName {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
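A hedged sketch of the reserved-name check above in action (illustrative; the provider schema is made up, and it assumes InternalValidate otherwise passes for this minimal schema):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/schema"
)

func main() {
	// "alias" collides with config.ReservedProviderFields, so InternalValidate
	// rejects this provider schema.
	p := &schema.Provider{
		Schema: map[string]*schema.Schema{
			"alias": {Type: schema.TypeString, Optional: true},
		},
	}
	fmt.Println(p.InternalValidate()) // alias is a reserved field name for a provider
}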
|
||||
|
||||
// Meta returns the metadata associated with this provider that was
|
||||
// returned by the Configure call. It will be nil until Configure is called.
|
||||
func (p *Provider) Meta() interface{} {
|
||||
|
|
|
@@ -43,7 +43,7 @@ type Provisioner struct {
 
 	// ValidateFunc is a function for extended validation. This is optional
 	// and should be used when individual field validation is not enough.
-	ValidateFunc func(*ResourceData) ([]string, []error)
+	ValidateFunc func(*terraform.ResourceConfig) ([]string, []error)
 
 	stopCtx       context.Context
 	stopCtxCancel context.CancelFunc
|
@ -121,32 +121,6 @@ func (p *Provisioner) Stop() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (p *Provisioner) Validate(config *terraform.ResourceConfig) ([]string, []error) {
|
||||
if err := p.InternalValidate(); err != nil {
|
||||
return nil, []error{fmt.Errorf(
|
||||
"Internal validation of the provisioner failed! This is always a bug\n"+
|
||||
"with the provisioner itself, and not a user issue. Please report\n"+
|
||||
"this bug:\n\n%s", err)}
|
||||
}
|
||||
w := []string{}
|
||||
e := []error{}
|
||||
if p.Schema != nil {
|
||||
w2, e2 := schemaMap(p.Schema).Validate(config)
|
||||
w = append(w, w2...)
|
||||
e = append(e, e2...)
|
||||
}
|
||||
if p.ValidateFunc != nil {
|
||||
data := &ResourceData{
|
||||
schema: p.Schema,
|
||||
config: config,
|
||||
}
|
||||
w2, e2 := p.ValidateFunc(data)
|
||||
w = append(w, w2...)
|
||||
e = append(e, e2...)
|
||||
}
|
||||
return w, e
|
||||
}
|
||||
|
||||
// Apply implementation of terraform.ResourceProvisioner interface.
|
||||
func (p *Provisioner) Apply(
|
||||
o terraform.UIOutput,
|
||||
|
@ -204,3 +178,27 @@ func (p *Provisioner) Apply(
|
|||
ctx = context.WithValue(ctx, ProvRawStateKey, s)
|
||||
return p.ApplyFunc(ctx)
|
||||
}
|
||||
|
||||
// Validate implements the terraform.ResourceProvisioner interface.
|
||||
func (p *Provisioner) Validate(c *terraform.ResourceConfig) (ws []string, es []error) {
|
||||
if err := p.InternalValidate(); err != nil {
|
||||
return nil, []error{fmt.Errorf(
|
||||
"Internal validation of the provisioner failed! This is always a bug\n"+
|
||||
"with the provisioner itself, and not a user issue. Please report\n"+
|
||||
"this bug:\n\n%s", err)}
|
||||
}
|
||||
|
||||
if p.Schema != nil {
|
||||
w, e := schemaMap(p.Schema).Validate(c)
|
||||
ws = append(ws, w...)
|
||||
es = append(es, e...)
|
||||
}
|
||||
|
||||
if p.ValidateFunc != nil {
|
||||
w, e := p.ValidateFunc(c)
|
||||
ws = append(ws, w...)
|
||||
es = append(es, e...)
|
||||
}
|
||||
|
||||
return ws, es
|
||||
}
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"log"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform/config"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
|
@ -142,6 +143,12 @@ func (r *Resource) Apply(
|
|||
if err := rt.DiffDecode(d); err != nil {
|
||||
log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
|
||||
}
|
||||
} else if s != nil {
|
||||
if _, ok := s.Meta[TimeoutKey]; ok {
|
||||
if err := rt.StateDecode(s); err != nil {
|
||||
log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log.Printf("[DEBUG] No meta timeoutkey found in Apply()")
|
||||
}
|
||||
|
@ -388,9 +395,25 @@ func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error
|
|||
}
|
||||
}
|
||||
|
||||
// Resource-specific checks
|
||||
for k, _ := range tsm {
|
||||
if isReservedResourceFieldName(k) {
|
||||
return fmt.Errorf("%s is a reserved field name for a resource", k)
|
||||
}
|
||||
}
|
||||
|
||||
return schemaMap(r.Schema).InternalValidate(tsm)
|
||||
}
|
||||
|
||||
func isReservedResourceFieldName(name string) bool {
|
||||
for _, reservedName := range config.ReservedResourceFields {
|
||||
if name == reservedName {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Data returns a ResourceData struct for this Resource. Each return value
|
||||
// is a separate copy and can be safely modified differently.
|
||||
//
|
||||
|
|
|
@ -15,6 +15,7 @@ import (
|
|||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@@ -661,7 +662,13 @@ func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error {
 		if v.ValidateFunc != nil {
 			switch v.Type {
 			case TypeList, TypeSet:
-				return fmt.Errorf("ValidateFunc is not yet supported on lists or sets.")
+				return fmt.Errorf("%s: ValidateFunc is not yet supported on lists or sets.", k)
 			}
 		}
+
+		if v.Deprecated == "" && v.Removed == "" {
+			if !isValidFieldName(k) {
+				return fmt.Errorf("%s: Field name may only contain lowercase alphanumeric characters & underscores.", k)
+			}
+		}
 	}
|
@ -669,6 +676,11 @@ func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func isValidFieldName(name string) bool {
|
||||
re := regexp.MustCompile("^[a-z0-9_]+$")
|
||||
return re.MatchString(name)
|
||||
}
|
||||
|
||||
func (m schemaMap) diff(
|
||||
k string,
|
||||
schema *Schema,
|
||||
|
|
|
@ -39,6 +39,8 @@ func (w *closeWalker) Struct(reflect.Value) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
var closerType = reflect.TypeOf((*io.Closer)(nil)).Elem()
|
||||
|
||||
func (w *closeWalker) StructField(f reflect.StructField, v reflect.Value) error {
|
||||
// Not sure why this would be but lets avoid some panics
|
||||
if !v.IsValid() {
|
||||
|
@@ -56,17 +58,18 @@ func (w *closeWalker) StructField(f reflect.StructField, v reflect.Value) error
 		return nil
 	}
 
-	// We're looking for an io.Closer
-	raw := v.Interface()
-	if raw == nil {
-		return nil
+	var closer io.Closer
+	if v.Type().Implements(closerType) {
+		closer = v.Interface().(io.Closer)
+	} else if v.CanAddr() {
+		// The Close method may require a pointer receiver, but we only have a value.
+		v := v.Addr()
+		if v.Type().Implements(closerType) {
+			closer = v.Interface().(io.Closer)
+		}
 	}
 
-	closer, ok := raw.(io.Closer)
-	if !ok && v.CanAddr() {
-		closer, ok = v.Addr().Interface().(io.Closer)
-	}
-	if !ok {
+	if closer == nil {
 		return reflectwalk.SkipEntry
 	}
 
|
||||
|
|
|
@ -26,6 +26,14 @@ type Value struct {
|
|||
valueSet bool
|
||||
}
|
||||
|
||||
func (v *Value) Lock() {
|
||||
v.lock.Lock()
|
||||
}
|
||||
|
||||
func (v *Value) Unlock() {
|
||||
v.lock.Unlock()
|
||||
}
|
||||
|
||||
// Close closes the value. This can never fail. For a definition of
|
||||
// "close" see the struct docs.
|
||||
func (w *Value) Close() error {
|
||||
|
|
|
@ -28,6 +28,44 @@ func IntBetween(min, max int) schema.SchemaValidateFunc {
|
|||
}
|
||||
}
|
||||
|
||||
// IntAtLeast returns a SchemaValidateFunc which tests if the provided value
|
||||
// is of type int and is at least min (inclusive)
|
||||
func IntAtLeast(min int) schema.SchemaValidateFunc {
|
||||
return func(i interface{}, k string) (s []string, es []error) {
|
||||
v, ok := i.(int)
|
||||
if !ok {
|
||||
es = append(es, fmt.Errorf("expected type of %s to be int", k))
|
||||
return
|
||||
}
|
||||
|
||||
if v < min {
|
||||
es = append(es, fmt.Errorf("expected %s to be at least (%d), got %d", k, min, v))
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// IntAtMost returns a SchemaValidateFunc which tests if the provided value
|
||||
// is of type int and is at most max (inclusive)
|
||||
func IntAtMost(max int) schema.SchemaValidateFunc {
|
||||
return func(i interface{}, k string) (s []string, es []error) {
|
||||
v, ok := i.(int)
|
||||
if !ok {
|
||||
es = append(es, fmt.Errorf("expected type of %s to be int", k))
|
||||
return
|
||||
}
|
||||
|
||||
if v > max {
|
||||
es = append(es, fmt.Errorf("expected %s to be at most (%d), got %d", k, max, v))
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
}
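A small sketch of attaching these validators to a schema attribute and the error text they produce (illustrative; the attribute name "retries" is made up):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/helper/validation"
)

func main() {
	// IntAtLeast/IntAtMost return ordinary SchemaValidateFuncs, so they can be
	// called directly to see the messages generated above.
	s := &schema.Schema{
		Type:         schema.TypeInt,
		Optional:     true,
		ValidateFunc: validation.IntAtLeast(1),
	}
	_, errs := s.ValidateFunc(0, "retries")
	fmt.Println(errs) // [expected retries to be at least (1), got 0]
	_, errs = validation.IntAtMost(10)(42, "retries")
	fmt.Println(errs) // [expected retries to be at most (10), got 42]
}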
|
||||
|
||||
// StringInSlice returns a SchemaValidateFunc which tests if the provided value
|
||||
// is of type string and matches the value of an element in the valid slice
|
||||
// will test with in lower case if ignoreCase is true
|
||||
|
|
|
@ -0,0 +1,43 @@
|
|||
package moduledeps
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform/plugin/discovery"
|
||||
)
|
||||
|
||||
// Providers describes a set of provider dependencies for a given module.
|
||||
//
|
||||
// Each named provider instance can have one version constraint.
|
||||
type Providers map[ProviderInstance]ProviderDependency
|
||||
|
||||
// ProviderDependency describes the dependency for a particular provider
|
||||
// instance, including both the set of allowed versions and the reason for
|
||||
// the dependency.
|
||||
type ProviderDependency struct {
|
||||
Constraints discovery.Constraints
|
||||
Reason ProviderDependencyReason
|
||||
}
|
||||
|
||||
// ProviderDependencyReason is an enumeration of reasons why a dependency might be
|
||||
// present.
|
||||
type ProviderDependencyReason int
|
||||
|
||||
const (
|
||||
// ProviderDependencyExplicit means that there is an explicit "provider"
|
||||
// block in the configuration for this module.
|
||||
ProviderDependencyExplicit ProviderDependencyReason = iota
|
||||
|
||||
// ProviderDependencyImplicit means that there is no explicit "provider"
|
||||
// block but there is at least one resource that uses this provider.
|
||||
ProviderDependencyImplicit
|
||||
|
||||
// ProviderDependencyInherited is a special case of
|
||||
// ProviderDependencyImplicit where a parent module has defined a
|
||||
// configuration for the provider that has been inherited by at least one
|
||||
// resource in this module.
|
||||
ProviderDependencyInherited
|
||||
|
||||
// ProviderDependencyFromState means that this provider is not currently
|
||||
// referenced by configuration at all, but some existing instances in
|
||||
// the state still depend on it.
|
||||
ProviderDependencyFromState
|
||||
)
|
|
@ -0,0 +1,7 @@
|
|||
// Package moduledeps contains types that can be used to describe the
|
||||
// providers required for all of the modules in a module tree.
|
||||
//
|
||||
// It does not itself contain the functionality for populating such
|
||||
// data structures; that's in Terraform core, since this package intentionally
|
||||
// does not depend on terraform core to avoid package dependency cycles.
|
||||
package moduledeps
|
|
@ -0,0 +1,204 @@
|
|||
package moduledeps
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/terraform/plugin/discovery"
|
||||
)
|
||||
|
||||
// Module represents the dependencies of a single module, as well being
|
||||
// a node in a tree of such structures representing the dependencies of
|
||||
// an entire configuration.
|
||||
type Module struct {
|
||||
Name string
|
||||
Providers Providers
|
||||
Children []*Module
|
||||
}
|
||||
|
||||
// WalkFunc is a callback type for use with Module.WalkTree
|
||||
type WalkFunc func(path []string, parent *Module, current *Module) error
|
||||
|
||||
// WalkTree calls the given callback once for the receiver and then
|
||||
// once for each descendent, in an order such that parents are called
|
||||
// before their children and siblings are called in the order they
|
||||
// appear in the Children slice.
|
||||
//
|
||||
// When calling the callback, parent will be nil for the first call
|
||||
// for the receiving module, and then set to the direct parent of
|
||||
// each module for the subsequent calls.
|
||||
//
|
||||
// The path given to the callback is valid only until the callback
|
||||
// returns, after which it will be mutated and reused. Callbacks must
|
||||
// therefore copy the path slice if they wish to retain it.
|
||||
//
|
||||
// If the given callback returns an error, the walk will be aborted at
|
||||
// that point and that error returned to the caller.
|
||||
//
|
||||
// This function is not thread-safe for concurrent modifications of the
|
||||
// data structure, so it's the caller's responsibility to arrange for that
|
||||
// should it be needed.
|
||||
//
|
||||
// It is safe for a callback to modify the descendents of the "current"
|
||||
// module, including the ordering of the Children slice itself, but the
|
||||
// callback MUST NOT modify the parent module.
|
||||
func (m *Module) WalkTree(cb WalkFunc) error {
|
||||
return walkModuleTree(make([]string, 0, 1), nil, m, cb)
|
||||
}
|
||||
|
||||
func walkModuleTree(path []string, parent *Module, current *Module, cb WalkFunc) error {
|
||||
path = append(path, current.Name)
|
||||
err := cb(path, parent, current)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, child := range current.Children {
|
||||
err := walkModuleTree(path, current, child, cb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
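A sketch of walking a tree and retaining the visit paths; per the doc comment above, the path slice is reused between callbacks and must be copied (illustrative; the module names are made up):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/moduledeps"
)

func main() {
	root := &moduledeps.Module{
		Name:     "root",
		Children: []*moduledeps.Module{{Name: "vpc"}, {Name: "app"}},
	}
	var paths [][]string
	root.WalkTree(func(path []string, parent, current *moduledeps.Module) error {
		// Copy the path so it outlives the callback.
		paths = append(paths, append([]string(nil), path...))
		return nil
	})
	fmt.Println(paths) // [[root] [root vpc] [root app]]
}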
|
||||
|
||||
// SortChildren sorts the Children slice into lexicographic order by
|
||||
// name, in-place.
|
||||
//
|
||||
// This is primarily useful prior to calling WalkTree so that the walk
|
||||
// will proceed in a consistent order.
|
||||
func (m *Module) SortChildren() {
|
||||
sort.Sort(sortModules{m.Children})
|
||||
}
|
||||
|
||||
// SortDescendents is a convenience wrapper for calling SortChildren on
|
||||
// the receiver and all of its descendent modules.
|
||||
func (m *Module) SortDescendents() {
|
||||
m.WalkTree(func(path []string, parent *Module, current *Module) error {
|
||||
current.SortChildren()
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
type sortModules struct {
|
||||
modules []*Module
|
||||
}
|
||||
|
||||
func (s sortModules) Len() int {
|
||||
return len(s.modules)
|
||||
}
|
||||
|
||||
func (s sortModules) Less(i, j int) bool {
|
||||
cmp := strings.Compare(s.modules[i].Name, s.modules[j].Name)
|
||||
return cmp < 0
|
||||
}
|
||||
|
||||
func (s sortModules) Swap(i, j int) {
|
||||
s.modules[i], s.modules[j] = s.modules[j], s.modules[i]
|
||||
}
|
||||
|
||||
// PluginRequirements produces a PluginRequirements structure that can
|
||||
// be used with discovery.PluginMetaSet.ConstrainVersions to identify
|
||||
// suitable plugins to satisfy the module's provider dependencies.
|
||||
//
|
||||
// This method only considers the direct requirements of the receiver.
|
||||
// Use AllPluginRequirements to flatten the dependencies for the
|
||||
// entire tree of modules.
|
||||
//
|
||||
// Requirements returned by this method include only version constraints,
|
||||
// and apply no particular SHA256 hash constraint.
|
||||
func (m *Module) PluginRequirements() discovery.PluginRequirements {
|
||||
ret := make(discovery.PluginRequirements)
|
||||
for inst, dep := range m.Providers {
|
||||
// m.Providers is keyed on provider names, such as "aws.foo".
|
||||
// a PluginRequirements wants keys to be provider *types*, such
|
||||
// as "aws". If there are multiple aliases for the same
|
||||
// provider then we will flatten them into a single requirement
|
||||
// by combining their constraint sets.
|
||||
pty := inst.Type()
|
||||
if existing, exists := ret[pty]; exists {
|
||||
ret[pty].Versions = existing.Versions.Append(dep.Constraints)
|
||||
} else {
|
||||
ret[pty] = &discovery.PluginConstraints{
|
||||
Versions: dep.Constraints,
|
||||
}
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// AllPluginRequirements calls PluginRequirements for the receiver and all
|
||||
// of its descendents, and merges the result into a single PluginRequirements
|
||||
// structure that would satisfy all of the modules together.
|
||||
//
|
||||
// Requirements returned by this method include only version constraints,
|
||||
// and apply no particular SHA256 hash constraint.
|
||||
func (m *Module) AllPluginRequirements() discovery.PluginRequirements {
|
||||
var ret discovery.PluginRequirements
|
||||
m.WalkTree(func(path []string, parent *Module, current *Module) error {
|
||||
ret = ret.Merge(current.PluginRequirements())
|
||||
return nil
|
||||
})
|
||||
return ret
|
||||
}
|
||||
|
||||
// Equal returns true if the receiver is the root of an identical tree
|
||||
// to the other given Module. This is a deep comparison that considers
|
||||
// the equality of all downstream modules too.
|
||||
//
|
||||
// The children are considered to be ordered, so callers may wish to use
|
||||
// SortDescendents first to normalize the order of the slices of child nodes.
|
||||
//
|
||||
// The implementation of this function is not optimized since it is provided
|
||||
// primarily for use in tests.
|
||||
func (m *Module) Equal(other *Module) bool {
|
||||
// take care of nils first
|
||||
if m == nil && other == nil {
|
||||
return true
|
||||
} else if (m == nil && other != nil) || (m != nil && other == nil) {
|
||||
return false
|
||||
}
|
||||
|
||||
if m.Name != other.Name {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(m.Providers) != len(other.Providers) {
|
||||
return false
|
||||
}
|
||||
if len(m.Children) != len(other.Children) {
|
||||
return false
|
||||
}
|
||||
|
||||
// Can't use reflect.DeepEqual on this provider structure because
|
||||
// the nested Constraints objects contain function pointers that
|
||||
// never compare as equal. So we'll need to walk it the long way.
|
||||
for inst, dep := range m.Providers {
|
||||
if _, exists := other.Providers[inst]; !exists {
|
||||
return false
|
||||
}
|
||||
|
||||
if dep.Reason != other.Providers[inst].Reason {
|
||||
return false
|
||||
}
|
||||
|
||||
// Constraints are not too easy to compare robustly, so
|
||||
// we'll just use their string representations as a proxy
|
||||
// for now.
|
||||
if dep.Constraints.String() != other.Providers[inst].Constraints.String() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Above we already checked that we have the same number of children
|
||||
// in each module, so now we just need to check that they are
|
||||
// recursively equal.
|
||||
for i := range m.Children {
|
||||
if !m.Children[i].Equal(other.Children[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// If we fall out here then they are equal
|
||||
return true
|
||||
}
|
|
@ -0,0 +1,30 @@
|
|||
package moduledeps
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ProviderInstance describes a particular provider instance by its full name,
|
||||
// like "null" or "aws.foo".
|
||||
type ProviderInstance string
|
||||
|
||||
// Type returns the provider type of this instance. For example, for an instance
|
||||
// named "aws.foo" the type is "aws".
|
||||
func (p ProviderInstance) Type() string {
|
||||
t := string(p)
|
||||
if dotPos := strings.Index(t, "."); dotPos != -1 {
|
||||
t = t[:dotPos]
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// Alias returns the alias of this provider, if any. An instance named "aws.foo"
|
||||
// has the alias "foo", while an instance named just "docker" has no alias,
|
||||
// so the empty string would be returned.
|
||||
func (p ProviderInstance) Alias() string {
|
||||
t := string(p)
|
||||
if dotPos := strings.Index(t, "."); dotPos != -1 {
|
||||
return t[dotPos+1:]
|
||||
}
|
||||
return ""
|
||||
}
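A sketch of the naming convention described above (illustrative; the instance names are made up):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/moduledeps"
)

func main() {
	// Full instance name -> (type, alias).
	for _, name := range []moduledeps.ProviderInstance{"aws.foo", "docker"} {
		fmt.Printf("%-8s type=%s alias=%q\n", name, name.Type(), name.Alias())
	}
	// aws.foo  type=aws alias="foo"
	// docker   type=docker alias=""
}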
|
|
@ -0,0 +1,24 @@
|
|||
package plugin
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
|
||||
plugin "github.com/hashicorp/go-plugin"
|
||||
"github.com/hashicorp/terraform/plugin/discovery"
|
||||
)
|
||||
|
||||
// ClientConfig returns a configuration object that can be used to instantiate
|
||||
// a client for the plugin described by the given metadata.
|
||||
func ClientConfig(m discovery.PluginMeta) *plugin.ClientConfig {
|
||||
return &plugin.ClientConfig{
|
||||
Cmd: exec.Command(m.Path),
|
||||
HandshakeConfig: Handshake,
|
||||
Managed: true,
|
||||
Plugins: PluginMap,
|
||||
}
|
||||
}
|
||||
|
||||
// Client returns a plugin client for the plugin described by the given metadata.
|
||||
func Client(m discovery.PluginMeta) *plugin.Client {
|
||||
return plugin.NewClient(ClientConfig(m))
|
||||
}
|
|
@ -0,0 +1,30 @@
|
|||
package discovery
|
||||
|
||||
// Error is a type used to describe situations that the caller must handle
|
||||
// since they indicate some form of user error.
|
||||
//
|
||||
// The functions and methods that return these specialized errors indicate so
|
||||
// in their documentation. The Error type should not itself be used directly,
|
||||
// but rather errors should be compared using the == operator with the
|
||||
// error constants in this package.
|
||||
//
|
||||
// Values of this type are _not_ used when the error being reported is an
|
||||
// operational error (server unavailable, etc) or indicative of a bug in
|
||||
// this package or its caller.
|
||||
type Error string
|
||||
|
||||
// ErrorNoSuitableVersion indicates that a suitable version (meeting given
|
||||
// constraints) is not available.
|
||||
const ErrorNoSuitableVersion = Error("no suitable version is available")
|
||||
|
||||
// ErrorNoVersionCompatible indicates that all of the available versions
|
||||
// that otherwise met constraints are not compatible with the current
|
||||
// version of Terraform.
|
||||
const ErrorNoVersionCompatible = Error("no available version is compatible with this version of Terraform")
|
||||
|
||||
// ErrorNoSuchProvider indicates that no provider exists with a name given
|
||||
const ErrorNoSuchProvider = Error("no provider exists with the given name")
|
||||
|
||||
func (err Error) Error() string {
|
||||
return string(err)
|
||||
}
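A hedged sketch of how a caller distinguishes these sentinel errors with ==, as the package docs above prescribe (illustrative; handleGetError is a hypothetical helper, not part of the vendored code):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/plugin/discovery"
)

// handleGetError maps the sentinel errors to user-oriented explanations.
func handleGetError(err error) string {
	switch err {
	case nil:
		return "installed"
	case discovery.ErrorNoSuchProvider:
		return "no provider with this name is published (likely a typo)"
	case discovery.ErrorNoSuitableVersion:
		return "provider exists, but the constraints exclude every release"
	case discovery.ErrorNoVersionCompatible:
		return "releases exist, but none match this Terraform's plugin protocol"
	default:
		return "operational failure: " + err.Error()
	}
}

func main() {
	fmt.Println(handleGetError(discovery.ErrorNoSuchProvider))
}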
|
|
@ -0,0 +1,168 @@
|
|||
package discovery
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// FindPlugins looks in the given directories for files whose filenames
|
||||
// suggest that they are plugins of the given kind (e.g. "provider") and
|
||||
// returns a PluginMetaSet representing the discovered potential-plugins.
|
||||
//
|
||||
// Currently this supports two different naming schemes. The current
|
||||
// standard naming scheme is a subdirectory called $GOOS-$GOARCH containing
|
||||
// files named terraform-$KIND-$NAME-V$VERSION. The legacy naming scheme is
|
||||
// files directly in the given directory whose names are like
|
||||
// terraform-$KIND-$NAME.
|
||||
//
|
||||
// Only one plugin will be returned for each unique plugin (name, version)
|
||||
// pair, with preference given to files found in earlier directories.
|
||||
//
|
||||
// This is a convenience wrapper around FindPluginPaths and ResolvePluginsPaths.
|
||||
func FindPlugins(kind string, dirs []string) PluginMetaSet {
|
||||
return ResolvePluginPaths(FindPluginPaths(kind, dirs))
|
||||
}
|
||||
|
||||
// FindPluginPaths looks in the given directories for files whose filenames
|
||||
// suggest that they are plugins of the given kind (e.g. "provider").
|
||||
//
|
||||
// The return value is a list of absolute paths that appear to refer to
|
||||
// plugins in the given directories, based only on what can be inferred
|
||||
// from the naming scheme. The paths returned are ordered such that files
|
||||
// in later dirs appear after files in earlier dirs in the given directory
|
||||
// list. Within the same directory plugins are returned in a consistent but
|
||||
// undefined order.
|
||||
func FindPluginPaths(kind string, dirs []string) []string {
|
||||
// This is just a thin wrapper around findPluginPaths so that we can
|
||||
// use the latter in tests with a fake machineName so we can use our
|
||||
// test fixtures.
|
||||
return findPluginPaths(kind, dirs)
|
||||
}
|
||||
|
||||
func findPluginPaths(kind string, dirs []string) []string {
|
||||
prefix := "terraform-" + kind + "-"
|
||||
|
||||
ret := make([]string, 0, len(dirs))
|
||||
|
||||
for _, dir := range dirs {
|
||||
items, err := ioutil.ReadDir(dir)
|
||||
if err != nil {
|
||||
// Ignore missing dirs, non-dirs, etc
|
||||
continue
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] checking for %s in %q", kind, dir)
|
||||
|
||||
for _, item := range items {
|
||||
fullName := item.Name()
|
||||
|
||||
if !strings.HasPrefix(fullName, prefix) {
|
||||
log.Printf("[DEBUG] skipping %q, not a %s", fullName, kind)
|
||||
continue
|
||||
}
|
||||
|
||||
// New-style paths must have a version segment in filename
|
||||
if strings.Contains(strings.ToLower(fullName), "_v") {
|
||||
absPath, err := filepath.Abs(filepath.Join(dir, fullName))
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] plugin filepath error: %s", err)
|
||||
continue
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] found %s %q", kind, fullName)
|
||||
ret = append(ret, filepath.Clean(absPath))
|
||||
continue
|
||||
}
|
||||
|
||||
// Legacy style with files directly in the base directory
|
||||
absPath, err := filepath.Abs(filepath.Join(dir, fullName))
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] plugin filepath error: %s", err)
|
||||
continue
|
||||
}
|
||||
|
||||
log.Printf("[WARNING] found legacy %s %q", kind, fullName)
|
||||
|
||||
ret = append(ret, filepath.Clean(absPath))
|
||||
}
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
// ResolvePluginPaths takes a list of paths to plugin executables (as returned
|
||||
// by e.g. FindPluginPaths) and produces a PluginMetaSet describing the
|
||||
// referenced plugins.
|
||||
//
|
||||
// If the same combination of plugin name and version appears multiple times,
|
||||
// the earlier reference will be preferred. Several different versions of
|
||||
// the same plugin name may be returned, in which case the methods of
|
||||
// PluginMetaSet can be used to filter down.
|
||||
func ResolvePluginPaths(paths []string) PluginMetaSet {
|
||||
s := make(PluginMetaSet)
|
||||
|
||||
type nameVersion struct {
|
||||
Name string
|
||||
Version string
|
||||
}
|
||||
found := make(map[nameVersion]struct{})
|
||||
|
||||
for _, path := range paths {
|
||||
baseName := strings.ToLower(filepath.Base(path))
|
||||
if !strings.HasPrefix(baseName, "terraform-") {
|
||||
// Should never happen with reasonable input
|
||||
continue
|
||||
}
|
||||
|
||||
baseName = baseName[10:]
|
||||
firstDash := strings.Index(baseName, "-")
|
||||
if firstDash == -1 {
|
||||
// Should never happen with reasonable input
|
||||
continue
|
||||
}
|
||||
|
||||
baseName = baseName[firstDash+1:]
|
||||
if baseName == "" {
|
||||
// Should never happen with reasonable input
|
||||
continue
|
||||
}
|
||||
|
||||
// Trim the .exe suffix used on Windows before we start wrangling
|
||||
// the remainder of the path.
|
||||
if strings.HasSuffix(baseName, ".exe") {
|
||||
baseName = baseName[:len(baseName)-4]
|
||||
}
|
||||
|
||||
parts := strings.SplitN(baseName, "_v", 2)
|
||||
name := parts[0]
|
||||
version := VersionZero
|
||||
if len(parts) == 2 {
|
||||
version = parts[1]
|
||||
}
|
||||
|
||||
// Auto-installed plugins contain an extra name portion representing
|
||||
// the expected plugin version, which we must trim off.
|
||||
if underX := strings.Index(version, "_x"); underX != -1 {
|
||||
version = version[:underX]
|
||||
}
|
||||
|
||||
if _, ok := found[nameVersion{name, version}]; ok {
|
||||
// Skip duplicate versions of the same plugin
|
||||
// (We do this during this step because after this we will be
|
||||
// dealing with sets and thus lose our ordering with which to
|
||||
// decide preference.)
|
||||
continue
|
||||
}
|
||||
|
||||
s.Add(PluginMeta{
|
||||
Name: name,
|
||||
Version: VersionStr(version),
|
||||
Path: path,
|
||||
})
|
||||
found[nameVersion{name, version}] = struct{}{}
|
||||
}
|
||||
|
||||
return s
|
||||
}
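A sketch of the two filename schemes resolved above (illustrative; the paths are made up, and the printed values assume the trimming rules shown in the function):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/plugin/discovery"
)

func main() {
	metas := discovery.ResolvePluginPaths([]string{
		"/plugins/linux_amd64/terraform-provider-aws_v1.0.0_x4", // new style; the "_x4" protocol suffix is trimmed
		"/plugins/terraform-provider-null",                      // legacy style; version falls back to VersionZero
	})
	aws := metas.WithName("aws").Newest()
	fmt.Println(aws.Name, aws.Version, metas.Count()) // aws 1.0.0 2
}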
|
|
@ -0,0 +1,424 @@
|
|||
package discovery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/net/html"
|
||||
|
||||
cleanhttp "github.com/hashicorp/go-cleanhttp"
|
||||
getter "github.com/hashicorp/go-getter"
|
||||
multierror "github.com/hashicorp/go-multierror"
|
||||
)
|
||||
|
||||
// Releases are located by parsing the html listing from releases.hashicorp.com.
|
||||
//
|
||||
// The URL for releases follows the pattern:
|
||||
// https://releases.hashicorp.com/terraform-provider-name/<x.y.z>/terraform-provider-name_<x.y.z>_<os>_<arch>.<ext>
|
||||
//
|
||||
// The plugin protocol version will be saved with the release and returned in
|
||||
// the header X-TERRAFORM_PROTOCOL_VERSION.
|
||||
|
||||
const protocolVersionHeader = "x-terraform-protocol-version"
|
||||
|
||||
var releaseHost = "https://releases.hashicorp.com"
|
||||
|
||||
var httpClient = cleanhttp.DefaultClient()
|
||||
|
||||
// An Installer maintains a local cache of plugins by downloading plugins
|
||||
// from an online repository.
|
||||
type Installer interface {
|
||||
Get(name string, req Constraints) (PluginMeta, error)
|
||||
PurgeUnused(used map[string]PluginMeta) (removed PluginMetaSet, err error)
|
||||
}
|
||||
|
||||
// ProviderInstaller is an Installer implementation that knows how to
|
||||
// download Terraform providers from the official HashiCorp releases service
|
||||
// into a local directory. The files downloaded are compliant with the
|
||||
// naming scheme expected by FindPlugins, so the target directory of a
|
||||
// provider installer can be used as one of several plugin discovery sources.
|
||||
type ProviderInstaller struct {
|
||||
Dir string
|
||||
|
||||
PluginProtocolVersion uint
|
||||
|
||||
// OS and Arch specify the OS and architecture that should be used when
|
||||
// installing plugins. These use the same labels as the runtime.GOOS and
|
||||
// runtime.GOARCH variables respectively, and indeed the values of these
|
||||
// are used as defaults if either of these is the empty string.
|
||||
OS string
|
||||
Arch string
|
||||
|
||||
// Skip checksum and signature verification
|
||||
SkipVerify bool
|
||||
}
|
||||
|
||||
// Get is part of an implementation of type Installer, and attempts to download
|
||||
// and install a Terraform provider matching the given constraints.
|
||||
//
|
||||
// This method may return one of a number of sentinel errors from this
|
||||
// package to indicate issues that are likely to be resolvable via user action:
|
||||
//
|
||||
// ErrorNoSuchProvider: no provider with the given name exists in the repository.
|
||||
// ErrorNoSuitableVersion: the provider exists but no available version matches constraints.
|
||||
// ErrorNoVersionCompatible: a plugin was found within the constraints but it is
|
||||
// incompatible with the current Terraform version.
|
||||
//
|
||||
// These errors should be recognized and handled as special cases by the caller
|
||||
// to present a suitable user-oriented error message.
|
||||
//
|
||||
// All other errors indicate an internal problem that is likely _not_ solvable
|
||||
// through user action, or at least not within Terraform's scope. Error messages
|
||||
// are produced under the assumption that if presented to the user they will
|
||||
// be presented alongside context about what is being installed, and thus the
|
||||
// error messages do not redundantly include such information.
|
||||
func (i *ProviderInstaller) Get(provider string, req Constraints) (PluginMeta, error) {
|
||||
versions, err := i.listProviderVersions(provider)
|
||||
// TODO: return multiple errors
|
||||
if err != nil {
|
||||
return PluginMeta{}, err
|
||||
}
|
||||
|
||||
if len(versions) == 0 {
|
||||
return PluginMeta{}, ErrorNoSuitableVersion
|
||||
}
|
||||
|
||||
versions = allowedVersions(versions, req)
|
||||
if len(versions) == 0 {
|
||||
return PluginMeta{}, ErrorNoSuitableVersion
|
||||
}
|
||||
|
||||
// sort them newest to oldest
|
||||
Versions(versions).Sort()
|
||||
|
||||
// take the first matching plugin we find
|
||||
for _, v := range versions {
|
||||
url := i.providerURL(provider, v.String())
|
||||
|
||||
if !i.SkipVerify {
|
||||
sha256, err := i.getProviderChecksum(provider, v.String())
|
||||
if err != nil {
|
||||
return PluginMeta{}, err
|
||||
}
|
||||
|
||||
// add the checksum parameter for go-getter to verify the download for us.
|
||||
if sha256 != "" {
|
||||
url = url + "?checksum=sha256:" + sha256
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] fetching provider info for %s version %s", provider, v)
|
||||
if checkPlugin(url, i.PluginProtocolVersion) {
|
||||
log.Printf("[DEBUG] getting provider %q version %q at %s", provider, v, url)
|
||||
err := getter.Get(i.Dir, url)
|
||||
if err != nil {
|
||||
return PluginMeta{}, err
|
||||
}
|
||||
|
||||
// Find what we just installed
|
||||
// (This is weird, because go-getter doesn't directly return
|
||||
// information about what was extracted, and we just extracted
|
||||
// the archive directly into a shared dir here.)
|
||||
log.Printf("[DEBUG] looking for the %s %s plugin we just installed", provider, v)
|
||||
metas := FindPlugins("provider", []string{i.Dir})
|
||||
log.Printf("[DEBUG] all plugins found %#v", metas)
|
||||
metas, _ = metas.ValidateVersions()
|
||||
metas = metas.WithName(provider).WithVersion(v)
|
||||
log.Printf("[DEBUG] filtered plugins %#v", metas)
|
||||
if metas.Count() == 0 {
|
||||
// This should never happen. Suggests that the release archive
|
||||
// contains an executable file whose name doesn't match the
|
||||
// expected convention.
|
||||
return PluginMeta{}, fmt.Errorf(
|
||||
"failed to find installed plugin version %s; this is a bug in Terraform and should be reported",
|
||||
v,
|
||||
)
|
||||
}
|
||||
|
||||
if metas.Count() > 1 {
|
||||
// This should also never happen, and suggests that a
|
||||
// particular version was re-released with a different
|
||||
// executable filename. We consider releases as immutable, so
|
||||
// this is an error.
|
||||
return PluginMeta{}, fmt.Errorf(
|
||||
"multiple plugins installed for version %s; this is a bug in Terraform and should be reported",
|
||||
v,
|
||||
)
|
||||
}
|
||||
|
||||
// By now we know we have exactly one meta, and so "Newest" will
|
||||
// return that one.
|
||||
return metas.Newest(), nil
|
||||
}
|
||||
|
||||
log.Printf("[INFO] incompatible ProtocolVersion for %s version %s", provider, v)
|
||||
}
|
||||
|
||||
return PluginMeta{}, ErrorNoVersionCompatible
|
||||
}
|
||||
|
||||
func (i *ProviderInstaller) PurgeUnused(used map[string]PluginMeta) (PluginMetaSet, error) {
|
||||
purge := make(PluginMetaSet)
|
||||
|
||||
present := FindPlugins("provider", []string{i.Dir})
|
||||
for meta := range present {
|
||||
chosen, ok := used[meta.Name]
|
||||
if !ok {
|
||||
purge.Add(meta)
|
||||
}
|
||||
if chosen.Path != meta.Path {
|
||||
purge.Add(meta)
|
||||
}
|
||||
}
|
||||
|
||||
removed := make(PluginMetaSet)
|
||||
var errs error
|
||||
for meta := range purge {
|
||||
path := meta.Path
|
||||
err := os.Remove(path)
|
||||
if err != nil {
|
||||
errs = multierror.Append(errs, fmt.Errorf(
|
||||
"failed to remove unused provider plugin %s: %s",
|
||||
path, err,
|
||||
))
|
||||
} else {
|
||||
removed.Add(meta)
|
||||
}
|
||||
}
|
||||
|
||||
return removed, errs
|
||||
}
|
||||
|
||||
// Plugins are referred to by the short name, but all URLs and files will use
|
||||
// the full name prefixed with terraform-<plugin_type>-
|
||||
func (i *ProviderInstaller) providerName(name string) string {
|
||||
return "terraform-provider-" + name
|
||||
}
|
||||
|
||||
func (i *ProviderInstaller) providerFileName(name, version string) string {
|
||||
os := i.OS
|
||||
arch := i.Arch
|
||||
if os == "" {
|
||||
os = runtime.GOOS
|
||||
}
|
||||
if arch == "" {
|
||||
arch = runtime.GOARCH
|
||||
}
|
||||
return fmt.Sprintf("%s_%s_%s_%s.zip", i.providerName(name), version, os, arch)
|
||||
}
|
||||
|
||||
// providerVersionsURL returns the path to the released versions directory for the provider:
|
||||
// https://releases.hashicorp.com/terraform-provider-name/
|
||||
func (i *ProviderInstaller) providerVersionsURL(name string) string {
|
||||
return releaseHost + "/" + i.providerName(name) + "/"
|
||||
}
|
||||
|
||||
// providerURL returns the full path to the provider file, using the current OS
|
||||
// and ARCH:
|
||||
// .../terraform-provider-name_<x.y.z>/terraform-provider-name_<x.y.z>_<os>_<arch>.<ext>
|
||||
func (i *ProviderInstaller) providerURL(name, version string) string {
|
||||
return fmt.Sprintf("%s%s/%s", i.providerVersionsURL(name), version, i.providerFileName(name, version))
|
||||
}
|
||||
|
||||
func (i *ProviderInstaller) providerChecksumURL(name, version string) string {
|
||||
fileName := fmt.Sprintf("%s_%s_SHA256SUMS", i.providerName(name), version)
|
||||
u := fmt.Sprintf("%s%s/%s", i.providerVersionsURL(name), version, fileName)
|
||||
return u
|
||||
}
|
||||
|
||||
func (i *ProviderInstaller) getProviderChecksum(name, version string) (string, error) {
|
||||
checksums, err := getPluginSHA256SUMs(i.providerChecksumURL(name, version))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return checksumForFile(checksums, i.providerFileName(name, version)), nil
|
||||
}
|
||||
|
||||
// Return the plugin version by making a HEAD request to the provided url.
|
||||
// If the header is not present, we assume the latest version will be
|
||||
// compatible, and leave the check for discovery or execution.
|
||||
func checkPlugin(url string, pluginProtocolVersion uint) bool {
|
||||
resp, err := httpClient.Head(url)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] error fetching plugin headers: %s", err)
|
||||
return false
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
log.Println("[ERROR] non-200 status fetching plugin headers:", resp.Status)
|
||||
return false
|
||||
}
|
||||
|
||||
proto := resp.Header.Get(protocolVersionHeader)
|
||||
if proto == "" {
|
||||
// The header isn't present, but we don't make this error fatal since
|
||||
// the latest version will probably work.
|
||||
log.Printf("[WARNING] missing %s from: %s", protocolVersionHeader, url)
|
||||
return true
|
||||
}
|
||||
|
||||
protoVersion, err := strconv.Atoi(proto)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] invalid ProtocolVersion: %s", proto)
|
||||
return false
|
||||
}
|
||||
|
||||
return protoVersion == int(pluginProtocolVersion)
|
||||
}
|
||||
|
||||
// list the version available for the named plugin
|
||||
func (i *ProviderInstaller) listProviderVersions(name string) ([]Version, error) {
|
||||
versions, err := listPluginVersions(i.providerVersionsURL(name))
|
||||
if err != nil {
|
||||
// listPluginVersions returns a verbose error message indicating
|
||||
// what was being accessed and what failed
|
||||
return nil, err
|
||||
}
|
||||
return versions, nil
|
||||
}
|
||||
|
||||
var errVersionNotFound = errors.New("version not found")
|
||||
|
||||
// take the list of available versions for a plugin, and filter out those that
|
||||
// don't fit the constraints.
|
||||
func allowedVersions(available []Version, required Constraints) []Version {
|
||||
var allowed []Version
|
||||
|
||||
for _, v := range available {
|
||||
if required.Allows(v) {
|
||||
allowed = append(allowed, v)
|
||||
}
|
||||
}
|
||||
|
||||
return allowed
|
||||
}
|
||||
|
||||
// return a list of the plugin versions at the given URL
|
||||
func listPluginVersions(url string) ([]Version, error) {
|
||||
resp, err := httpClient.Get(url)
|
||||
if err != nil {
|
||||
// http library produces a verbose error message that includes the
|
||||
// URL being accessed, etc.
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := ioutil.ReadAll(resp.Body)
|
||||
log.Printf("[ERROR] failed to fetch plugin versions from %s\n%s\n%s", url, resp.Status, body)
|
||||
|
||||
switch resp.StatusCode {
|
||||
case http.StatusNotFound, http.StatusForbidden:
|
||||
// These are treated as indicative of the given name not being
|
||||
// a valid provider name at all.
|
||||
return nil, ErrorNoSuchProvider
|
||||
|
||||
default:
|
||||
// All other errors are assumed to be operational problems.
|
||||
return nil, fmt.Errorf("error accessing %s: %s", url, resp.Status)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
body, err := html.Parse(resp.Body)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
names := []string{}
|
||||
|
||||
// all we need to do is list links on the directory listing page that look like plugins
|
||||
var f func(*html.Node)
|
||||
f = func(n *html.Node) {
|
||||
if n.Type == html.ElementNode && n.Data == "a" {
|
||||
c := n.FirstChild
|
||||
if c != nil && c.Type == html.TextNode && strings.HasPrefix(c.Data, "terraform-") {
|
||||
names = append(names, c.Data)
|
||||
return
|
||||
}
|
||||
}
|
||||
for c := n.FirstChild; c != nil; c = c.NextSibling {
|
||||
f(c)
|
||||
}
|
||||
}
|
||||
f(body)
|
||||
|
||||
return versionsFromNames(names), nil
|
||||
}
|
||||
|
||||
// parse the list of directory names into a sorted list of available versions
|
||||
func versionsFromNames(names []string) []Version {
|
||||
var versions []Version
|
||||
for _, name := range names {
|
||||
parts := strings.SplitN(name, "_", 2)
|
||||
if len(parts) == 2 && parts[1] != "" {
|
||||
v, err := VersionStr(parts[1]).Parse()
|
||||
if err != nil {
|
||||
// filter invalid versions scraped from the page
|
||||
log.Printf("[WARN] invalid version found for %q: %s", name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
versions = append(versions, v)
|
||||
}
|
||||
}
|
||||
|
||||
return versions
|
||||
}
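As an illustrative, in-package sketch (not part of the vendored source): the directory names scraped from the releases listing are expected to look like "terraform-provider-aws_0.1.0", and anything without a "_<version>" suffix, or with an unparseable version, is simply dropped.

func versionsFromNamesSketch() {
	names := []string{
		"terraform-provider-aws_0.1.0", // parsed as version 0.1.0
		"terraform-provider-aws_0.1.1", // parsed as version 0.1.1
		"index.html",                   // no "_<version>" suffix; skipped
	}
	for _, v := range versionsFromNames(names) {
		log.Printf("[DEBUG] available version: %s", v)
	}
}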
|
||||
|
||||
func checksumForFile(sums []byte, name string) string {
|
||||
for _, line := range strings.Split(string(sums), "\n") {
|
||||
parts := strings.Fields(line)
|
||||
if len(parts) > 1 && parts[1] == name {
|
||||
return parts[0]
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
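checksumForFile assumes the conventional SHA256SUMS layout of one "<hex digest>  <filename>" pair per line. A minimal in-package sketch, using truncated, made-up digests:

func checksumForFileSketch() {
	sums := []byte(
		"5891b5b522d5df08...  terraform-provider-aws_0.1.0_linux_amd64.zip\n" +
			"e258d248fda94c63...  terraform-provider-null_0.1.0_linux_amd64.zip\n")

	want := checksumForFile(sums, "terraform-provider-null_0.1.0_linux_amd64.zip")
	if want == "" {
		log.Println("[ERROR] no checksum entry for the requested archive")
	}
}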
|
||||
|
||||
// fetch the SHA256SUMS file provided, and verify its signature.
|
||||
func getPluginSHA256SUMs(sumsURL string) ([]byte, error) {
|
||||
sigURL := sumsURL + ".sig"
|
||||
|
||||
sums, err := getFile(sumsURL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error fetching checksums: %s", err)
|
||||
}
|
||||
|
||||
sig, err := getFile(sigURL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error fetching checksums signature: %s", err)
|
||||
}
|
||||
|
||||
if err := verifySig(sums, sig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return sums, nil
|
||||
}
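A hedged sketch of how these helpers could fit together when verifying a downloaded archive; the release URL and archive name are hypothetical, and the snippet additionally assumes "crypto/sha256" and "path/filepath" imports:

func verifyDownloadSketch(zipPath string) error {
	// Hypothetical release location; the installer builds the real URLs.
	sumsURL := "https://releases.hashicorp.com/terraform-provider-aws/0.1.0/terraform-provider-aws_0.1.0_SHA256SUMS"

	// Fetch the checksums and verify their PGP signature.
	sums, err := getPluginSHA256SUMs(sumsURL)
	if err != nil {
		return err
	}

	want := checksumForFile(sums, filepath.Base(zipPath))
	if want == "" {
		return fmt.Errorf("no checksum entry for %s", filepath.Base(zipPath))
	}

	data, err := ioutil.ReadFile(zipPath)
	if err != nil {
		return err
	}
	if got := fmt.Sprintf("%x", sha256.Sum256(data)); got != want {
		return fmt.Errorf("checksum mismatch for %s: got %s, want %s", zipPath, got, want)
	}
	return nil
}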
|
||||
|
||||
func getFile(url string) ([]byte, error) {
|
||||
resp, err := httpClient.Get(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("%s", resp.Status)
|
||||
}
|
||||
|
||||
data, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return data, err
|
||||
}
|
||||
return data, nil
|
||||
}
|
|
@@ -0,0 +1,41 @@
package discovery

import (
	"crypto/sha256"
	"io"
	"os"
)

// PluginMeta is metadata about a plugin, useful for launching the plugin
// and for understanding which plugins are available.
type PluginMeta struct {
	// Name is the name of the plugin, e.g. as inferred from the plugin
	// binary's filename, or by explicit configuration.
	Name string

	// Version is the semver version of the plugin, expressed as a string
	// that might not be semver-valid.
	Version VersionStr

	// Path is the absolute path of the executable that can be launched
	// to provide the RPC server for this plugin.
	Path string
}

// SHA256 returns a SHA256 hash of the content of the referenced executable
// file, or an error if the file's contents cannot be read.
func (m PluginMeta) SHA256() ([]byte, error) {
	f, err := os.Open(m.Path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	h := sha256.New()
	_, err = io.Copy(h, f)
	if err != nil {
		return nil, err
	}

	return h.Sum(nil), nil
}
|
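A minimal, self-contained usage sketch from outside the package; the plugin path is invented for illustration:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform/plugin/discovery"
)

func main() {
	meta := discovery.PluginMeta{
		Name:    "aws",
		Version: "0.1.0", // a VersionStr; not guaranteed to be valid semver
		Path:    "/usr/local/terraform.d/plugins/terraform-provider-aws_v0.1.0", // hypothetical
	}

	digest, err := meta.SHA256()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s %x\n", meta.Name, digest)
}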
195
vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go
generated
vendored
Normal file
|
@ -0,0 +1,195 @@
|
|||
package discovery
|
||||
|
||||
// A PluginMetaSet is a set of PluginMeta objects meeting certain criteria.
|
||||
//
|
||||
// Methods on this type allow filtering of the set to produce subsets that
|
||||
// meet more restrictive criteria.
|
||||
type PluginMetaSet map[PluginMeta]struct{}
|
||||
|
||||
// Add inserts the given PluginMeta into the receiving set. This is a no-op
|
||||
// if the given meta is already present.
|
||||
func (s PluginMetaSet) Add(p PluginMeta) {
|
||||
s[p] = struct{}{}
|
||||
}
|
||||
|
||||
// Remove removes the given PluginMeta from the receiving set. This is a no-op
|
||||
// if the given meta is not already present.
|
||||
func (s PluginMetaSet) Remove(p PluginMeta) {
|
||||
delete(s, p)
|
||||
}
|
||||
|
||||
// Has returns true if the given meta is in the receiving set, or false
|
||||
// otherwise.
|
||||
func (s PluginMetaSet) Has(p PluginMeta) bool {
|
||||
_, ok := s[p]
|
||||
return ok
|
||||
}
|
||||
|
||||
// Count returns the number of metas in the set
|
||||
func (s PluginMetaSet) Count() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
// ValidateVersions returns two new PluginMetaSets, separating those with
|
||||
// versions that have syntax-valid semver versions from those that don't.
|
||||
//
|
||||
// Eliminating invalid versions from consideration (and possibly warning about
|
||||
// them) is usually the first step of working with a meta set after discovery
|
||||
// has completed.
|
||||
func (s PluginMetaSet) ValidateVersions() (valid, invalid PluginMetaSet) {
|
||||
valid = make(PluginMetaSet)
|
||||
invalid = make(PluginMetaSet)
|
||||
for p := range s {
|
||||
if _, err := p.Version.Parse(); err == nil {
|
||||
valid.Add(p)
|
||||
} else {
|
||||
invalid.Add(p)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// WithName returns the subset of metas that have the given name.
|
||||
func (s PluginMetaSet) WithName(name string) PluginMetaSet {
|
||||
ns := make(PluginMetaSet)
|
||||
for p := range s {
|
||||
if p.Name == name {
|
||||
ns.Add(p)
|
||||
}
|
||||
}
|
||||
return ns
|
||||
}
|
||||
|
||||
// WithVersion returns the subset of metas that have the given version.
|
||||
//
|
||||
// This should be used only with the "valid" result from ValidateVersions;
|
||||
// it will ignore any plugin metas that have invalid version strings.
|
||||
func (s PluginMetaSet) WithVersion(version Version) PluginMetaSet {
|
||||
ns := make(PluginMetaSet)
|
||||
for p := range s {
|
||||
gotVersion, err := p.Version.Parse()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if gotVersion.Equal(version) {
|
||||
ns.Add(p)
|
||||
}
|
||||
}
|
||||
return ns
|
||||
}
|
||||
|
||||
// ByName groups the metas in the set by their Names, returning a map.
|
||||
func (s PluginMetaSet) ByName() map[string]PluginMetaSet {
|
||||
ret := make(map[string]PluginMetaSet)
|
||||
for p := range s {
|
||||
if _, ok := ret[p.Name]; !ok {
|
||||
ret[p.Name] = make(PluginMetaSet)
|
||||
}
|
||||
ret[p.Name].Add(p)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// Newest returns the one item from the set that has the newest Version value.
|
||||
//
|
||||
// The result is meaningful only if the set is already filtered such that
|
||||
// all of the metas have the same Name.
|
||||
//
|
||||
// If there isn't at least one meta in the set then this function will panic.
|
||||
// Use Count() to ensure that there is at least one value before calling.
|
||||
//
|
||||
// If any of the metas have invalid version strings then this function will
|
||||
// panic. Use ValidateVersions() first to filter out metas with invalid
|
||||
// versions.
|
||||
//
|
||||
// If two metas have the same Version then one is arbitrarily chosen. This
|
||||
// situation should be avoided by pre-filtering the set.
|
||||
func (s PluginMetaSet) Newest() PluginMeta {
|
||||
if len(s) == 0 {
|
||||
panic("can't call NewestStable on empty PluginMetaSet")
|
||||
}
|
||||
|
||||
var first = true
|
||||
var winner PluginMeta
|
||||
var winnerVersion Version
|
||||
for p := range s {
|
||||
version, err := p.Version.Parse()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if first || version.NewerThan(winnerVersion) {
|
||||
winner = p
|
||||
winnerVersion = version
|
||||
first = false
|
||||
}
|
||||
}
|
||||
|
||||
return winner
|
||||
}
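A hedged, in-package sketch of the usual post-discovery flow: drop unparseable versions, narrow to a single name, then take the newest. The provider name "aws" is just an example.

func newestForNameSketch(all PluginMetaSet) (PluginMeta, bool) {
	// Drop anything with an unparseable version before calling Newest,
	// since Newest panics on invalid version strings.
	valid, _ := all.ValidateVersions()

	candidates := valid.WithName("aws")
	if candidates.Count() == 0 {
		return PluginMeta{}, false
	}
	return candidates.Newest(), true
}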
|
||||
|
||||
// ConstrainVersions takes a set of requirements and attempts to
|
||||
// return a map from name to a set of metas that have the matching
|
||||
// name and an appropriate version.
|
||||
//
|
||||
// If any of the given requirements match *no* plugins then its PluginMetaSet
|
||||
// in the returned map will be empty.
|
||||
//
|
||||
// All viable metas are returned, so the caller can apply any desired filtering
|
||||
// to reduce down to a single option. For example, calling Newest() to obtain
|
||||
// the highest available version.
|
||||
//
|
||||
// If any of the metas in the set have invalid version strings then this
|
||||
// function will panic. Use ValidateVersions() first to filter out metas with
|
||||
// invalid versions.
|
||||
func (s PluginMetaSet) ConstrainVersions(reqd PluginRequirements) map[string]PluginMetaSet {
|
||||
ret := make(map[string]PluginMetaSet)
|
||||
for p := range s {
|
||||
name := p.Name
|
||||
allowedVersions, ok := reqd[name]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if _, ok := ret[p.Name]; !ok {
|
||||
ret[p.Name] = make(PluginMetaSet)
|
||||
}
|
||||
version, err := p.Version.Parse()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if allowedVersions.Allows(version) {
|
||||
ret[p.Name].Add(p)
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
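A sketch of applying configuration-derived requirements to a discovered set; the provider name and the "~> 0.1" constraint are illustrative only:

func constrainSketch(all PluginMetaSet) map[string]PluginMetaSet {
	reqd := PluginRequirements{
		"aws": &PluginConstraints{
			Versions: ConstraintStr("~> 0.1").MustParse(),
		},
	}

	// ConstrainVersions panics on invalid version strings, so validate first.
	valid, _ := all.ValidateVersions()

	// Any name mapped to an empty set has no installed plugin satisfying
	// its constraints.
	return valid.ConstrainVersions(reqd)
}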
|
||||
|
||||
// OverridePaths returns a new set where any existing plugins with the given
|
||||
// names are removed and replaced with the single path given in the map.
|
||||
//
|
||||
// This is here only to continue to support the legacy way of overriding
|
||||
// plugin binaries in the .terraformrc file. It treats all given plugins
|
||||
// as pre-versioning (version 0.0.0). This mechanism will eventually be
|
||||
// phased out, with vendor directories being the intended replacement.
|
||||
func (s PluginMetaSet) OverridePaths(paths map[string]string) PluginMetaSet {
|
||||
ret := make(PluginMetaSet)
|
||||
for p := range s {
|
||||
if _, ok := paths[p.Name]; ok {
|
||||
// Skip plugins that we're overriding
|
||||
continue
|
||||
}
|
||||
|
||||
ret.Add(p)
|
||||
}
|
||||
|
||||
// Now add the metadata for overriding plugins
|
||||
for name, path := range paths {
|
||||
ret.Add(PluginMeta{
|
||||
Name: name,
|
||||
Version: VersionZero,
|
||||
Path: path,
|
||||
})
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
105
vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go
generated
vendored
Normal file
|
@ -0,0 +1,105 @@
|
|||
package discovery
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
)
|
||||
|
||||
// PluginRequirements describes a set of plugins (assumed to be of a consistent
|
||||
// kind) that are required to exist and have versions within the given
|
||||
// corresponding sets.
|
||||
type PluginRequirements map[string]*PluginConstraints
|
||||
|
||||
// PluginConstraints represents an element of PluginRequirements describing
|
||||
// the constraints for a single plugin.
|
||||
type PluginConstraints struct {
|
||||
// Specifies that the plugin's version must be within the given
|
||||
// constraints.
|
||||
Versions Constraints
|
||||
|
||||
// If non-nil, the hash of the on-disk plugin executable must exactly
|
||||
// match the SHA256 hash given here.
|
||||
SHA256 []byte
|
||||
}
|
||||
|
||||
// Allows returns true if the given version is within the receiver's version
|
||||
// constraints.
|
||||
func (s *PluginConstraints) Allows(v Version) bool {
|
||||
return s.Versions.Allows(v)
|
||||
}
|
||||
|
||||
// AcceptsSHA256 returns true if the given executable SHA256 hash is acceptable,
|
||||
// either because it matches the constraint or because there is no such
|
||||
// constraint.
|
||||
func (s *PluginConstraints) AcceptsSHA256(digest []byte) bool {
|
||||
if s.SHA256 == nil {
|
||||
return true
|
||||
}
|
||||
return bytes.Equal(s.SHA256, digest)
|
||||
}
|
||||
|
||||
// Merge takes the contents of the receiver and the other given requirements
|
||||
// object and merges them together into a single requirements structure
|
||||
// that satisfies both sets of requirements.
|
||||
//
|
||||
// Note that it doesn't make sense to merge two PluginRequirements with
|
||||
// differing required plugin SHA256 hashes, since the result will never
|
||||
// match any plugin.
|
||||
func (r PluginRequirements) Merge(other PluginRequirements) PluginRequirements {
|
||||
ret := make(PluginRequirements)
|
||||
for n, c := range r {
|
||||
ret[n] = &PluginConstraints{
|
||||
Versions: Constraints{}.Append(c.Versions),
|
||||
SHA256: c.SHA256,
|
||||
}
|
||||
}
|
||||
for n, c := range other {
|
||||
if existing, exists := ret[n]; exists {
|
||||
ret[n].Versions = ret[n].Versions.Append(c.Versions)
|
||||
|
||||
if existing.SHA256 != nil {
|
||||
if c.SHA256 != nil && !bytes.Equal(c.SHA256, existing.SHA256) {
|
||||
// If we've been asked to merge two constraints with
|
||||
// different SHA256 hashes then we'll produce a dummy value
|
||||
// that can never match anything. This is a silly edge case
|
||||
// that no reasonable caller should hit.
|
||||
ret[n].SHA256 = []byte(invalidProviderHash)
|
||||
}
|
||||
} else {
|
||||
ret[n].SHA256 = c.SHA256 // might still be nil
|
||||
}
|
||||
} else {
|
||||
ret[n] = &PluginConstraints{
|
||||
Versions: Constraints{}.Append(c.Versions),
|
||||
SHA256: c.SHA256,
|
||||
}
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
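A short, in-package sketch of merging the requirements of two modules: the merged Versions constraint is the intersection, so a candidate version must satisfy both inputs.

func mergeSketch() bool {
	a := PluginRequirements{
		"aws": &PluginConstraints{Versions: ConstraintStr(">= 0.1.0").MustParse()},
	}
	b := PluginRequirements{
		"aws": &PluginConstraints{Versions: ConstraintStr("< 1.0.0").MustParse()},
	}

	merged := a.Merge(b)

	// 0.2.0 satisfies both ">= 0.1.0" and "< 1.0.0", so this returns true.
	return merged["aws"].Allows(VersionStr("0.2.0").MustParse())
}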
|
||||
|
||||
// LockExecutables applies additional constraints to the receiver that
|
||||
// require plugin executables with specific SHA256 digests. This modifies
|
||||
// the receiver in-place, since it's intended to be applied after
|
||||
// version constraints have been resolved.
|
||||
//
|
||||
// The given map must include a key for every plugin that is already
|
||||
// required. If not, any missing keys will cause the corresponding plugin
|
||||
// to never match, though the direct caller doesn't necessarily need to
|
||||
// guarantee this as long as the downstream code _applying_ these constraints
|
||||
// is able to deal with the non-match in some way.
|
||||
func (r PluginRequirements) LockExecutables(sha256s map[string][]byte) {
|
||||
for name, cons := range r {
|
||||
digest := sha256s[name]
|
||||
|
||||
if digest == nil {
|
||||
// Prevent any match, which will then presumably cause the
|
||||
// downstream consumer of this requirements to report an error.
|
||||
cons.SHA256 = []byte(invalidProviderHash)
|
||||
continue
|
||||
}
|
||||
|
||||
cons.SHA256 = digest
|
||||
}
|
||||
}
|
||||
|
||||
const invalidProviderHash = "<invalid>"
|
53
vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go
generated
vendored
Normal file
|
@ -0,0 +1,53 @@
|
|||
package discovery
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/crypto/openpgp"
|
||||
)
|
||||
|
||||
// Verify the data using the provided openpgp detached signature and the
|
||||
// embedded hashicorp public key.
|
||||
func verifySig(data, sig []byte) error {
|
||||
el, err := openpgp.ReadArmoredKeyRing(strings.NewReader(hashiPublicKey))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = openpgp.CheckDetachedSignature(el, bytes.NewReader(data), bytes.NewReader(sig))
|
||||
return err
|
||||
}
|
||||
|
||||
// this is the public key that signs the checksums file for releases.
|
||||
const hashiPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
|
||||
Version: GnuPG v1
|
||||
|
||||
mQENBFMORM0BCADBRyKO1MhCirazOSVwcfTr1xUxjPvfxD3hjUwHtjsOy/bT6p9f
|
||||
W2mRPfwnq2JB5As+paL3UGDsSRDnK9KAxQb0NNF4+eVhr/EJ18s3wwXXDMjpIifq
|
||||
fIm2WyH3G+aRLTLPIpscUNKDyxFOUbsmgXAmJ46Re1fn8uKxKRHbfa39aeuEYWFA
|
||||
3drdL1WoUngvED7f+RnKBK2G6ZEpO+LDovQk19xGjiMTtPJrjMjZJ3QXqPvx5wca
|
||||
KSZLr4lMTuoTI/ZXyZy5bD4tShiZz6KcyX27cD70q2iRcEZ0poLKHyEIDAi3TM5k
|
||||
SwbbWBFd5RNPOR0qzrb/0p9ksKK48IIfH2FvABEBAAG0K0hhc2hpQ29ycCBTZWN1
|
||||
cml0eSA8c2VjdXJpdHlAaGFzaGljb3JwLmNvbT6JATgEEwECACIFAlMORM0CGwMG
|
||||
CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEFGFLYc0j/xMyWIIAIPhcVqiQ59n
|
||||
Jc07gjUX0SWBJAxEG1lKxfzS4Xp+57h2xxTpdotGQ1fZwsihaIqow337YHQI3q0i
|
||||
SqV534Ms+j/tU7X8sq11xFJIeEVG8PASRCwmryUwghFKPlHETQ8jJ+Y8+1asRydi
|
||||
psP3B/5Mjhqv/uOK+Vy3zAyIpyDOMtIpOVfjSpCplVRdtSTFWBu9Em7j5I2HMn1w
|
||||
sJZnJgXKpybpibGiiTtmnFLOwibmprSu04rsnP4ncdC2XRD4wIjoyA+4PKgX3sCO
|
||||
klEzKryWYBmLkJOMDdo52LttP3279s7XrkLEE7ia0fXa2c12EQ0f0DQ1tGUvyVEW
|
||||
WmJVccm5bq25AQ0EUw5EzQEIANaPUY04/g7AmYkOMjaCZ6iTp9hB5Rsj/4ee/ln9
|
||||
wArzRO9+3eejLWh53FoN1rO+su7tiXJA5YAzVy6tuolrqjM8DBztPxdLBbEi4V+j
|
||||
2tK0dATdBQBHEh3OJApO2UBtcjaZBT31zrG9K55D+CrcgIVEHAKY8Cb4kLBkb5wM
|
||||
skn+DrASKU0BNIV1qRsxfiUdQHZfSqtp004nrql1lbFMLFEuiY8FZrkkQ9qduixo
|
||||
mTT6f34/oiY+Jam3zCK7RDN/OjuWheIPGj/Qbx9JuNiwgX6yRj7OE1tjUx6d8g9y
|
||||
0H1fmLJbb3WZZbuuGFnK6qrE3bGeY8+AWaJAZ37wpWh1p0cAEQEAAYkBHwQYAQIA
|
||||
CQUCUw5EzQIbDAAKCRBRhS2HNI/8TJntCAClU7TOO/X053eKF1jqNW4A1qpxctVc
|
||||
z8eTcY8Om5O4f6a/rfxfNFKn9Qyja/OG1xWNobETy7MiMXYjaa8uUx5iFy6kMVaP
|
||||
0BXJ59NLZjMARGw6lVTYDTIvzqqqwLxgliSDfSnqUhubGwvykANPO+93BBx89MRG
|
||||
unNoYGXtPlhNFrAsB1VR8+EyKLv2HQtGCPSFBhrjuzH3gxGibNDDdFQLxxuJWepJ
|
||||
EK1UbTS4ms0NgZ2Uknqn1WRU1Ki7rE4sTy68iZtWpKQXZEJa0IGnuI2sSINGcXCJ
|
||||
oEIgXTMyCILo34Fa/C6VCm2WBgz9zZO8/rHIiQm1J5zqz0DrDwKBUM9C
|
||||
=LYpS
|
||||
-----END PGP PUBLIC KEY BLOCK-----`
|
|
@ -0,0 +1,72 @@
|
|||
package discovery
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
version "github.com/hashicorp/go-version"
|
||||
)
|
||||
|
||||
const VersionZero = "0.0.0"
|
||||
|
||||
// A VersionStr is a string containing a possibly-invalid representation
|
||||
// of a semver version number. Call Parse on it to obtain a real Version
|
||||
// object, or discover that it is invalid.
|
||||
type VersionStr string
|
||||
|
||||
// Parse transforms a VersionStr into a Version if it is
|
||||
// syntactically valid. If it isn't then an error is returned instead.
|
||||
func (s VersionStr) Parse() (Version, error) {
|
||||
raw, err := version.NewVersion(string(s))
|
||||
if err != nil {
|
||||
return Version{}, err
|
||||
}
|
||||
return Version{raw}, nil
|
||||
}
|
||||
|
||||
// MustParse transforms a VersionStr into a Version if it is
|
||||
// syntactically valid. If it isn't then it panics.
|
||||
func (s VersionStr) MustParse() Version {
|
||||
ret, err := s.Parse()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// Version represents a version number that has been parsed from
|
||||
// a semver string and known to be valid.
|
||||
type Version struct {
|
||||
// We wrap this here just because it avoids a proliferation of
|
||||
// direct go-version imports all over the place, and keeps the
|
||||
// version-processing details within this package.
|
||||
raw *version.Version
|
||||
}
|
||||
|
||||
func (v Version) String() string {
|
||||
return v.raw.String()
|
||||
}
|
||||
|
||||
func (v Version) NewerThan(other Version) bool {
|
||||
return v.raw.GreaterThan(other.raw)
|
||||
}
|
||||
|
||||
func (v Version) Equal(other Version) bool {
|
||||
return v.raw.Equal(other.raw)
|
||||
}
|
||||
|
||||
// MinorUpgradeConstraintStr returns a ConstraintStr that would permit
|
||||
// minor upgrades relative to the receiving version.
|
||||
func (v Version) MinorUpgradeConstraintStr() ConstraintStr {
|
||||
segments := v.raw.Segments()
|
||||
return ConstraintStr(fmt.Sprintf("~> %d.%d", segments[0], segments[1]))
|
||||
}
|
||||
|
||||
type Versions []Version
|
||||
|
||||
// Sort sorts version from newest to oldest.
|
||||
func (v Versions) Sort() {
|
||||
sort.Slice(v, func(i, j int) bool {
|
||||
return v[i].NewerThan(v[j])
|
||||
})
|
||||
}
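A small, in-package sketch of the version helpers above; for instance, the minor-upgrade constraint derived from 1.2.5 is "~> 1.2", and Sort orders newest first.

func versionHelpersSketch() {
	v := VersionStr("1.2.5").MustParse()
	fmt.Println(v.MinorUpgradeConstraintStr()) // "~> 1.2"

	vs := Versions{
		VersionStr("0.1.0").MustParse(),
		VersionStr("1.2.5").MustParse(),
		VersionStr("0.9.0").MustParse(),
	}
	vs.Sort()
	fmt.Println(vs) // newest first: [1.2.5 0.9.0 0.1.0]
}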
|
84
vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go
generated
vendored
Normal file
|
@ -0,0 +1,84 @@
|
|||
package discovery
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
version "github.com/hashicorp/go-version"
|
||||
)
|
||||
|
||||
// A ConstraintStr is a string containing a possibly-invalid representation
|
||||
// of a version constraint provided in configuration. Call Parse on it to
|
||||
// obtain a real Constraint object, or discover that it is invalid.
|
||||
type ConstraintStr string
|
||||
|
||||
// Parse transforms a ConstraintStr into a Constraints if it is
|
||||
// syntactically valid. If it isn't then an error is returned instead.
|
||||
func (s ConstraintStr) Parse() (Constraints, error) {
|
||||
raw, err := version.NewConstraint(string(s))
|
||||
if err != nil {
|
||||
return Constraints{}, err
|
||||
}
|
||||
return Constraints{raw}, nil
|
||||
}
|
||||
|
||||
// MustParse is like Parse but it panics if the constraint string is invalid.
|
||||
func (s ConstraintStr) MustParse() Constraints {
|
||||
ret, err := s.Parse()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// Constraints represents a set of versions which any given Version is either
|
||||
// a member of or not.
|
||||
type Constraints struct {
|
||||
raw version.Constraints
|
||||
}
|
||||
|
||||
// AllVersions is a Constraints containing all versions
|
||||
var AllVersions Constraints
|
||||
|
||||
func init() {
|
||||
AllVersions = Constraints{
|
||||
raw: make(version.Constraints, 0),
|
||||
}
|
||||
}
|
||||
|
||||
// Allows returns true if the given version is permitted by the receiving
|
||||
// constraints set.
|
||||
func (s Constraints) Allows(v Version) bool {
|
||||
return s.raw.Check(v.raw)
|
||||
}
|
||||
|
||||
// Append combines the receiving set with the given other set to produce
|
||||
// a set that is the intersection of both sets, which is to say that resulting
|
||||
// constraints contain only the versions that are members of both.
|
||||
func (s Constraints) Append(other Constraints) Constraints {
|
||||
raw := make(version.Constraints, 0, len(s.raw)+len(other.raw))
|
||||
|
||||
// Since "raw" is a list of constraints that remove versions from the set,
|
||||
// "Intersection" is implemented by concatenating together those lists,
|
||||
// thus leaving behind only the versions not removed by either list.
|
||||
raw = append(raw, s.raw...)
|
||||
raw = append(raw, other.raw...)
|
||||
|
||||
// while the set is unordered, we sort these lexically for consistent output
|
||||
sort.Slice(raw, func(i, j int) bool {
|
||||
return raw[i].String() < raw[j].String()
|
||||
})
|
||||
|
||||
return Constraints{raw}
|
||||
}
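A sketch of the intersection behaviour of Append, with arbitrary sample constraints:

func appendSketch() {
	a := ConstraintStr("~> 1.0").MustParse()
	b := ConstraintStr(">= 1.2").MustParse()
	both := a.Append(b)

	_ = both.Allows(VersionStr("1.3.0").MustParse()) // true: satisfies both
	_ = both.Allows(VersionStr("1.1.0").MustParse()) // false: fails ">= 1.2"
	_ = both.Allows(VersionStr("2.0.0").MustParse()) // false: fails "~> 1.0"
}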
|
||||
|
||||
// String returns a string representation of the set members as a set
|
||||
// of range constraints.
|
||||
func (s Constraints) String() string {
|
||||
return s.raw.String()
|
||||
}
|
||||
|
||||
// Unconstrained returns true if and only if the receiver is an empty
|
||||
// constraint set.
|
||||
func (s Constraints) Unconstrained() bool {
|
||||
return len(s.raw) == 0
|
||||
}
|
|
@ -32,8 +32,18 @@ func (s *InmemState) WriteState(state *terraform.State) error {
|
|||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
state.IncrementSerialMaybe(s.state)
|
||||
state = state.DeepCopy()
|
||||
|
||||
if s.state != nil {
|
||||
state.Serial = s.state.Serial
|
||||
|
||||
if !s.state.MarshalEqual(state) {
|
||||
state.Serial++
|
||||
}
|
||||
}
|
||||
|
||||
s.state = state
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -48,8 +48,8 @@ func (s *LocalState) SetState(state *terraform.State) {
|
|||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
s.state = state
|
||||
s.readState = state
|
||||
s.state = state.DeepCopy()
|
||||
s.readState = state.DeepCopy()
|
||||
}
|
||||
|
||||
// StateReader impl.
|
||||
|
@ -74,7 +74,14 @@ func (s *LocalState) WriteState(state *terraform.State) error {
|
|||
}
|
||||
defer s.stateFileOut.Sync()
|
||||
|
||||
s.state = state
|
||||
s.state = state.DeepCopy() // don't want mutations before we actually get this written to disk
|
||||
|
||||
if s.readState != nil && s.state != nil {
|
||||
// We don't trust callers to properly manage serials. Instead, we assume
|
||||
// that a WriteState is always for the next version after what was
|
||||
// most recently read.
|
||||
s.state.Serial = s.readState.Serial
|
||||
}
|
||||
|
||||
if _, err := s.stateFileOut.Seek(0, os.SEEK_SET); err != nil {
|
||||
return err
|
||||
|
@ -88,8 +95,9 @@ func (s *LocalState) WriteState(state *terraform.State) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
s.state.IncrementSerialMaybe(s.readState)
|
||||
s.readState = s.state
|
||||
if !s.state.MarshalEqual(s.readState) {
|
||||
s.state.Serial++
|
||||
}
|
||||
|
||||
if err := terraform.WriteState(s.state, s.stateFileOut); err != nil {
|
||||
return err
|
||||
|
@ -147,7 +155,7 @@ func (s *LocalState) RefreshState() error {
|
|||
}
|
||||
|
||||
s.state = state
|
||||
s.readState = state
|
||||
s.readState = s.state.DeepCopy()
|
||||
return nil
|
||||
}
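A hedged sketch (not part of the diff) of the read/write/persist contract the serial handling above enforces, shown with LocalState; the state file path is illustrative, and the use of the Path field and PersistState is an assumption about the surrounding package rather than something shown in this diff.

func localStateSketch() error {
	ls := &LocalState{Path: "terraform.tfstate"} // hypothetical state file path
	if err := ls.RefreshState(); err != nil {
		return err
	}

	st := ls.State() // a distinct copy, per the StateReader contract later in this diff

	// ...mutate st as needed...

	// WriteState resets the serial to the last-read serial and bumps it only
	// when the new state does not marshal identically to what was read.
	if err := ls.WriteState(st); err != nil {
		return err
	}
	return ls.PersistState()
}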
|
||||
|
||||
|
|
|
@ -51,6 +51,5 @@ var BuiltinClients = map[string]Factory{
|
|||
"gcs": gcsFactory,
|
||||
"http": httpFactory,
|
||||
"local": fileFactory,
|
||||
"swift": swiftFactory,
|
||||
"manta": mantaFactory,
|
||||
}
|
||||
|
|
|
@ -2,6 +2,7 @@ package remote
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/terraform/state"
|
||||
|
@ -33,7 +34,28 @@ func (s *State) WriteState(state *terraform.State) error {
|
|||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
s.state = state
|
||||
if s.readState != nil && !state.SameLineage(s.readState) {
|
||||
return fmt.Errorf("incompatible state lineage; given %s but want %s", state.Lineage, s.readState.Lineage)
|
||||
}
|
||||
|
||||
// We create a deep copy of the state here, because the caller also has
|
||||
// a reference to the given object and can potentially go on to mutate
|
||||
// it after we return, but we want the snapshot at this point in time.
|
||||
s.state = state.DeepCopy()
|
||||
|
||||
// Force our new state to have the same serial as our read state. We'll
|
||||
// update this if PersistState is called later. (We don't require nor trust
|
||||
// the caller to properly maintain serial for transient state objects since
|
||||
// the rest of Terraform treats state as an openly mutable object.)
|
||||
//
|
||||
// If we have no read state then we assume we're either writing a new
|
||||
// state for the first time or we're migrating a state from elsewhere,
|
||||
// and in both cases we wish to retain the lineage and serial from
|
||||
// the given state.
|
||||
if s.readState != nil {
|
||||
s.state.Serial = s.readState.Serial
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -58,7 +80,7 @@ func (s *State) RefreshState() error {
|
|||
}
|
||||
|
||||
s.state = state
|
||||
s.readState = state
|
||||
s.readState = s.state.DeepCopy() // our states must be separate instances so we can track changes
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -67,14 +89,28 @@ func (s *State) PersistState() error {
|
|||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
s.state.IncrementSerialMaybe(s.readState)
|
||||
if !s.state.MarshalEqual(s.readState) {
|
||||
// Our new state does not marshal as byte-for-byte identical to
|
||||
// the old, so we need to increment the serial.
|
||||
// Note that in WriteState we force the serial to match that of
|
||||
// s.readState, if we have a readState.
|
||||
s.state.Serial++
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := terraform.WriteState(s.state, &buf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.Client.Put(buf.Bytes())
|
||||
err := s.Client.Put(buf.Bytes())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// After we've successfully persisted, what we just wrote is our new
|
||||
// reference state until someone calls RefreshState again.
|
||||
s.readState = s.state.DeepCopy()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Lock calls the Client's Lock method if it's implemented.
|
||||
|
|
|
@ -1,362 +0,0 @@
|
|||
package remote
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gophercloud/gophercloud"
|
||||
"github.com/gophercloud/gophercloud/openstack"
|
||||
"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers"
|
||||
"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects"
|
||||
tf_openstack "github.com/hashicorp/terraform/builtin/providers/openstack"
|
||||
)
|
||||
|
||||
const TFSTATE_NAME = "tfstate.tf"
|
||||
|
||||
// SwiftClient implements the Client interface for an Openstack Swift server.
|
||||
type SwiftClient struct {
|
||||
client *gophercloud.ServiceClient
|
||||
authurl string
|
||||
cacert string
|
||||
cert string
|
||||
domainid string
|
||||
domainname string
|
||||
insecure bool
|
||||
key string
|
||||
password string
|
||||
path string
|
||||
region string
|
||||
tenantid string
|
||||
tenantname string
|
||||
userid string
|
||||
username string
|
||||
token string
|
||||
archive bool
|
||||
archivepath string
|
||||
expireSecs int
|
||||
}
|
||||
|
||||
func swiftFactory(conf map[string]string) (Client, error) {
|
||||
client := &SwiftClient{}
|
||||
|
||||
if err := client.validateConfig(conf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func (c *SwiftClient) validateConfig(conf map[string]string) (err error) {
|
||||
authUrl, ok := conf["auth_url"]
|
||||
if !ok {
|
||||
authUrl = os.Getenv("OS_AUTH_URL")
|
||||
if authUrl == "" {
|
||||
return fmt.Errorf("missing 'auth_url' configuration or OS_AUTH_URL environment variable")
|
||||
}
|
||||
}
|
||||
c.authurl = authUrl
|
||||
|
||||
username, ok := conf["user_name"]
|
||||
if !ok {
|
||||
username = os.Getenv("OS_USERNAME")
|
||||
}
|
||||
c.username = username
|
||||
|
||||
userID, ok := conf["user_id"]
|
||||
if !ok {
|
||||
userID = os.Getenv("OS_USER_ID")
|
||||
}
|
||||
c.userid = userID
|
||||
|
||||
token, ok := conf["token"]
|
||||
if !ok {
|
||||
token = os.Getenv("OS_AUTH_TOKEN")
|
||||
}
|
||||
c.token = token
|
||||
|
||||
password, ok := conf["password"]
|
||||
if !ok {
|
||||
password = os.Getenv("OS_PASSWORD")
|
||||
|
||||
}
|
||||
c.password = password
|
||||
if password == "" && token == "" {
|
||||
return fmt.Errorf("missing either password or token configuration or OS_PASSWORD or OS_AUTH_TOKEN environment variable")
|
||||
}
|
||||
|
||||
region, ok := conf["region_name"]
|
||||
if !ok {
|
||||
region = os.Getenv("OS_REGION_NAME")
|
||||
}
|
||||
c.region = region
|
||||
|
||||
tenantID, ok := conf["tenant_id"]
|
||||
if !ok {
|
||||
tenantID = multiEnv([]string{
|
||||
"OS_TENANT_ID",
|
||||
"OS_PROJECT_ID",
|
||||
})
|
||||
}
|
||||
c.tenantid = tenantID
|
||||
|
||||
tenantName, ok := conf["tenant_name"]
|
||||
if !ok {
|
||||
tenantName = multiEnv([]string{
|
||||
"OS_TENANT_NAME",
|
||||
"OS_PROJECT_NAME",
|
||||
})
|
||||
}
|
||||
c.tenantname = tenantName
|
||||
|
||||
domainID, ok := conf["domain_id"]
|
||||
if !ok {
|
||||
domainID = multiEnv([]string{
|
||||
"OS_USER_DOMAIN_ID",
|
||||
"OS_PROJECT_DOMAIN_ID",
|
||||
"OS_DOMAIN_ID",
|
||||
})
|
||||
}
|
||||
c.domainid = domainID
|
||||
|
||||
domainName, ok := conf["domain_name"]
|
||||
if !ok {
|
||||
domainName = multiEnv([]string{
|
||||
"OS_USER_DOMAIN_NAME",
|
||||
"OS_PROJECT_DOMAIN_NAME",
|
||||
"OS_DOMAIN_NAME",
|
||||
"DEFAULT_DOMAIN",
|
||||
})
|
||||
}
|
||||
c.domainname = domainName
|
||||
|
||||
path, ok := conf["path"]
|
||||
if !ok || path == "" {
|
||||
return fmt.Errorf("missing 'path' configuration")
|
||||
}
|
||||
c.path = path
|
||||
|
||||
if archivepath, ok := conf["archive_path"]; ok {
|
||||
log.Printf("[DEBUG] Archivepath set, enabling object versioning")
|
||||
c.archive = true
|
||||
c.archivepath = archivepath
|
||||
}
|
||||
|
||||
if expire, ok := conf["expire_after"]; ok {
|
||||
log.Printf("[DEBUG] Requested that remote state expires after %s", expire)
|
||||
|
||||
if strings.HasSuffix(expire, "d") {
|
||||
log.Printf("[DEBUG] Got a days expire after duration. Converting to hours")
|
||||
days, err := strconv.Atoi(expire[:len(expire)-1])
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error converting expire_after value %s to int: %s", expire, err)
|
||||
}
|
||||
|
||||
expire = fmt.Sprintf("%dh", days*24)
|
||||
log.Printf("[DEBUG] Expire after %s hours", expire)
|
||||
}
|
||||
|
||||
expireDur, err := time.ParseDuration(expire)
|
||||
if err != nil {
|
||||
log.Printf("[DEBUG] Error parsing duration %s: %s", expire, err)
|
||||
return fmt.Errorf("Error parsing expire_after duration '%s': %s", expire, err)
|
||||
}
|
||||
log.Printf("[DEBUG] Seconds duration = %d", int(expireDur.Seconds()))
|
||||
c.expireSecs = int(expireDur.Seconds())
|
||||
}
|
||||
|
||||
c.insecure = false
|
||||
raw, ok := conf["insecure"]
|
||||
if !ok {
|
||||
raw = os.Getenv("OS_INSECURE")
|
||||
}
|
||||
if raw != "" {
|
||||
v, err := strconv.ParseBool(raw)
|
||||
if err != nil {
|
||||
return fmt.Errorf("'insecure' and 'OS_INSECURE' could not be parsed as bool: %s", err)
|
||||
}
|
||||
c.insecure = v
|
||||
}
|
||||
|
||||
cacertFile, ok := conf["cacert_file"]
|
||||
if !ok {
|
||||
cacertFile = os.Getenv("OS_CACERT")
|
||||
}
|
||||
c.cacert = cacertFile
|
||||
|
||||
cert, ok := conf["cert"]
|
||||
if !ok {
|
||||
cert = os.Getenv("OS_CERT")
|
||||
}
|
||||
c.cert = cert
|
||||
|
||||
key, ok := conf["key"]
|
||||
if !ok {
|
||||
key = os.Getenv("OS_KEY")
|
||||
}
|
||||
c.key = key
|
||||
|
||||
ao := gophercloud.AuthOptions{
|
||||
IdentityEndpoint: c.authurl,
|
||||
UserID: c.userid,
|
||||
Username: c.username,
|
||||
TenantID: c.tenantid,
|
||||
TenantName: c.tenantname,
|
||||
Password: c.password,
|
||||
TokenID: c.token,
|
||||
DomainID: c.domainid,
|
||||
DomainName: c.domainname,
|
||||
}
|
||||
|
||||
provider, err := openstack.NewClient(ao.IdentityEndpoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
config := &tls.Config{}
|
||||
|
||||
if c.cacert != "" {
|
||||
caCert, err := ioutil.ReadFile(c.cacert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
caCertPool := x509.NewCertPool()
|
||||
caCertPool.AppendCertsFromPEM(caCert)
|
||||
config.RootCAs = caCertPool
|
||||
}
|
||||
|
||||
if c.insecure {
|
||||
log.Printf("[DEBUG] Insecure mode set")
|
||||
config.InsecureSkipVerify = true
|
||||
}
|
||||
|
||||
if c.cert != "" && c.key != "" {
|
||||
cert, err := tls.LoadX509KeyPair(c.cert, c.key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
config.Certificates = []tls.Certificate{cert}
|
||||
config.BuildNameToCertificate()
|
||||
}
|
||||
|
||||
// if OS_DEBUG is set, log the requests and responses
|
||||
var osDebug bool
|
||||
if os.Getenv("OS_DEBUG") != "" {
|
||||
osDebug = true
|
||||
}
|
||||
|
||||
transport := &http.Transport{Proxy: http.ProxyFromEnvironment, TLSClientConfig: config}
|
||||
provider.HTTPClient = http.Client{
|
||||
Transport: &tf_openstack.LogRoundTripper{
|
||||
Rt: transport,
|
||||
OsDebug: osDebug,
|
||||
},
|
||||
}
|
||||
|
||||
err = openstack.Authenticate(provider, ao)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.client, err = openstack.NewObjectStorageV1(provider, gophercloud.EndpointOpts{
|
||||
Region: c.region,
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *SwiftClient) Get() (*Payload, error) {
|
||||
result := objects.Download(c.client, c.path, TFSTATE_NAME, nil)
|
||||
|
||||
// Extract any errors from result
|
||||
_, err := result.Extract()
|
||||
|
||||
// 404 response is to be expected if the object doesn't already exist!
|
||||
if _, ok := err.(gophercloud.ErrDefault404); ok {
|
||||
log.Printf("[DEBUG] Container doesn't exist to download.")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
bytes, err := result.ExtractContent()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hash := md5.Sum(bytes)
|
||||
payload := &Payload{
|
||||
Data: bytes,
|
||||
MD5: hash[:md5.Size],
|
||||
}
|
||||
|
||||
return payload, nil
|
||||
}
|
||||
|
||||
func (c *SwiftClient) Put(data []byte) error {
|
||||
if err := c.ensureContainerExists(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Creating object %s at path %s", TFSTATE_NAME, c.path)
|
||||
reader := bytes.NewReader(data)
|
||||
createOpts := objects.CreateOpts{
|
||||
Content: reader,
|
||||
}
|
||||
|
||||
if c.expireSecs != 0 {
|
||||
log.Printf("[DEBUG] ExpireSecs = %d", c.expireSecs)
|
||||
createOpts.DeleteAfter = c.expireSecs
|
||||
}
|
||||
|
||||
result := objects.Create(c.client, c.path, TFSTATE_NAME, createOpts)
|
||||
|
||||
return result.Err
|
||||
}
|
||||
|
||||
func (c *SwiftClient) Delete() error {
|
||||
result := objects.Delete(c.client, c.path, TFSTATE_NAME, nil)
|
||||
return result.Err
|
||||
}
|
||||
|
||||
func (c *SwiftClient) ensureContainerExists() error {
|
||||
containerOpts := &containers.CreateOpts{}
|
||||
|
||||
if c.archive {
|
||||
log.Printf("[DEBUG] Creating container %s", c.archivepath)
|
||||
result := containers.Create(c.client, c.archivepath, nil)
|
||||
if result.Err != nil {
|
||||
log.Printf("[DEBUG] Error creating container %s: %s", c.archivepath, result.Err)
|
||||
return result.Err
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Enabling Versioning on container %s", c.path)
|
||||
containerOpts.VersionsLocation = c.archivepath
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Creating container %s", c.path)
|
||||
result := containers.Create(c.client, c.path, containerOpts)
|
||||
if result.Err != nil {
|
||||
return result.Err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func multiEnv(ks []string) string {
|
||||
for _, k := range ks {
|
||||
if v := os.Getenv(k); v != "" {
|
||||
return v
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
|
@ -36,6 +36,11 @@ type State interface {
|
|||
// the state here must not error. Loading the state fresh (an operation that
|
||||
// can likely error) should be implemented by RefreshState. If a state hasn't
|
||||
// been loaded yet, it is okay for State to return nil.
|
||||
//
|
||||
// Each caller of this function must get a distinct copy of the state, and
|
||||
// it must also be distinct from any instance cached inside the reader, to
|
||||
// ensure that mutations of the returned state will not affect the values
|
||||
// returned to other callers.
|
||||
type StateReader interface {
|
||||
State() *terraform.State
|
||||
}
|
||||
|
@ -43,6 +48,15 @@ type StateReader interface {
|
|||
// StateWriter is the interface that must be implemented by something that
|
||||
// can write a state. Writing the state can be cached or in-memory, as
|
||||
// full persistence should be implemented by StatePersister.
|
||||
//
|
||||
// Implementors that cache the state in memory _must_ take a copy of it
|
||||
// before returning, since the caller may continue to modify it once
|
||||
// control returns. The caller must ensure that the state instance is not
|
||||
// concurrently modified _during_ the call, or behavior is undefined.
|
||||
//
|
||||
// If an object implements StatePersister in conjunction with StateReader
|
||||
// then these methods must coordinate such that a subsequent read returns
|
||||
// a copy of the most recent write, even if it has not yet been persisted.
|
||||
type StateWriter interface {
|
||||
WriteState(*terraform.State) error
|
||||
}
|
||||
|
@ -57,6 +71,10 @@ type StateRefresher interface {
|
|||
// StatePersister is implemented to truly persist a state. Whereas StateWriter
|
||||
// is allowed to perhaps be caching in memory, PersistState must write the
|
||||
// state to some durable storage.
|
||||
//
|
||||
// If an object implements StatePersister in conjunction with StateReader
|
||||
// and/or StateRefresher then these methods must coordinate such that
|
||||
// subsequent reads after a persist return an updated value.
|
||||
type StatePersister interface {
|
||||
PersistState() error
|
||||
}
|
||||
|
|
|
@ -10,119 +10,126 @@ import (
|
|||
// TestState is a helper for testing state implementations. It is expected
|
||||
// that the given implementation is pre-loaded with the TestStateInitial
|
||||
// state.
|
||||
func TestState(t *testing.T, s interface{}) {
|
||||
reader, ok := s.(StateReader)
|
||||
if !ok {
|
||||
t.Fatalf("must at least be a StateReader")
|
||||
func TestState(t *testing.T, s State) {
|
||||
if err := s.RefreshState(); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
// If it implements refresh, refresh
|
||||
if rs, ok := s.(StateRefresher); ok {
|
||||
if err := rs.RefreshState(); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
// Check that the initial state is correct.
|
||||
// These do have different Lineages, but we will replace current below.
|
||||
initial := TestStateInitial()
|
||||
if state := s.State(); !state.Equal(initial) {
|
||||
t.Fatalf("state does not match expected initial state:\n%#v\n\n%#v", state, initial)
|
||||
}
|
||||
|
||||
// current will track our current state
|
||||
current := TestStateInitial()
|
||||
|
||||
// Check that the initial state is correct
|
||||
if state := reader.State(); !current.Equal(state) {
|
||||
t.Fatalf("not initial:\n%#v\n\n%#v", state, current)
|
||||
}
|
||||
// Now we've proven that the state we're starting with is an initial
|
||||
// state, we'll complete our work here with that state, since otherwise
|
||||
// further writes would violate the invariant that we only try to write
|
||||
// states that share the same lineage as what was initially written.
|
||||
current := s.State()
|
||||
|
||||
// Write a new state and verify that we have it
|
||||
if ws, ok := s.(StateWriter); ok {
|
||||
current.AddModuleState(&terraform.ModuleState{
|
||||
Path: []string{"root"},
|
||||
Outputs: map[string]*terraform.OutputState{
|
||||
"bar": &terraform.OutputState{
|
||||
Type: "string",
|
||||
Sensitive: false,
|
||||
Value: "baz",
|
||||
},
|
||||
current.AddModuleState(&terraform.ModuleState{
|
||||
Path: []string{"root"},
|
||||
Outputs: map[string]*terraform.OutputState{
|
||||
"bar": &terraform.OutputState{
|
||||
Type: "string",
|
||||
Sensitive: false,
|
||||
Value: "baz",
|
||||
},
|
||||
})
|
||||
},
|
||||
})
|
||||
|
||||
if err := ws.WriteState(current); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
if err := s.WriteState(current); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
if actual := reader.State(); !actual.Equal(current) {
|
||||
t.Fatalf("bad:\n%#v\n\n%#v", actual, current)
|
||||
}
|
||||
if actual := s.State(); !actual.Equal(current) {
|
||||
t.Fatalf("bad:\n%#v\n\n%#v", actual, current)
|
||||
}
|
||||
|
||||
// Test persistence
|
||||
if ps, ok := s.(StatePersister); ok {
|
||||
if err := ps.PersistState(); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
// Refresh if we got it
|
||||
if rs, ok := s.(StateRefresher); ok {
|
||||
if err := rs.RefreshState(); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Just set the serials the same... Then compare.
|
||||
actual := reader.State()
|
||||
if !actual.Equal(current) {
|
||||
t.Fatalf("bad: %#v\n\n%#v", actual, current)
|
||||
}
|
||||
if err := s.PersistState(); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
// If we can write and persist then verify that the serial
|
||||
// is only implemented on change.
|
||||
writer, writeOk := s.(StateWriter)
|
||||
persister, persistOk := s.(StatePersister)
|
||||
if writeOk && persistOk {
|
||||
// Same serial
|
||||
serial := current.Serial
|
||||
if err := writer.WriteState(current); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
if err := persister.PersistState(); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
// Refresh if we got it
|
||||
if err := s.RefreshState(); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
if reader.State().Serial != serial {
|
||||
t.Fatalf("bad: expected %d, got %d", serial, reader.State().Serial)
|
||||
}
|
||||
if s.State().Lineage != current.Lineage {
|
||||
t.Fatalf("Lineage changed from %s to %s", s.State().Lineage, current.Lineage)
|
||||
}
|
||||
|
||||
// Change the serial
|
||||
current = current.DeepCopy()
|
||||
current.Modules = []*terraform.ModuleState{
|
||||
&terraform.ModuleState{
|
||||
Path: []string{"root", "somewhere"},
|
||||
Outputs: map[string]*terraform.OutputState{
|
||||
"serialCheck": &terraform.OutputState{
|
||||
Type: "string",
|
||||
Sensitive: false,
|
||||
Value: "true",
|
||||
},
|
||||
// Just set the serials the same... Then compare.
|
||||
actual := s.State()
|
||||
if !actual.Equal(current) {
|
||||
t.Fatalf("bad: %#v\n\n%#v", actual, current)
|
||||
}
|
||||
|
||||
// Same serial
|
||||
serial := s.State().Serial
|
||||
if err := s.WriteState(current); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
if err := s.PersistState(); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
if s.State().Serial != serial {
|
||||
t.Fatalf("serial changed after persisting with no changes: got %d, want %d", s.State().Serial, serial)
|
||||
}
|
||||
|
||||
// Change the serial
|
||||
current = current.DeepCopy()
|
||||
current.Modules = []*terraform.ModuleState{
|
||||
&terraform.ModuleState{
|
||||
Path: []string{"root", "somewhere"},
|
||||
Outputs: map[string]*terraform.OutputState{
|
||||
"serialCheck": &terraform.OutputState{
|
||||
Type: "string",
|
||||
Sensitive: false,
|
||||
Value: "true",
|
||||
},
|
||||
},
|
||||
}
|
||||
if err := writer.WriteState(current); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
if err := persister.PersistState(); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
},
|
||||
}
|
||||
if err := s.WriteState(current); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
if err := s.PersistState(); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
if reader.State().Serial <= serial {
|
||||
t.Fatalf("bad: expected %d, got %d", serial, reader.State().Serial)
|
||||
}
|
||||
if s.State().Serial <= serial {
|
||||
t.Fatalf("serial incorrect after persisting with changes: got %d, want > %d", s.State().Serial, serial)
|
||||
}
|
||||
|
||||
// Check that State() returns a copy by modifying the copy and comparing
|
||||
// to the current state.
|
||||
stateCopy := reader.State()
|
||||
stateCopy.Serial++
|
||||
if reflect.DeepEqual(stateCopy, current) {
|
||||
t.Fatal("State() should return a copy")
|
||||
}
|
||||
if s.State().Version != current.Version {
|
||||
t.Fatalf("Version changed from %d to %d", s.State().Version, current.Version)
|
||||
}
|
||||
|
||||
if s.State().TFVersion != current.TFVersion {
|
||||
t.Fatalf("TFVersion changed from %s to %s", s.State().TFVersion, current.TFVersion)
|
||||
}
|
||||
|
||||
// verify that Lineage doesn't change along with Serial, or during copying.
|
||||
if s.State().Lineage != current.Lineage {
|
||||
t.Fatalf("Lineage changed from %s to %s", s.State().Lineage, current.Lineage)
|
||||
}
|
||||
|
||||
// Check that State() returns a copy by modifying the copy and comparing
|
||||
// to the current state.
|
||||
stateCopy := s.State()
|
||||
stateCopy.Serial++
|
||||
if reflect.DeepEqual(stateCopy, s.State()) {
|
||||
t.Fatal("State() should return a copy")
|
||||
}
|
||||
|
||||
// our current expected state should also marshal identically to the persisted state
|
||||
if !current.MarshalEqual(s.State()) {
|
||||
t.Fatalf("Persisted state altered unexpectedly. Expected: %#v\b Got: %#v", current, s.State())
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -57,12 +57,17 @@ type ContextOpts struct {
|
|||
Parallelism int
|
||||
State *State
|
||||
StateFutureAllowed bool
|
||||
Providers map[string]ResourceProviderFactory
|
||||
ProviderResolver ResourceProviderResolver
|
||||
Provisioners map[string]ResourceProvisionerFactory
|
||||
Shadow bool
|
||||
Targets []string
|
||||
Variables map[string]interface{}
|
||||
|
||||
// If non-nil, will apply as additional constraints on the provider
|
||||
// plugins that will be requested from the provider resolver.
|
||||
ProviderSHA256s map[string][]byte
|
||||
SkipProviderVerify bool
|
||||
|
||||
UIInput UIInput
|
||||
}
|
||||
|
||||
|
@ -102,6 +107,7 @@ type Context struct {
|
|||
l sync.Mutex // Lock acquired during any task
|
||||
parallelSem Semaphore
|
||||
providerInputConfig map[string]map[string]interface{}
|
||||
providerSHA256s map[string][]byte
|
||||
runLock sync.Mutex
|
||||
runCond *sync.Cond
|
||||
runContext context.Context
|
||||
|
@ -166,7 +172,6 @@ func NewContext(opts *ContextOpts) (*Context, error) {
|
|||
// set by environment variables if necessary. This includes
|
||||
// values taken from -var-file in addition.
|
||||
variables := make(map[string]interface{})
|
||||
|
||||
if opts.Module != nil {
|
||||
var err error
|
||||
variables, err = Variables(opts.Module, opts.Variables)
|
||||
|
@ -175,6 +180,23 @@ func NewContext(opts *ContextOpts) (*Context, error) {
|
|||
}
|
||||
}
|
||||
|
||||
// Bind available provider plugins to the constraints in config
|
||||
var providers map[string]ResourceProviderFactory
|
||||
if opts.ProviderResolver != nil {
|
||||
var err error
|
||||
deps := ModuleTreeDependencies(opts.Module, state)
|
||||
reqd := deps.AllPluginRequirements()
|
||||
if opts.ProviderSHA256s != nil && !opts.SkipProviderVerify {
|
||||
reqd.LockExecutables(opts.ProviderSHA256s)
|
||||
}
|
||||
providers, err = resourceProviderFactories(opts.ProviderResolver, reqd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
providers = make(map[string]ResourceProviderFactory)
|
||||
}
|
||||
|
||||
diff := opts.Diff
|
||||
if diff == nil {
|
||||
diff = &Diff{}
|
||||
|
@ -182,7 +204,7 @@ func NewContext(opts *ContextOpts) (*Context, error) {
|
|||
|
||||
return &Context{
|
||||
components: &basicComponentFactory{
|
||||
providers: opts.Providers,
|
||||
providers: providers,
|
||||
provisioners: opts.Provisioners,
|
||||
},
|
||||
destroy: opts.Destroy,
|
||||
|
@ -198,6 +220,7 @@ func NewContext(opts *ContextOpts) (*Context, error) {
|
|||
|
||||
parallelSem: NewSemaphore(par),
|
||||
providerInputConfig: make(map[string]map[string]interface{}),
|
||||
providerSHA256s: opts.ProviderSHA256s,
|
||||
sh: sh,
|
||||
}, nil
|
||||
}
|
||||
|
@ -509,6 +532,9 @@ func (c *Context) Plan() (*Plan, error) {
|
|||
Vars: c.variables,
|
||||
State: c.state,
|
||||
Targets: c.targets,
|
||||
|
||||
TerraformVersion: VersionString(),
|
||||
ProviderSHA256s: c.providerSHA256s,
|
||||
}
|
||||
|
||||
var operation walkOperation
|
||||
|
|
|
@ -28,7 +28,7 @@ const (
|
|||
// multiVal matches the index key to a flatmapped set, list or map
|
||||
var multiVal = regexp.MustCompile(`\.(#|%)$`)
|
||||
|
||||
// Diff trackes the changes that are necessary to apply a configuration
|
||||
// Diff tracks the changes that are necessary to apply a configuration
|
||||
// to an existing infrastructure.
|
||||
type Diff struct {
|
||||
// Modules contains all the modules that have a diff
|
||||
|
@ -370,7 +370,7 @@ type InstanceDiff struct {
|
|||
|
||||
// Meta is a simple K/V map that is stored in a diff and persisted to
|
||||
// plans but otherwise is completely ignored by Terraform core. It is
|
||||
// mean to be used for additional data a resource may want to pass through.
|
||||
// meant to be used for additional data a resource may want to pass through.
|
||||
// The value here must only contain Go primitives and collections.
|
||||
Meta map[string]interface{}
|
||||
}
|
||||
|
@ -551,7 +551,7 @@ func (d *InstanceDiff) SetDestroyDeposed(b bool) {
|
|||
}
|
||||
|
||||
// These methods are properly locked, for use outside other InstanceDiff
|
||||
// methods but everywhere else within in the terraform package.
|
||||
// methods but everywhere else within the terraform package.
|
||||
// TODO refactor the locking scheme
|
||||
func (d *InstanceDiff) SetTainted(b bool) {
|
||||
d.mu.Lock()
|
||||
|
|
|
@ -81,6 +81,12 @@ type EvalDiff struct {
|
|||
// Resource is needed to fetch the ignore_changes list so we can
|
||||
// filter user-requested ignored attributes from the diff.
|
||||
Resource *config.Resource
|
||||
|
||||
// Stub is used to flag the generated InstanceDiff as a stub. This is used to
|
||||
// ensure that the node exists to perform interpolations and generate
|
||||
// computed paths off of, but not as an actual diff where resources should be
|
||||
// counted, and not as a diff that should be acted on.
|
||||
Stub bool
|
||||
}
|
||||
|
||||
// TODO: test
|
||||
|
@ -90,11 +96,13 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
|
|||
provider := *n.Provider
|
||||
|
||||
// Call pre-diff hook
|
||||
err := ctx.Hook(func(h Hook) (HookAction, error) {
|
||||
return h.PreDiff(n.Info, state)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if !n.Stub {
|
||||
err := ctx.Hook(func(h Hook) (HookAction, error) {
|
||||
return h.PreDiff(n.Info, state)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// The state for the diff must never be nil
|
||||
|
@ -158,15 +166,19 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
|
|||
}
|
||||
|
||||
// Call post-refresh hook
|
||||
err = ctx.Hook(func(h Hook) (HookAction, error) {
|
||||
return h.PostDiff(n.Info, diff)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if !n.Stub {
|
||||
err = ctx.Hook(func(h Hook) (HookAction, error) {
|
||||
return h.PostDiff(n.Info, diff)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Update our output
|
||||
*n.OutputDiff = diff
|
||||
// Update our output if we care
|
||||
if n.OutputDiff != nil {
|
||||
*n.OutputDiff = diff
|
||||
}
|
||||
|
||||
// Update the state if we care
|
||||
if n.OutputState != nil {
|
||||
|
|
|
@ -117,7 +117,15 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer {
|
|||
&CountBoundaryTransformer{},
|
||||
|
||||
// Target
|
||||
&TargetsTransformer{Targets: b.Targets},
|
||||
&TargetsTransformer{
|
||||
Targets: b.Targets,
|
||||
|
||||
// Resource nodes from config have not yet been expanded for
|
||||
// "count", so we must apply targeting without indices. Exact
|
||||
// targeting will be dealt with later when these resources
|
||||
// DynamicExpand.
|
||||
IgnoreIndices: true,
|
||||
},
|
||||
|
||||
// Close opened plugin connections
|
||||
&CloseProviderTransformer{},
|
||||
|
|
|
@ -144,7 +144,15 @@ func (b *RefreshGraphBuilder) Steps() []GraphTransformer {
|
|||
&ReferenceTransformer{},
|
||||
|
||||
// Target
|
||||
&TargetsTransformer{Targets: b.Targets},
|
||||
&TargetsTransformer{
|
||||
Targets: b.Targets,
|
||||
|
||||
// Resource nodes from config have not yet been expanded for
|
||||
// "count", so we must apply targeting without indices. Exact
|
||||
// targeting will be dealt with later when these resources
|
||||
// DynamicExpand.
|
||||
IgnoreIndices: true,
|
||||
},
|
||||
|
||||
// Close opened plugin connections
|
||||
&CloseProviderTransformer{},
|
||||
|
|
|
@ -317,9 +317,13 @@ func (i *Interpolater) valueTerraformVar(
|
|||
n string,
|
||||
v *config.TerraformVariable,
|
||||
result map[string]ast.Variable) error {
|
||||
if v.Field != "env" {
|
||||
|
||||
// "env" is supported for backward compatibility, but it's deprecated and
|
||||
// so we won't advertise it as being allowed in the error message. It will
|
||||
// be removed in a future version of Terraform.
|
||||
if v.Field != "workspace" && v.Field != "env" {
|
||||
return fmt.Errorf(
|
||||
"%s: only supported key for 'terraform.X' interpolations is 'env'", n)
|
||||
"%s: only supported key for 'terraform.X' interpolations is 'workspace'", n)
|
||||
}
|
||||
|
||||
if i.Meta == nil {
|
||||
|
|
156
vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go
generated
vendored
Normal file
|
@ -0,0 +1,156 @@
|
|||
package terraform
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform/config"
|
||||
"github.com/hashicorp/terraform/config/module"
|
||||
"github.com/hashicorp/terraform/moduledeps"
|
||||
"github.com/hashicorp/terraform/plugin/discovery"
|
||||
)
|
||||
|
||||
// ModuleTreeDependencies returns the dependencies of the tree of modules
|
||||
// described by the given configuration tree and state.
|
||||
//
|
||||
// Both configuration and state are required because there can be resources
|
||||
// implied by instances in the state that no longer exist in config.
|
||||
//
|
||||
// This function will panic if any invalid version constraint strings are
|
||||
// present in the configuration. This is guaranteed not to happen for any
|
||||
// configuration that has passed a call to Config.Validate().
|
||||
func ModuleTreeDependencies(root *module.Tree, state *State) *moduledeps.Module {
|
||||
|
||||
// First we walk the configuration tree to build the overall structure
|
||||
// and capture the explicit/implicit/inherited provider dependencies.
|
||||
deps := moduleTreeConfigDependencies(root, nil)
|
||||
|
||||
// Next we walk over the resources in the state to catch any additional
|
||||
// dependencies created by existing resources that are no longer in config.
|
||||
// Most things we find in state will already be present in 'deps', but
|
||||
// we're interested in the rare thing that isn't.
|
||||
moduleTreeMergeStateDependencies(deps, state)
|
||||
|
||||
return deps
|
||||
}
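
A minimal sketch of how a caller might consume the dependency tree built by ModuleTreeDependencies, assuming a loaded module tree and a prior state are already available. The printProviderDeps helper and its package are hypothetical and not part of this commit.

// Hypothetical helper, not part of this commit: walks the moduledeps tree
// returned by ModuleTreeDependencies and prints each module's provider needs.
package depsexample

import (
	"fmt"

	"github.com/hashicorp/terraform/config/module"
	"github.com/hashicorp/terraform/moduledeps"
	"github.com/hashicorp/terraform/terraform"
)

func printProviderDeps(root *module.Tree, state *terraform.State) {
	deps := terraform.ModuleTreeDependencies(root, state)

	var walk func(m *moduledeps.Module, path string)
	walk = func(m *moduledeps.Module, path string) {
		for inst, dep := range m.Providers {
			// inst names the provider (possibly with an alias); dep records the
			// version constraints and why the dependency exists (explicit,
			// implicit, inherited, or found only in state).
			fmt.Printf("%s requires provider %v (reason %v)\n", path, inst, dep.Reason)
		}
		for _, c := range m.Children {
			walk(c, path+"."+c.Name)
		}
	}
	walk(deps, deps.Name)
}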
|
||||
|
||||
func moduleTreeConfigDependencies(root *module.Tree, inheritProviders map[string]*config.ProviderConfig) *moduledeps.Module {
|
||||
if root == nil {
|
||||
// If no config is provided, we'll make a synthetic root.
|
||||
// This isn't necessarily correct if we're called with a nil that
|
||||
// *isn't* at the root, but in practice that can never happen.
|
||||
return &moduledeps.Module{
|
||||
Name: "root",
|
||||
}
|
||||
}
|
||||
|
||||
ret := &moduledeps.Module{
|
||||
Name: root.Name(),
|
||||
}
|
||||
|
||||
cfg := root.Config()
|
||||
providerConfigs := cfg.ProviderConfigsByFullName()
|
||||
|
||||
// Provider dependencies
|
||||
{
|
||||
providers := make(moduledeps.Providers, len(providerConfigs))
|
||||
|
||||
// Any providerConfigs elements are *explicit* provider dependencies,
|
||||
// which is the only situation where the user might provide an actual
|
||||
// version constraint. We'll take care of these first.
|
||||
for fullName, pCfg := range providerConfigs {
|
||||
inst := moduledeps.ProviderInstance(fullName)
|
||||
versionSet := discovery.AllVersions
|
||||
if pCfg.Version != "" {
|
||||
versionSet = discovery.ConstraintStr(pCfg.Version).MustParse()
|
||||
}
|
||||
providers[inst] = moduledeps.ProviderDependency{
|
||||
Constraints: versionSet,
|
||||
Reason: moduledeps.ProviderDependencyExplicit,
|
||||
}
|
||||
}
|
||||
|
||||
// Each resource in the configuration creates an *implicit* provider
|
||||
// dependency, though we'll only record it if there isn't already
|
||||
// an explicit dependency on the same provider.
|
||||
for _, rc := range cfg.Resources {
|
||||
fullName := rc.ProviderFullName()
|
||||
inst := moduledeps.ProviderInstance(fullName)
|
||||
if _, exists := providers[inst]; exists {
|
||||
// Explicit dependency already present
|
||||
continue
|
||||
}
|
||||
|
||||
reason := moduledeps.ProviderDependencyImplicit
|
||||
if _, inherited := inheritProviders[fullName]; inherited {
|
||||
reason = moduledeps.ProviderDependencyInherited
|
||||
}
|
||||
|
||||
providers[inst] = moduledeps.ProviderDependency{
|
||||
Constraints: discovery.AllVersions,
|
||||
Reason: reason,
|
||||
}
|
||||
}
|
||||
|
||||
ret.Providers = providers
|
||||
}
|
||||
|
||||
childInherit := make(map[string]*config.ProviderConfig)
|
||||
for k, v := range inheritProviders {
|
||||
childInherit[k] = v
|
||||
}
|
||||
for k, v := range providerConfigs {
|
||||
childInherit[k] = v
|
||||
}
|
||||
for _, c := range root.Children() {
|
||||
ret.Children = append(ret.Children, moduleTreeConfigDependencies(c, childInherit))
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func moduleTreeMergeStateDependencies(root *moduledeps.Module, state *State) {
|
||||
if state == nil {
|
||||
return
|
||||
}
|
||||
|
||||
findModule := func(path []string) *moduledeps.Module {
|
||||
module := root
|
||||
for _, name := range path[1:] { // skip initial "root"
|
||||
var next *moduledeps.Module
|
||||
for _, cm := range module.Children {
|
||||
if cm.Name == name {
|
||||
next = cm
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if next == nil {
|
||||
// If we didn't find a next node, we'll need to make one
|
||||
next = &moduledeps.Module{
|
||||
Name: name,
|
||||
}
|
||||
module.Children = append(module.Children, next)
|
||||
}
|
||||
|
||||
module = next
|
||||
}
|
||||
return module
|
||||
}
|
||||
|
||||
for _, ms := range state.Modules {
|
||||
module := findModule(ms.Path)
|
||||
|
||||
for _, is := range ms.Resources {
|
||||
fullName := config.ResourceProviderFullName(is.Type, is.Provider)
|
||||
inst := moduledeps.ProviderInstance(fullName)
|
||||
if _, exists := module.Providers[inst]; !exists {
|
||||
if module.Providers == nil {
|
||||
module.Providers = make(moduledeps.Providers)
|
||||
}
|
||||
module.Providers[inst] = moduledeps.ProviderDependency{
|
||||
Constraints: discovery.AllVersions,
|
||||
Reason: moduledeps.ProviderDependencyFromState,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -45,13 +45,6 @@ func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph,
|
|||
Addr: n.ResourceAddr(),
|
||||
},
|
||||
|
||||
// Switch up any node missing state to a plannable resource. This helps
|
||||
// catch cases where data sources depend on the counts from this resource
|
||||
// during a scale out.
|
||||
&ResourceRefreshPlannableTransformer{
|
||||
State: state,
|
||||
},
|
||||
|
||||
// Add the count orphans to make sure these resources are accounted for
|
||||
// during a scale in.
|
||||
&OrphanResourceCountTransformer{
|
||||
|
@ -100,6 +93,9 @@ func (n *NodeRefreshableManagedResourceInstance) EvalTree() EvalNode {
|
|||
// Eval info is different depending on what kind of resource this is
|
||||
switch mode := n.Addr.Mode; mode {
|
||||
case config.ManagedResourceMode:
|
||||
if n.ResourceState == nil {
|
||||
return n.evalTreeManagedResourceNoState()
|
||||
}
|
||||
return n.evalTreeManagedResource()
|
||||
|
||||
case config.DataResourceMode:
|
||||
|
@ -176,3 +172,88 @@ func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalN
|
|||
},
|
||||
}
|
||||
}
|
||||
|
||||
// evalTreeManagedResourceNoState produces an EvalSequence for refresh resource
|
||||
// nodes that don't have state attached. An example of where this functionality
|
||||
// is useful is when a resource that already exists in state is being scaled
|
||||
// out, ie: has its resource count increased. In this case, the scaled out node
|
||||
// needs to be available to other nodes (namely data sources) that may depend
|
||||
// on it for proper interpolation, or confusing "index out of range" errors can
|
||||
// occur.
|
||||
//
|
||||
// The steps in this sequence are very similar to the steps carried out in
|
||||
// plan, but nothing is done with the diff after it is created - it is dropped,
|
||||
// and its changes are not counted in the UI.
|
||||
func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResourceNoState() EvalNode {
|
||||
// Declare a bunch of variables that are used for state during
|
||||
// evaluation. Most of this are written to by-address below.
|
||||
var provider ResourceProvider
|
||||
var state *InstanceState
|
||||
var resourceConfig *ResourceConfig
|
||||
|
||||
addr := n.NodeAbstractResource.Addr
|
||||
stateID := addr.stateId()
|
||||
info := &InstanceInfo{
|
||||
Id: stateID,
|
||||
Type: addr.Type,
|
||||
ModulePath: normalizeModulePath(addr.Path),
|
||||
}
|
||||
|
||||
// Build the resource for eval
|
||||
resource := &Resource{
|
||||
Name: addr.Name,
|
||||
Type: addr.Type,
|
||||
CountIndex: addr.Index,
|
||||
}
|
||||
if resource.CountIndex < 0 {
|
||||
resource.CountIndex = 0
|
||||
}
|
||||
|
||||
// Determine the dependencies for the state.
|
||||
stateDeps := n.StateReferences()
|
||||
|
||||
return &EvalSequence{
|
||||
Nodes: []EvalNode{
|
||||
&EvalInterpolate{
|
||||
Config: n.Config.RawConfig.Copy(),
|
||||
Resource: resource,
|
||||
Output: &resourceConfig,
|
||||
},
|
||||
&EvalGetProvider{
|
||||
Name: n.ProvidedBy()[0],
|
||||
Output: &provider,
|
||||
},
|
||||
// Re-run validation to catch any errors we missed, e.g. type
|
||||
// mismatches on computed values.
|
||||
&EvalValidateResource{
|
||||
Provider: &provider,
|
||||
Config: &resourceConfig,
|
||||
ResourceName: n.Config.Name,
|
||||
ResourceType: n.Config.Type,
|
||||
ResourceMode: n.Config.Mode,
|
||||
IgnoreWarnings: true,
|
||||
},
|
||||
&EvalReadState{
|
||||
Name: stateID,
|
||||
Output: &state,
|
||||
},
|
||||
&EvalDiff{
|
||||
Name: stateID,
|
||||
Info: info,
|
||||
Config: &resourceConfig,
|
||||
Resource: n.Config,
|
||||
Provider: &provider,
|
||||
State: &state,
|
||||
OutputState: &state,
|
||||
Stub: true,
|
||||
},
|
||||
&EvalWriteState{
|
||||
Name: stateID,
|
||||
ResourceType: n.Config.Type,
|
||||
Provider: n.Config.Provider,
|
||||
Dependencies: stateDeps,
|
||||
State: &state,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/terraform/config/module"
|
||||
|
@ -31,6 +32,9 @@ type Plan struct {
|
|||
Vars map[string]interface{}
|
||||
Targets []string
|
||||
|
||||
TerraformVersion string
|
||||
ProviderSHA256s map[string][]byte
|
||||
|
||||
// Backend is the backend that this plan should use and store data with.
|
||||
Backend *BackendState
|
||||
|
||||
|
@ -40,19 +44,58 @@ type Plan struct {
 // Context returns a Context with the data encapsulated in this plan.
 //
 // The following fields in opts are overridden by the plan: Config,
-// Diff, State, Variables.
-func (p *Plan) Context(opts *ContextOpts) *Context {
+// Diff, Variables.
+//
+// If State is not provided, it is set from the plan. If it _is_ provided,
+// it must be Equal to the state stored in plan, but may have a newer
+// serial.
+func (p *Plan) Context(opts *ContextOpts) (*Context, error) {
+	var err error
+	opts, err = p.contextOpts(opts)
+	if err != nil {
+		return nil, err
+	}
+	return NewContext(opts)
+}
+
+// contextOpts mutates the given base ContextOpts in place to use input
+// objects obtained from the receiving plan.
+func (p *Plan) contextOpts(base *ContextOpts) (*ContextOpts, error) {
+	opts := base
+
 	opts.Diff = p.Diff
 	opts.Module = p.Module
-	opts.State = p.State
 	opts.Targets = p.Targets
+	opts.ProviderSHA256s = p.ProviderSHA256s
+
+	if opts.State == nil {
+		opts.State = p.State
+	} else if !opts.State.Equal(p.State) {
+		// Even if we're overriding the state, it should be logically equal
+		// to what's in plan. The only valid change to have made by the time
+		// we get here is to have incremented the serial.
+		//
+		// Due to the fact that serialization may change the representation of
+		// the state, there is little chance that these aren't actually equal.
+		// Log the error condition for reference, but continue with the state
+		// we have.
+		log.Println("[WARNING] Plan state and ContextOpts state are not equal")
+	}
+
+	thisVersion := VersionString()
+	if p.TerraformVersion != "" && p.TerraformVersion != thisVersion {
+		return nil, fmt.Errorf(
+			"plan was created with a different version of Terraform (created with %s, but running %s)",
+			p.TerraformVersion, thisVersion,
+		)
+	}
 
 	opts.Variables = make(map[string]interface{})
 	for k, v := range p.Vars {
 		opts.Variables[k] = v
 	}
 
-	return NewContext(opts)
+	return opts, nil
 }
||||
func (p *Plan) String() string {
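
For context, a sketch of how the new two-step Plan.Context API might be used by a caller that holds a saved plan file; the file path, the empty ContextOpts, and the wrapper package are illustrative only and not part of this commit.

// Hypothetical caller: reads a saved plan with ReadPlan (the counterpart of
// WritePlan referenced below) and turns it into a Context.
package planexample

import (
	"os"

	"github.com/hashicorp/terraform/terraform"
)

func contextFromPlanFile(path string) (*terraform.Context, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	plan, err := terraform.ReadPlan(f)
	if err != nil {
		return nil, err
	}

	// Context applies the plan's Diff, Module, State, Targets, and Variables
	// onto the supplied options, and fails if the plan was created by a
	// different Terraform version.
	return plan.Context(&terraform.ContextOpts{})
}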
|
||||
|
@ -86,7 +129,7 @@ func (p *Plan) init() {
 // the ability in the future to change the file format if we want for any
 // reason.
 const planFormatMagic = "tfplan"
-const planFormatVersion byte = 1
+const planFormatVersion byte = 2
 
 // ReadPlan reads a plan structure out of a reader in the format that
 // was written by WritePlan.
|
|
@ -8,6 +8,7 @@ import (
|
|||
"strings"
|
||||
|
||||
"github.com/hashicorp/terraform/config"
|
||||
"github.com/hashicorp/terraform/config/module"
|
||||
)
|
||||
|
||||
// ResourceAddress is a way of identifying an individual resource (or,
|
||||
|
@ -89,6 +90,51 @@ func (r *ResourceAddress) String() string {
|
|||
return strings.Join(result, ".")
|
||||
}
|
||||
|
||||
// HasResourceSpec returns true if the address has a resource spec, as
|
||||
// defined in the documentation:
|
||||
// https://www.terraform.io/docs/internals/resource-addressing.html
|
||||
// In particular, this returns false if the address contains only
|
||||
// a module path, thus addressing the entire module.
|
||||
func (r *ResourceAddress) HasResourceSpec() bool {
|
||||
return r.Type != "" && r.Name != ""
|
||||
}
|
||||
|
||||
// WholeModuleAddress returns the resource address that refers to all
|
||||
// resources in the same module as the receiver address.
|
||||
func (r *ResourceAddress) WholeModuleAddress() *ResourceAddress {
|
||||
return &ResourceAddress{
|
||||
Path: r.Path,
|
||||
Index: -1,
|
||||
InstanceTypeSet: false,
|
||||
}
|
||||
}
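
An illustration (not from this commit) of how HasResourceSpec and WholeModuleAddress behave for a typical targeted address; the rendered "module.app" string is an expectation based on the String method above, not guaranteed output.

package addrexample

import (
	"fmt"

	"github.com/hashicorp/terraform/terraform"
)

func specDemo() {
	addr, err := terraform.ParseResourceAddress("module.app.aws_instance.web[0]")
	if err != nil {
		panic(err)
	}

	fmt.Println(addr.HasResourceSpec())             // true: a type and name are present
	fmt.Println(addr.WholeModuleAddress().String()) // expected to render as "module.app"

	modOnly, _ := terraform.ParseResourceAddress("module.app")
	fmt.Println(modOnly.HasResourceSpec()) // false: only a module path
}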
|
||||
|
||||
// MatchesConfig returns true if the receiver matches the given
|
||||
// configuration resource within the given configuration module.
|
||||
//
|
||||
// Since resource configuration blocks represent all of the instances of
|
||||
// a multi-instance resource, the index of the address (if any) is not
|
||||
// considered.
|
||||
func (r *ResourceAddress) MatchesConfig(mod *module.Tree, rc *config.Resource) bool {
|
||||
if r.HasResourceSpec() {
|
||||
if r.Mode != rc.Mode || r.Type != rc.Type || r.Name != rc.Name {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
addrPath := r.Path
|
||||
cfgPath := mod.Path()
|
||||
|
||||
// normalize
|
||||
if len(addrPath) == 0 {
|
||||
addrPath = nil
|
||||
}
|
||||
if len(cfgPath) == 0 {
|
||||
cfgPath = nil
|
||||
}
|
||||
return reflect.DeepEqual(addrPath, cfgPath)
|
||||
}
|
||||
|
||||
// stateId returns the ID that this resource should be entered with
|
||||
// in the state. This is also used for diffs. In the future, we'd like to
|
||||
// move away from this string field so I don't export this.
|
||||
|
@ -185,7 +231,10 @@ func ParseResourceAddress(s string) (*ResourceAddress, error) {
 
 	// not allowed to say "data." without a type following
 	if mode == config.DataResourceMode && matches["type"] == "" {
-		return nil, fmt.Errorf("must target specific data instance")
+		return nil, fmt.Errorf(
+			"invalid resource address %q: must target specific data instance",
+			s,
+		)
 	}
 
 	return &ResourceAddress{
|
||||
|
@ -199,6 +248,75 @@ func ParseResourceAddress(s string) (*ResourceAddress, error) {
|
|||
}, nil
|
||||
}
|
||||
|
||||
// ParseResourceAddressForInstanceDiff creates a ResourceAddress for a
|
||||
// resource name as described in a module diff.
|
||||
//
|
||||
// For historical reasons a different addressing format is used in this
|
||||
// context. The internal format should not be shown in the UI and instead
|
||||
// this function should be used to translate to a ResourceAddress and
|
||||
// then, where appropriate, use the String method to produce a canonical
|
||||
// resource address string for display in the UI.
|
||||
//
|
||||
// The given path slice must be empty (or nil) for the root module, and
|
||||
// otherwise consist of a sequence of module names traversing down into
|
||||
// the module tree. If a non-nil path is provided, the caller must not
|
||||
// modify its underlying array after passing it to this function.
|
||||
func ParseResourceAddressForInstanceDiff(path []string, key string) (*ResourceAddress, error) {
|
||||
addr, err := parseResourceAddressInternal(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
addr.Path = path
|
||||
return addr, nil
|
||||
}
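
A sketch of using this translation helper; the "aws_instance.web.0" key and the []string{"app"} path are assumptions about the legacy internal format, shown only to illustrate the call shape.

package addrexample

import "github.com/hashicorp/terraform/terraform"

func canonicalKey(modulePath []string, key string) (string, error) {
	// For example, path []string{"app"} and key "aws_instance.web.0" would be
	// expected to render as "module.app.aws_instance.web[0]".
	addr, err := terraform.ParseResourceAddressForInstanceDiff(modulePath, key)
	if err != nil {
		return "", err
	}
	return addr.String(), nil
}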
|
||||
|
||||
// Contains returns true if and only if the given node is contained within
|
||||
// the receiver.
|
||||
//
|
||||
// Containment is defined in terms of the module and resource heirarchy:
|
||||
// a resource is contained within its module and any ancestor modules,
|
||||
// an indexed resource instance is contained with the unindexed resource, etc.
|
||||
func (addr *ResourceAddress) Contains(other *ResourceAddress) bool {
|
||||
ourPath := addr.Path
|
||||
givenPath := other.Path
|
||||
if len(givenPath) < len(ourPath) {
|
||||
return false
|
||||
}
|
||||
for i := range ourPath {
|
||||
if ourPath[i] != givenPath[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// If the receiver is a whole-module address then the path prefix
|
||||
// matching is all we need.
|
||||
if !addr.HasResourceSpec() {
|
||||
return true
|
||||
}
|
||||
|
||||
if addr.Type != other.Type || addr.Name != other.Name || addr.Mode != other.Mode {
|
||||
return false
|
||||
}
|
||||
|
||||
if addr.Index != -1 && addr.Index != other.Index {
|
||||
return false
|
||||
}
|
||||
|
||||
if addr.InstanceTypeSet && (addr.InstanceTypeSet != other.InstanceTypeSet || addr.InstanceType != other.InstanceType) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
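
An illustration of the containment rules above, using addresses parsed from their canonical string forms; the package wrapper is illustrative only.

package addrexample

import (
	"fmt"

	"github.com/hashicorp/terraform/terraform"
)

func containsDemo() {
	module, _ := terraform.ParseResourceAddress("module.app")
	resource, _ := terraform.ParseResourceAddress("module.app.aws_instance.web")
	instance, _ := terraform.ParseResourceAddress("module.app.aws_instance.web[3]")

	fmt.Println(module.Contains(resource))   // true: same module prefix, receiver has no resource spec
	fmt.Println(resource.Contains(instance)) // true: an unindexed resource contains its instances
	fmt.Println(instance.Contains(resource)) // false: an indexed instance does not contain the whole resource
}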
|
||||
|
||||
// Equals returns true if the receiver matches the given address.
|
||||
//
|
||||
// The name of this method is a misnomer, since it doesn't test for exact
|
||||
// equality. Instead, it tests that the _specified_ parts of each
|
||||
// address match, treating any unspecified parts as wildcards.
|
||||
//
|
||||
// See also Contains, which takes a more heirarchical approach to comparing
|
||||
// addresses.
|
||||
func (addr *ResourceAddress) Equals(raw interface{}) bool {
|
||||
other, ok := raw.(*ResourceAddress)
|
||||
if !ok {
|
||||
|
@ -233,6 +351,58 @@ func (addr *ResourceAddress) Equals(raw interface{}) bool {
|
|||
modeMatch
|
||||
}
|
||||
|
||||
// Less returns true if and only if the receiver should be sorted before
|
||||
// the given address when presenting a list of resource addresses to
|
||||
// an end-user.
|
||||
//
|
||||
// This sort uses lexicographic sorting for most components, but uses
|
||||
// numeric sort for indices, thus causing index 10 to sort after
|
||||
// index 9, rather than after index 1.
|
||||
func (addr *ResourceAddress) Less(other *ResourceAddress) bool {
|
||||
|
||||
switch {
|
||||
|
||||
case len(addr.Path) < len(other.Path):
|
||||
return true
|
||||
|
||||
case !reflect.DeepEqual(addr.Path, other.Path):
|
||||
// If the two paths are the same length but don't match, we'll just
|
||||
// cheat and compare the string forms since it's easier than
|
||||
// comparing all of the path segments in turn.
|
||||
addrStr := addr.String()
|
||||
otherStr := other.String()
|
||||
return addrStr < otherStr
|
||||
|
||||
case addr.Mode == config.DataResourceMode && other.Mode != config.DataResourceMode:
|
||||
return true
|
||||
|
||||
case addr.Type < other.Type:
|
||||
return true
|
||||
|
||||
case addr.Name < other.Name:
|
||||
return true
|
||||
|
||||
case addr.Index < other.Index:
|
||||
// Since "Index" is -1 for an un-indexed address, this also conveniently
|
||||
// sorts unindexed addresses before indexed ones, should they both
|
||||
// appear for some reason.
|
||||
return true
|
||||
|
||||
case other.InstanceTypeSet && !addr.InstanceTypeSet:
|
||||
return true
|
||||
|
||||
case addr.InstanceType < other.InstanceType:
|
||||
// InstanceType is actually an enum, so this is just an arbitrary
|
||||
// sort based on the enum numeric values, and thus not particularly
|
||||
// meaningful.
|
||||
return true
|
||||
|
||||
default:
|
||||
return false
|
||||
|
||||
}
|
||||
}
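
Less plugs directly into the standard library's sorting, as in this short sketch, so that index 10 sorts after index 9 when addresses are listed for an end-user.

package addrexample

import (
	"sort"

	"github.com/hashicorp/terraform/terraform"
)

func sortAddrs(addrs []*terraform.ResourceAddress) {
	// Sort for display using the numeric-aware ordering defined by Less.
	sort.Slice(addrs, func(i, j int) bool {
		return addrs[i].Less(addrs[j])
	})
}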
|
||||
|
||||
func ParseResourceIndex(s string) (int, error) {
|
||||
if s == "" {
|
||||
return -1, nil
|
||||
|
@ -275,7 +445,7 @@ func tokenizeResourceAddress(s string) (map[string]string, error) {
|
|||
// string "aws_instance.web.tainted[1]"
|
||||
re := regexp.MustCompile(`\A` +
|
||||
// "module.foo.module.bar" (optional)
|
||||
`(?P<path>(?:module\.[^.]+\.?)*)` +
|
||||
`(?P<path>(?:module\.(?P<module_name>[^.]+)\.?)*)` +
|
||||
// possibly "data.", if targeting is a data resource
|
||||
`(?P<data_prefix>(?:data\.)?)` +
|
||||
// "aws_instance.web" (optional when module path specified)
|
||||
|
@ -289,7 +459,7 @@ func tokenizeResourceAddress(s string) (map[string]string, error) {
|
|||
groupNames := re.SubexpNames()
|
||||
rawMatches := re.FindAllStringSubmatch(s, -1)
|
||||
if len(rawMatches) != 1 {
|
||||
return nil, fmt.Errorf("Problem parsing address: %q", s)
|
||||
return nil, fmt.Errorf("invalid resource address %q", s)
|
||||
}
|
||||
|
||||
matches := make(map[string]string)
|
||||
|
|
|
@ -1,5 +1,12 @@
|
|||
package terraform
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
multierror "github.com/hashicorp/go-multierror"
|
||||
"github.com/hashicorp/terraform/plugin/discovery"
|
||||
)
|
||||
|
||||
// ResourceProvider is an interface that must be implemented by any
|
||||
// resource provider: the thing that creates and manages the resources in
|
||||
// a Terraform configuration.
|
||||
|
@ -154,6 +161,18 @@ type ResourceProvider interface {
|
|||
ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error)
|
||||
}
|
||||
|
||||
// ResourceProviderError may be returned when creating a Context if the
|
||||
// required providers cannot be satisfied. This error can then be used to
|
||||
// format a more useful message for the user.
|
||||
type ResourceProviderError struct {
|
||||
Errors []error
|
||||
}
|
||||
|
||||
func (e *ResourceProviderError) Error() string {
|
||||
// use multierror to format the default output
|
||||
return multierror.Append(nil, e.Errors...).Error()
|
||||
}
|
||||
|
||||
// ResourceProviderCloser is an interface that providers that can close
|
||||
// connections that aren't needed anymore must implement.
|
||||
type ResourceProviderCloser interface {
|
||||
|
@ -171,6 +190,50 @@ type DataSource struct {
|
|||
Name string
|
||||
}
|
||||
|
||||
// ResourceProviderResolver is an interface implemented by objects that are
|
||||
// able to resolve a given set of resource provider version constraints
|
||||
// into ResourceProviderFactory callbacks.
|
||||
type ResourceProviderResolver interface {
|
||||
// Given a constraint map, return a ResourceProviderFactory for each
|
||||
// requested provider. If some or all of the constraints cannot be
|
||||
// satisfied, return a non-nil slice of errors describing the problems.
|
||||
ResolveProviders(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error)
|
||||
}
|
||||
|
||||
// ResourceProviderResolverFunc wraps a callback function and turns it into
|
||||
// a ResourceProviderResolver implementation, for convenience in situations
|
||||
// where a function and its associated closure are sufficient as a resolver
|
||||
// implementation.
|
||||
type ResourceProviderResolverFunc func(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error)
|
||||
|
||||
// ResolveProviders implements ResourceProviderResolver by calling the
|
||||
// wrapped function.
|
||||
func (f ResourceProviderResolverFunc) ResolveProviders(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) {
|
||||
return f(reqd)
|
||||
}
|
||||
|
||||
// ResourceProviderResolverFixed returns a ResourceProviderResolver that
|
||||
// has a fixed set of provider factories provided by the caller. The returned
|
||||
// resolver ignores version constraints entirely and just returns the given
|
||||
// factory for each requested provider name.
|
||||
//
|
||||
// This function is primarily used in tests, to provide mock providers or
|
||||
// in-process providers under test.
|
||||
func ResourceProviderResolverFixed(factories map[string]ResourceProviderFactory) ResourceProviderResolver {
|
||||
return ResourceProviderResolverFunc(func(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) {
|
||||
ret := make(map[string]ResourceProviderFactory, len(reqd))
|
||||
var errs []error
|
||||
for name := range reqd {
|
||||
if factory, exists := factories[name]; exists {
|
||||
ret[name] = factory
|
||||
} else {
|
||||
errs = append(errs, fmt.Errorf("provider %q is not available", name))
|
||||
}
|
||||
}
|
||||
return ret, errs
|
||||
})
|
||||
}
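
A sketch of how a test might satisfy provider requirements with a fixed, in-process provider. The testProvider argument is a mock supplied by the caller, and the ProviderResolver field name on ContextOpts is an assumption about this release, not confirmed by the hunk above.

package providerexample

import (
	"github.com/hashicorp/terraform/config/module"
	"github.com/hashicorp/terraform/terraform"
)

func newTestContext(mod *module.Tree, p terraform.ResourceProvider) (*terraform.Context, error) {
	// Every request for the "test" provider gets the same in-process instance;
	// version constraints are ignored by the fixed resolver.
	resolver := terraform.ResourceProviderResolverFixed(
		map[string]terraform.ResourceProviderFactory{
			"test": func() (terraform.ResourceProvider, error) { return p, nil },
		},
	)

	return terraform.NewContext(&terraform.ContextOpts{
		Module:           mod,
		ProviderResolver: resolver, // assumed field name
	})
}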
|
||||
|
||||
// ResourceProviderFactory is a function type that creates a new instance
|
||||
// of a resource provider.
|
||||
type ResourceProviderFactory func() (ResourceProvider, error)
|
||||
|
@ -202,3 +265,21 @@ func ProviderHasDataSource(p ResourceProvider, n string) bool {
|
|||
|
||||
return false
|
||||
}
|
||||
|
||||
// resourceProviderFactories matches available plugins to the given version
|
||||
// requirements to produce a map of compatible provider plugins if possible,
|
||||
// or an error if the currently-available plugins are insufficient.
|
||||
//
|
||||
// This should be called only with configurations that have passed calls
|
||||
// to config.Validate(), which ensures that all of the given version
|
||||
// constraints are valid. It will panic if any invalid constraints are present.
|
||||
func resourceProviderFactories(resolver ResourceProviderResolver, reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, error) {
|
||||
ret, errs := resolver.ResolveProviders(reqd)
|
||||
if errs != nil {
|
||||
return nil, &ResourceProviderError{
|
||||
Errors: errs,
|
||||
}
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
|
|
@ -533,6 +533,43 @@ func (s *State) equal(other *State) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// MarshalEqual is similar to Equal but provides a stronger definition of
|
||||
// "equal", where two states are equal if and only if their serialized form
|
||||
// is byte-for-byte identical.
|
||||
//
|
||||
// This is primarily useful for callers that are trying to save snapshots
|
||||
// of state to persistent storage, allowing them to detect when a new
|
||||
// snapshot must be taken.
|
||||
//
|
||||
// Note that the serial number and lineage are included in the serialized form,
|
||||
// so it's the caller's responsibility to properly manage these attributes
|
||||
// so that this method is only called on two states that have the same
|
||||
// serial and lineage, unless detecting such differences is desired.
|
||||
func (s *State) MarshalEqual(other *State) bool {
|
||||
if s == nil && other == nil {
|
||||
return true
|
||||
} else if s == nil || other == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
recvBuf := &bytes.Buffer{}
|
||||
otherBuf := &bytes.Buffer{}
|
||||
|
||||
err := WriteState(s, recvBuf)
|
||||
if err != nil {
|
||||
// should never happen, since we're writing to a buffer
|
||||
panic(err)
|
||||
}
|
||||
|
||||
err = WriteState(other, otherBuf)
|
||||
if err != nil {
|
||||
// should never happen, since we're writing to a buffer
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return bytes.Equal(recvBuf.Bytes(), otherBuf.Bytes())
|
||||
}
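
A sketch of the snapshotting decision MarshalEqual is meant to support; persist() is a hypothetical callback supplied by the caller.

package stateexample

import "github.com/hashicorp/terraform/terraform"

func maybePersist(prev, next *terraform.State, persist func(*terraform.State) error) error {
	// Only write a new snapshot when the serialized form actually changed.
	if next.MarshalEqual(prev) {
		return nil
	}
	return persist(next)
}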
|
||||
|
||||
type StateAgeComparison int
|
||||
|
||||
const (
|
||||
|
@ -603,6 +640,10 @@ func (s *State) SameLineage(other *State) bool {
|
|||
// DeepCopy performs a deep copy of the state structure and returns
|
||||
// a new structure.
|
||||
func (s *State) DeepCopy() *State {
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
copy, err := copystructure.Config{Lock: true}.Copy(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
@ -611,30 +652,6 @@ func (s *State) DeepCopy() *State {
|
|||
return copy.(*State)
|
||||
}
|
||||
|
||||
// IncrementSerialMaybe increments the serial number of this state
|
||||
// if it different from the other state.
|
||||
func (s *State) IncrementSerialMaybe(other *State) {
|
||||
if s == nil {
|
||||
return
|
||||
}
|
||||
if other == nil {
|
||||
return
|
||||
}
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
if s.Serial > other.Serial {
|
||||
return
|
||||
}
|
||||
if other.TFVersion != s.TFVersion || !s.equal(other) {
|
||||
if other.Serial > s.Serial {
|
||||
s.Serial = other.Serial
|
||||
}
|
||||
|
||||
s.Serial++
|
||||
}
|
||||
}
|
||||
|
||||
// FromFutureTerraform checks if this state was written by a Terraform
|
||||
// version from the future.
|
||||
func (s *State) FromFutureTerraform() bool {
|
||||
|
@ -660,6 +677,7 @@ func (s *State) init() {
|
|||
if s.Version == 0 {
|
||||
s.Version = StateVersion
|
||||
}
|
||||
|
||||
if s.moduleByPath(rootModulePath) == nil {
|
||||
s.addModule(rootModulePath)
|
||||
}
|
||||
|
|
|
@ -0,0 +1,9 @@
|
|||
--- FAIL: TestContext2Plan_moduleProviderInherit (0.01s)
|
||||
context_plan_test.go:552: bad: []string{"child"}
|
||||
map[string]dag.Vertex{}
|
||||
"module.middle.null"
|
||||
map[string]dag.Vertex{}
|
||||
"module.middle.module.inner.null"
|
||||
map[string]dag.Vertex{}
|
||||
"aws"
|
||||
FAIL
|
vendor/github.com/hashicorp/terraform/terraform/transform_resource_refresh_plannable.go (generated, vendored, 55 lines)
|
@ -1,55 +0,0 @@
|
|||
package terraform
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
)
|
||||
|
||||
// ResourceRefreshPlannableTransformer is a GraphTransformer that replaces any
|
||||
// nodes that don't have state yet exist in config with
|
||||
// NodePlannableResourceInstance.
|
||||
//
|
||||
// This transformer is used when expanding count on managed resource nodes
|
||||
// during the refresh phase to ensure that data sources that have
|
||||
// interpolations that depend on resources existing in the graph can be walked
|
||||
// properly.
|
||||
type ResourceRefreshPlannableTransformer struct {
|
||||
// The full global state.
|
||||
State *State
|
||||
}
|
||||
|
||||
// Transform implements GraphTransformer for
|
||||
// ResourceRefreshPlannableTransformer.
|
||||
func (t *ResourceRefreshPlannableTransformer) Transform(g *Graph) error {
|
||||
nextVertex:
|
||||
for _, v := range g.Vertices() {
|
||||
addr := v.(*NodeRefreshableManagedResourceInstance).Addr
|
||||
|
||||
// Find the state for this address, if there is one
|
||||
filter := &StateFilter{State: t.State}
|
||||
results, err := filter.Filter(addr.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check to see if we have a state for this resource. If we do, skip this
|
||||
// node.
|
||||
for _, result := range results {
|
||||
if _, ok := result.Value.(*ResourceState); ok {
|
||||
continue nextVertex
|
||||
}
|
||||
}
|
||||
// If we don't, convert this resource to a NodePlannableResourceInstance node
|
||||
// with all of the data we need to make it happen.
|
||||
log.Printf("[TRACE] No state for %s, converting to NodePlannableResourceInstance", addr.String())
|
||||
new := &NodePlannableResourceInstance{
|
||||
NodeAbstractResource: v.(*NodeRefreshableManagedResourceInstance).NodeAbstractResource,
|
||||
}
|
||||
// Replace the node in the graph
|
||||
if !g.Replace(v, new) {
|
||||
return fmt.Errorf("ResourceRefreshPlannableTransformer: Could not replace node %#v with %#v", v, new)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -41,6 +41,12 @@ type TargetsTransformer struct {
|
|||
// that already have the targets parsed
|
||||
ParsedTargets []ResourceAddress
|
||||
|
||||
// If set, the index portions of resource addresses will be ignored
|
||||
// for comparison. This is used when transforming a graph where
|
||||
// counted resources have not yet been expanded, since otherwise
|
||||
// the unexpanded nodes (which never have indices) would not match.
|
||||
IgnoreIndices bool
|
||||
|
||||
// Set to true when we're in a `terraform destroy` or a
|
||||
// `terraform plan -destroy`
|
||||
Destroy bool
|
||||
|
@ -199,7 +205,12 @@ func (t *TargetsTransformer) nodeIsTarget(
 
 	addr := r.ResourceAddr()
 	for _, targetAddr := range addrs {
-		if targetAddr.Equals(addr) {
+		if t.IgnoreIndices {
+			// targetAddr is not a pointer, so we can safely mutate it without
+			// interfering with references elsewhere.
+			targetAddr.Index = -1
+		}
+		if targetAddr.Contains(addr) {
 			return true
 		}
 	}
|
||||
|
|
|
@ -2,7 +2,8 @@ package terraform
|
|||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/terraform/config"
|
||||
)
|
||||
|
||||
// Semaphore is a wrapper around a channel to provide
|
||||
|
@ -47,21 +48,8 @@ func (s Semaphore) Release() {
 	}
 }
 
-// resourceProvider returns the provider name for the given type.
-func resourceProvider(t, alias string) string {
-	if alias != "" {
-		return alias
-	}
-
-	idx := strings.IndexRune(t, '_')
-	if idx == -1 {
-		// If no underscores, the resource name is assumed to be
-		// also the provider name, e.g. if the provider exposes
-		// only a single resource of each type.
-		return t
-	}
-
-	return t[:idx]
+func resourceProvider(resourceType, explicitProvider string) string {
+	return config.ResourceProviderFullName(resourceType, explicitProvider)
 }
 
 // strSliceContains checks if a given string is contained in a slice
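
For reference, a sketch of the implicit-provider rule that resourceProvider now delegates to; the exact handling of explicit provider references (including aliases) lives in the config package, so only the first output is asserted here.

package providerexample

import (
	"fmt"

	"github.com/hashicorp/terraform/config"
)

func providerNameDemo() {
	// With no explicit provider, the provider name is the resource type's
	// prefix before the first underscore.
	fmt.Println(config.ResourceProviderFullName("aws_instance", "")) // "aws"

	// An explicit provider reference takes precedence over the type prefix.
	fmt.Println(config.ResourceProviderFullName("aws_instance", "aws.west"))
}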
|
||||
|
|
|
@ -7,12 +7,12 @@ import (
 )
 
 // The main version number that is being run at the moment.
-const Version = "0.9.8"
+const Version = "0.10.0"
 
 // A pre-release marker for the version. If this is "" (empty string)
 // then it means that it is a final release. Otherwise, this is a pre-release
 // such as "dev" (in development), "beta", "rc1", etc.
-var VersionPrerelease = ""
+var VersionPrerelease = "dev"
 
 // SemVersion is an instance of version.Version. This has the secondary
 // benefit of verifying during tests and init time that our version is a
vendor/github.com/terraform-providers/terraform-provider-aws/LICENSE (generated, vendored, new file, 373 lines)
|
@ -0,0 +1,373 @@
|
|||
Mozilla Public License Version 2.0
|
||||
==================================
|
||||
|
||||
1. Definitions
|
||||
--------------
|
||||
|
||||
1.1. "Contributor"
|
||||
means each individual or legal entity that creates, contributes to
|
||||
the creation of, or owns Covered Software.
|
||||
|
||||
1.2. "Contributor Version"
|
||||
means the combination of the Contributions of others (if any) used
|
||||
by a Contributor and that particular Contributor's Contribution.
|
||||
|
||||
1.3. "Contribution"
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. "Covered Software"
|
||||
means Source Code Form to which the initial Contributor has attached
|
||||
the notice in Exhibit A, the Executable Form of such Source Code
|
||||
Form, and Modifications of such Source Code Form, in each case
|
||||
including portions thereof.
|
||||
|
||||
1.5. "Incompatible With Secondary Licenses"
|
||||
means
|
||||
|
||||
(a) that the initial Contributor has attached the notice described
|
||||
in Exhibit B to the Covered Software; or
|
||||
|
||||
(b) that the Covered Software was made available under the terms of
|
||||
version 1.1 or earlier of the License, but not also under the
|
||||
terms of a Secondary License.
|
||||
|
||||
1.6. "Executable Form"
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. "Larger Work"
|
||||
means a work that combines Covered Software with other material, in
|
||||
a separate file or files, that is not Covered Software.
|
||||
|
||||
1.8. "License"
|
||||
means this document.
|
||||
|
||||
1.9. "Licensable"
|
||||
means having the right to grant, to the maximum extent possible,
|
||||
whether at the time of the initial grant or subsequently, any and
|
||||
all of the rights conveyed by this License.
|
||||
|
||||
1.10. "Modifications"
|
||||
means any of the following:
|
||||
|
||||
(a) any file in Source Code Form that results from an addition to,
|
||||
deletion from, or modification of the contents of Covered
|
||||
Software; or
|
||||
|
||||
(b) any new file in Source Code Form that contains any Covered
|
||||
Software.
|
||||
|
||||
1.11. "Patent Claims" of a Contributor
|
||||
means any patent claim(s), including without limitation, method,
|
||||
process, and apparatus claims, in any patent Licensable by such
|
||||
Contributor that would be infringed, but for the grant of the
|
||||
License, by the making, using, selling, offering for sale, having
|
||||
made, import, or transfer of either its Contributions or its
|
||||
Contributor Version.
|
||||
|
||||
1.12. "Secondary License"
|
||||
means either the GNU General Public License, Version 2.0, the GNU
|
||||
Lesser General Public License, Version 2.1, the GNU Affero General
|
||||
Public License, Version 3.0, or any later versions of those
|
||||
licenses.
|
||||
|
||||
1.13. "Source Code Form"
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. "You" (or "Your")
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, "You" includes any entity that
|
||||
controls, is controlled by, or is under common control with You. For
|
||||
purposes of this definition, "control" means (a) the power, direct
|
||||
or indirect, to cause the direction or management of such entity,
|
||||
whether by contract or otherwise, or (b) ownership of more than
|
||||
fifty percent (50%) of the outstanding shares or beneficial
|
||||
ownership of such entity.
|
||||
|
||||
2. License Grants and Conditions
|
||||
--------------------------------
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
(a) under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or
|
||||
as part of a Larger Work; and
|
||||
|
||||
(b) under Patent Claims of such Contributor to make, use, sell, offer
|
||||
for sale, have made, import, and otherwise transfer either its
|
||||
Contributions or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution
|
||||
become effective for each Contribution on the date the Contributor first
|
||||
distributes such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under
|
||||
this License. No additional rights or licenses will be implied from the
|
||||
distribution or licensing of Covered Software under this License.
|
||||
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||
Contributor:
|
||||
|
||||
(a) for any code that a Contributor has removed from Covered Software;
|
||||
or
|
||||
|
||||
(b) for infringements caused by: (i) Your and any other third party's
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
(c) under Patent Claims infringed by Covered Software in the absence of
|
||||
its Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks,
|
||||
or logos of any Contributor (except as may be necessary to comply with
|
||||
the notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this
|
||||
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||
permitted under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its
|
||||
Contributions are its original creation(s) or it has sufficient rights
|
||||
to grant the rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under
|
||||
applicable copyright doctrines of fair use, fair dealing, or other
|
||||
equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
|
||||
in Section 2.1.
|
||||
|
||||
3. Responsibilities
|
||||
-------------------
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under
|
||||
the terms of this License. You must inform recipients that the Source
|
||||
Code Form of the Covered Software is governed by the terms of this
|
||||
License, and how they can obtain a copy of this License. You may not
|
||||
attempt to alter or restrict the recipients' rights in the Source Code
|
||||
Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
(a) such Covered Software must also be made available in Source Code
|
||||
Form, as described in Section 3.1, and You must inform recipients of
|
||||
the Executable Form how they can obtain a copy of such Source Code
|
||||
Form by reasonable means in a timely manner, at a charge no more
|
||||
than the cost of distribution to the recipient; and
|
||||
|
||||
(b) You may distribute such Executable Form under the terms of this
|
||||
License, or sublicense it under different terms, provided that the
|
||||
license for the Executable Form does not attempt to limit or alter
|
||||
the recipients' rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for
|
||||
the Covered Software. If the Larger Work is a combination of Covered
|
||||
Software with a work governed by one or more Secondary Licenses, and the
|
||||
Covered Software is not Incompatible With Secondary Licenses, this
|
||||
License permits You to additionally distribute such Covered Software
|
||||
under the terms of such Secondary License(s), so that the recipient of
|
||||
the Larger Work may, at their option, further distribute the Covered
|
||||
Software under the terms of either this License or such Secondary
|
||||
License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices
|
||||
(including copyright notices, patent notices, disclaimers of warranty,
|
||||
or limitations of liability) contained within the Source Code Form of
|
||||
the Covered Software, except that You may alter any license notices to
|
||||
the extent required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on
|
||||
behalf of any Contributor. You must make it absolutely clear that any
|
||||
such warranty, support, indemnity, or liability obligation is offered by
|
||||
You alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
---------------------------------------------------
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this
|
||||
License with respect to some or all of the Covered Software due to
|
||||
statute, judicial order, or regulation then You must: (a) comply with
|
||||
the terms of this License to the maximum extent possible; and (b)
|
||||
describe the limitations and the code they affect. Such description must
|
||||
be placed in a text file included with all distributions of the Covered
|
||||
Software under this License. Except to the extent prohibited by statute
|
||||
or regulation, such description must be sufficiently detailed for a
|
||||
recipient of ordinary skill to be able to understand it.
|
||||
|
||||
5. Termination
|
||||
--------------
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically
|
||||
if You fail to comply with any of its terms. However, if You become
|
||||
compliant, then the rights granted under this License from a particular
|
||||
Contributor are reinstated (a) provisionally, unless and until such
|
||||
Contributor explicitly and finally terminates Your grants, and (b) on an
|
||||
ongoing basis, if such Contributor fails to notify You of the
|
||||
non-compliance by some reasonable means prior to 60 days after You have
|
||||
come back into compliance. Moreover, Your grants from a particular
|
||||
Contributor are reinstated on an ongoing basis if such Contributor
|
||||
notifies You of the non-compliance by some reasonable means, this is the
|
||||
first time You have received notice of non-compliance with this License
|
||||
from such Contributor, and You become compliant prior to 30 days after
|
||||
Your receipt of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions,
|
||||
counter-claims, and cross-claims) alleging that a Contributor Version
|
||||
directly or indirectly infringes any patent, then the rights granted to
|
||||
You by any and all Contributors for the Covered Software under Section
|
||||
2.1 of this License shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
|
||||
end user license agreements (excluding distributors and resellers) which
|
||||
have been validly granted by You or Your distributors under this License
|
||||
prior to termination shall survive termination.
|
||||
|
||||
************************************************************************
|
||||
* *
|
||||
* 6. Disclaimer of Warranty *
|
||||
* ------------------------- *
|
||||
* *
|
||||
* Covered Software is provided under this License on an "as is" *
|
||||
* basis, without warranty of any kind, either expressed, implied, or *
|
||||
* statutory, including, without limitation, warranties that the *
|
||||
* Covered Software is free of defects, merchantable, fit for a *
|
||||
* particular purpose or non-infringing. The entire risk as to the *
|
||||
* quality and performance of the Covered Software is with You. *
|
||||
* Should any Covered Software prove defective in any respect, You *
|
||||
* (not any Contributor) assume the cost of any necessary servicing, *
|
||||
* repair, or correction. This disclaimer of warranty constitutes an *
|
||||
* essential part of this License. No use of any Covered Software is *
|
||||
* authorized under this License except under this disclaimer. *
|
||||
* *
|
||||
************************************************************************
|
||||
|
||||
************************************************************************
|
||||
* *
|
||||
* 7. Limitation of Liability *
|
||||
* -------------------------- *
|
||||
* *
|
||||
* Under no circumstances and under no legal theory, whether tort *
|
||||
* (including negligence), contract, or otherwise, shall any *
|
||||
* Contributor, or anyone who distributes Covered Software as *
|
||||
* permitted above, be liable to You for any direct, indirect, *
|
||||
* special, incidental, or consequential damages of any character *
|
||||
* including, without limitation, damages for lost profits, loss of *
|
||||
* goodwill, work stoppage, computer failure or malfunction, or any *
|
||||
* and all other commercial damages or losses, even if such party *
|
||||
* shall have been informed of the possibility of such damages. This *
|
||||
* limitation of liability shall not apply to liability for death or *
|
||||
* personal injury resulting from such party's negligence to the *
|
||||
* extent applicable law prohibits such limitation. Some *
|
||||
* jurisdictions do not allow the exclusion or limitation of *
|
||||
* incidental or consequential damages, so this exclusion and *
|
||||
* limitation may not apply to You. *
|
||||
* *
|
||||
************************************************************************
|
||||
|
||||
8. Litigation
|
||||
-------------
|
||||
|
||||
Any litigation relating to this License may be brought only in the
|
||||
courts of a jurisdiction where the defendant maintains its principal
|
||||
place of business and such litigation shall be governed by laws of that
|
||||
jurisdiction, without reference to its conflict-of-law provisions.
|
||||
Nothing in this Section shall prevent a party's ability to bring
|
||||
cross-claims or counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
----------------
|
||||
|
||||
This License represents the complete agreement concerning the subject
|
||||
matter hereof. If any provision of this License is held to be
|
||||
unenforceable, such provision shall be reformed only to the extent
|
||||
necessary to make it enforceable. Any law or regulation which provides
|
||||
that the language of a contract shall be construed against the drafter
|
||||
shall not be used to construe this License against a Contributor.
|
||||
|
||||
10. Versions of the License
|
||||
---------------------------
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version
|
||||
of the License under which You originally received the Covered Software,
|
||||
or under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a
|
||||
modified version of this License if you rename the license and remove
|
||||
any references to the name of the license steward (except to note that
|
||||
such modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||
Licenses
|
||||
|
||||
If You choose to distribute Source Code Form that is Incompatible With
|
||||
Secondary Licenses under the terms of this version of the License, the
|
||||
notice described in Exhibit B of this License must be attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
-------------------------------------------
|
||||
|
||||
This Source Code Form is subject to the terms of the Mozilla Public
|
||||
License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular
|
||||
file, then You may include the notice in a location (such as a LICENSE
|
||||
file in a relevant directory) where a recipient would be likely to look
|
||||
for such a notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||
---------------------------------------------------------
|
||||
|
||||
This Source Code Form is "Incompatible With Secondary Licenses", as
|
||||
defined by the Mozilla Public License, v. 2.0.
|
|
@ -19,9 +19,11 @@ import (
|
|||
"github.com/aws/aws-sdk-go/service/sts"
|
||||
"github.com/hashicorp/errwrap"
|
||||
"github.com/hashicorp/go-cleanhttp"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
)
|
||||
|
||||
func GetAccountInfo(iamconn *iam.IAM, stsconn *sts.STS, authProviderName string) (string, string, error) {
|
||||
var errors error
|
||||
// If we have creds from instance profile, we can use metadata API
|
||||
if authProviderName == ec2rolecreds.ProviderName {
|
||||
log.Println("[DEBUG] Trying to get account ID via AWS Metadata API")
|
||||
|
@ -35,30 +37,35 @@ func GetAccountInfo(iamconn *iam.IAM, stsconn *sts.STS, authProviderName string)
|
|||
|
||||
metadataClient := ec2metadata.New(sess)
|
||||
info, err := metadataClient.IAMInfo()
|
||||
if err != nil {
|
||||
// This can be triggered when no IAM Role is assigned
|
||||
// or AWS just happens to return invalid response
|
||||
return "", "", fmt.Errorf("Failed getting EC2 IAM info: %s", err)
|
||||
if err == nil {
|
||||
return parseAccountInfoFromArn(info.InstanceProfileArn)
|
||||
}
|
||||
|
||||
return parseAccountInfoFromArn(info.InstanceProfileArn)
log.Printf("[DEBUG] Failed to get account info from metadata service: %s", err)
errors = multierror.Append(errors, err)
// We can end up here if there's an issue with the instance metadata service
// or if we're getting credentials from AdRoll's Hologram (in which case IAMInfo will
// error out). In any event, if we can't get account info here, we should try
// the other methods available.
// If we have creds from something that looks like an IAM instance profile, but
// we were unable to retrieve account info from the instance profile, it's probably
// a safe assumption that we're not an IAM user
} else {
// Creds aren't from an IAM instance profile, so try iam:GetUser
log.Println("[DEBUG] Trying to get account ID via iam:GetUser")
outUser, err := iamconn.GetUser(nil)
if err == nil {
return parseAccountInfoFromArn(*outUser.User.Arn)
}
errors = multierror.Append(errors, err)
awsErr, ok := err.(awserr.Error)
// AccessDenied and ValidationError can be raised
// if credentials belong to federated profile, so we ignore these
if !ok || (awsErr.Code() != "AccessDenied" && awsErr.Code() != "ValidationError" && awsErr.Code() != "InvalidClientTokenId") {
return "", "", fmt.Errorf("Failed getting account ID via 'iam:GetUser': %s", err)
}
log.Printf("[DEBUG] Getting account ID via iam:GetUser failed: %s", err)
}

// Then try IAM GetUser
log.Println("[DEBUG] Trying to get account ID via iam:GetUser")
outUser, err := iamconn.GetUser(nil)
if err == nil {
return parseAccountInfoFromArn(*outUser.User.Arn)
}

awsErr, ok := err.(awserr.Error)
// AccessDenied and ValidationError can be raised
// if credentials belong to federated profile, so we ignore these
if !ok || (awsErr.Code() != "AccessDenied" && awsErr.Code() != "ValidationError" && awsErr.Code() != "InvalidClientTokenId") {
return "", "", fmt.Errorf("Failed getting account ID via 'iam:GetUser': %s", err)
}
log.Printf("[DEBUG] Getting account ID via iam:GetUser failed: %s", err)

// Then try STS GetCallerIdentity
log.Println("[DEBUG] Trying to get account ID via sts:GetCallerIdentity")
outCallerIdentity, err := stsconn.GetCallerIdentity(&sts.GetCallerIdentityInput{})

@@ -66,6 +73,7 @@ func GetAccountInfo(iamconn *iam.IAM, stsconn *sts.STS, authProviderName string)
return parseAccountInfoFromArn(*outCallerIdentity.Arn)
}
log.Printf("[DEBUG] Getting account ID via sts:GetCallerIdentity failed: %s", err)
errors = multierror.Append(errors, err)

// Then try IAM ListRoles
log.Println("[DEBUG] Trying to get account ID via iam:ListRoles")

@@ -73,11 +81,16 @@ func GetAccountInfo(iamconn *iam.IAM, stsconn *sts.STS, authProviderName string)
MaxItems: aws.Int64(int64(1)),
})
if err != nil {
return "", "", fmt.Errorf("Failed getting account ID via 'iam:ListRoles': %s", err)
log.Printf("[DEBUG] Failed to get account ID via iam:ListRoles: %s", err)
errors = multierror.Append(errors, err)
return "", "", fmt.Errorf("Failed getting account ID via all available methods. Errors: %s", errors)
}

if len(outRoles.Roles) < 1 {
return "", "", errors.New("Failed getting account ID via 'iam:ListRoles': No roles available")
err = fmt.Errorf("Failed to get account ID via iam:ListRoles: No roles available")
log.Printf("[DEBUG] %s", err)
errors = multierror.Append(errors, err)
return "", "", fmt.Errorf("Failed getting account ID via all available methods. Errors: %s", errors)
}

return parseAccountInfoFromArn(*outRoles.Roles[0].Arn)
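The net effect of the GetAccountInfo changes above is that each lookup (instance profile metadata, iam:GetUser, sts:GetCallerIdentity, iam:ListRoles) no longer aborts on its first failure; failures are accumulated with go-multierror and the combined error is only returned once every method has been tried. A minimal standalone sketch of that accumulate-and-fall-through pattern (the resolver functions below are hypothetical, not the provider's own):

// Sketch only: try several account-ID resolvers in order, collecting
// failures with go-multierror and erroring out only after all of them fail.
package main

import (
	"fmt"

	multierror "github.com/hashicorp/go-multierror"
)

func resolveAccountID(resolvers []func() (string, error)) (string, error) {
	var errs *multierror.Error
	for _, resolve := range resolvers {
		id, err := resolve()
		if err == nil {
			return id, nil
		}
		// Keep the failure and fall through to the next method,
		// mirroring the multierror.Append calls in the diff above.
		errs = multierror.Append(errs, err)
	}
	return "", fmt.Errorf("failed getting account ID via all available methods. Errors: %s", errs)
}

func main() {
	id, err := resolveAccountID([]func() (string, error){
		func() (string, error) { return "", fmt.Errorf("metadata service unavailable") },
		func() (string, error) { return "123456789012", nil },
	})
	fmt.Println(id, err)
}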
@@ -112,8 +125,25 @@ func GetCredentials(c *Config) (*awsCredentials.Credentials, error) {
// Build isolated HTTP client to avoid issues with globally-shared settings
client := cleanhttp.DefaultClient()

// Keep the timeout low as we don't want to wait in non-EC2 environments
// Keep the default timeout (100ms) low as we don't want to wait in non-EC2 environments
client.Timeout = 100 * time.Millisecond

const userTimeoutEnvVar = "AWS_METADATA_TIMEOUT"
userTimeout := os.Getenv(userTimeoutEnvVar)
if userTimeout != "" {
newTimeout, err := time.ParseDuration(userTimeout)
if err == nil {
if newTimeout.Nanoseconds() > 0 {
client.Timeout = newTimeout
} else {
log.Printf("[WARN] Non-positive value of %s (%s) is meaningless, ignoring", userTimeoutEnvVar, newTimeout.String())
}
} else {
log.Printf("[WARN] Error converting %s to time.Duration: %s", userTimeoutEnvVar, err)
}
}

log.Printf("[INFO] Setting AWS metadata API timeout to %s", client.Timeout.String())
cfg := &aws.Config{
HTTPClient: client,
}
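The GetCredentials hunk keeps the 100ms default for the metadata client but lets AWS_METADATA_TIMEOUT override it when the value parses to a positive time.Duration. A minimal sketch of the same parse-and-validate logic, using a plain net/http client rather than cleanhttp.DefaultClient():

// Sketch only: apply AWS_METADATA_TIMEOUT when it is a valid, positive duration.
package main

import (
	"log"
	"net/http"
	"os"
	"time"
)

func metadataClient() *http.Client {
	client := &http.Client{Timeout: 100 * time.Millisecond} // low default for non-EC2 environments

	if v := os.Getenv("AWS_METADATA_TIMEOUT"); v != "" {
		d, err := time.ParseDuration(v) // e.g. "500ms" or "2s"
		switch {
		case err != nil:
			log.Printf("[WARN] Error converting AWS_METADATA_TIMEOUT to time.Duration: %s", err)
		case d <= 0:
			log.Printf("[WARN] Non-positive value of AWS_METADATA_TIMEOUT (%s) is meaningless, ignoring", d)
		default:
			client.Timeout = d
		}
	}
	return client
}

func main() {
	log.Printf("[INFO] metadata API timeout: %s", metadataClient().Timeout)
}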
vendor/github.com/terraform-providers/terraform-provider-aws/aws/awserr.go (new file, 33 lines, generated, vendored)
@@ -0,0 +1,33 @@
package aws

import (
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/hashicorp/terraform/helper/resource"
)

func isAWSErr(err error, code string, message string) bool {
	if err, ok := err.(awserr.Error); ok {
		return err.Code() == code && strings.Contains(err.Message(), message)
	}
	return false
}

func retryOnAwsCode(code string, f func() (interface{}, error)) (interface{}, error) {
	var resp interface{}
	err := resource.Retry(1*time.Minute, func() *resource.RetryError {
		var err error
		resp, err = f()
		if err != nil {
			awsErr, ok := err.(awserr.Error)
			if ok && awsErr.Code() == code {
				return resource.RetryableError(err)
			}
			return resource.NonRetryableError(err)
		}
		return nil
	})
	return resp, err
}
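The new awserr.go helpers wrap resource.Retry so a call that fails with one specific AWS error code is retried for up to a minute, while any other error aborts immediately; isAWSErr then classifies errors by code and message substring. A sketch of how they might be exercised (assumes the two helpers are in scope in the same package; the throttling scenario is invented for illustration):

// Sketch only: a flaky call that throttles twice before succeeding is retried
// by retryOnAwsCode, and isAWSErr classifies any final error.
package aws

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func exampleRetry() error {
	attempts := 0
	out, err := retryOnAwsCode("Throttling", func() (interface{}, error) {
		attempts++
		if attempts < 3 {
			return nil, awserr.New("Throttling", "Rate exceeded", nil)
		}
		return "ok", nil
	})
	if err != nil {
		if isAWSErr(err, "Throttling", "Rate exceeded") {
			return fmt.Errorf("still throttled after retries: %s", err)
		}
		return err
	}
	fmt.Println(out, "after", attempts, "attempts")
	return nil
}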
@@ -49,6 +49,7 @@ import (
"github.com/aws/aws-sdk-go/service/glacier"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/aws/aws-sdk-go/service/inspector"
"github.com/aws/aws-sdk-go/service/iot"
"github.com/aws/aws-sdk-go/service/kinesis"
"github.com/aws/aws-sdk-go/service/kms"
"github.com/aws/aws-sdk-go/service/lambda"

@@ -174,6 +175,7 @@ type AWSClient struct {
ssmconn *ssm.SSM
wafconn *waf.WAF
wafregionalconn *wafregional.WAFRegional
iotconn *iot.IoT
}

func (c *AWSClient) S3() *s3.S3 {

@@ -246,7 +248,7 @@ func (c *Config) Client() (interface{}, error) {
}

if logging.IsDebugOrHigher() {
awsConfig.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody)
awsConfig.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody | aws.LogDebugWithRequestRetries | aws.LogDebugWithRequestErrors)
awsConfig.Logger = awsLogger{}
}
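The -246,7 +248,7 hunk widens SDK debug logging from HTTP bodies alone to also cover request retries and request errors; aws.LogLevelType is a bit mask, so the flags are simply OR-ed together. A standalone sketch of composing and testing those flags (the session setup here is illustrative, not the provider's own):

// Sketch only: combine aws-sdk-go debug log flags and check one of them later.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	level := aws.LogDebugWithHTTPBody | aws.LogDebugWithRequestRetries | aws.LogDebugWithRequestErrors

	sess, err := session.NewSession(aws.NewConfig().WithLogLevel(level))
	if err != nil {
		log.Fatal(err)
	}

	// LogLevelType is a bit mask, so individual flags can be tested.
	if sess.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
		log.Println("request retries will be logged")
	}
}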
@@ -364,6 +366,7 @@ func (c *Config) Client() (interface{}, error) {
client.firehoseconn = firehose.New(sess)
client.inspectorconn = inspector.New(sess)
client.glacierconn = glacier.New(sess)
client.iotconn = iot.New(sess)
client.kinesisconn = kinesis.New(awsKinesisSess)
client.kmsconn = kms.New(awsKmsSess)
client.lambdaconn = lambda.New(sess)

@@ -382,9 +385,32 @@ func (c *Config) Client() (interface{}, error) {
client.wafconn = waf.New(sess)
client.wafregionalconn = wafregional.New(sess)

// Workaround for https://github.com/aws/aws-sdk-go/issues/1376
client.kinesisconn.Handlers.Retry.PushBack(func(r *request.Request) {
if !strings.HasPrefix(r.Operation.Name, "Describe") && !strings.HasPrefix(r.Operation.Name, "List") {
return
}
err, ok := r.Error.(awserr.Error)
if !ok || err == nil {
return
}
if err.Code() == kinesis.ErrCodeLimitExceededException {
r.Retryable = aws.Bool(true)
}
})

return &client, nil
}

func hasEc2Classic(platforms []string) bool {
for _, p := range platforms {
if p == "EC2" {
return true
}
}
return false
}

// ValidateRegion returns an error if the configured region is not a
// valid aws region and nil otherwise.
func (c *Config) ValidateRegion() error {
vendor/github.com/terraform-providers/terraform-provider-aws/aws/data_source_aws_alb_target_group.go (new file, 153 lines, generated, vendored)
@@ -0,0 +1,153 @@
package aws

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elbv2"
	"github.com/hashicorp/errwrap"
	"github.com/hashicorp/terraform/helper/schema"
)

func dataSourceAwsAlbTargetGroup() *schema.Resource {
	return &schema.Resource{
		Read: dataSourceAwsAlbTargetGroupRead,
		Schema: map[string]*schema.Schema{
			"arn": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"arn_suffix": {
				Type:     schema.TypeString,
				Computed: true,
			},

			"name": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"port": {
				Type:     schema.TypeInt,
				Computed: true,
			},

			"protocol": {
				Type:     schema.TypeString,
				Computed: true,
			},

			"vpc_id": {
				Type:     schema.TypeString,
				Computed: true,
			},

			"deregistration_delay": {
				Type:     schema.TypeInt,
				Computed: true,
			},

			"stickiness": {
				Type:     schema.TypeList,
				Computed: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"enabled": {
							Type:     schema.TypeBool,
							Computed: true,
						},
						"type": {
							Type:     schema.TypeString,
							Computed: true,
						},
						"cookie_duration": {
							Type:     schema.TypeInt,
							Computed: true,
						},
					},
				},
			},

			"health_check": {
				Type:     schema.TypeList,
				Computed: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"interval": {
							Type:     schema.TypeInt,
							Computed: true,
						},

						"path": {
							Type:     schema.TypeString,
							Computed: true,
						},

						"port": {
							Type:     schema.TypeString,
							Computed: true,
						},

						"protocol": {
							Type:     schema.TypeString,
							Computed: true,
						},

						"timeout": {
							Type:     schema.TypeInt,
							Computed: true,
						},

						"healthy_threshold": {
							Type:     schema.TypeInt,
							Computed: true,
						},

						"matcher": {
							Type:     schema.TypeString,
							Computed: true,
						},

						"unhealthy_threshold": {
							Type:     schema.TypeInt,
							Computed: true,
						},
					},
				},
			},

			"tags": tagsSchemaComputed(),
		},
	}
}

func dataSourceAwsAlbTargetGroupRead(d *schema.ResourceData, meta interface{}) error {
	elbconn := meta.(*AWSClient).elbv2conn
	tgArn := d.Get("arn").(string)
	tgName := d.Get("name").(string)

	describeTgOpts := &elbv2.DescribeTargetGroupsInput{}
	switch {
	case tgArn != "":
		describeTgOpts.TargetGroupArns = []*string{aws.String(tgArn)}
	case tgName != "":
		describeTgOpts.Names = []*string{aws.String(tgName)}
	}

	describeResp, err := elbconn.DescribeTargetGroups(describeTgOpts)
	if err != nil {
		return errwrap.Wrapf("Error retrieving ALB Target Group: {{err}}", err)
	}
	if len(describeResp.TargetGroups) != 1 {
		return fmt.Errorf("Search returned %d results, please revise so only one is returned", len(describeResp.TargetGroups))
	}

	targetGroup := describeResp.TargetGroups[0]

	d.SetId(*targetGroup.TargetGroupArn)
	return flattenAwsAlbTargetGroupResource(d, meta, targetGroup)
}
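dataSourceAwsAlbTargetGroupRead above resolves a target group with a single elbv2.DescribeTargetGroups call, filtering by ARN or by name and insisting on exactly one match before setting the resource ID. A standalone sketch of the same lookup against the SDK (the session handling and target group name are illustrative only):

// Sketch only: look up one ALB target group by ARN or name via elbv2.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elbv2"
)

func lookupTargetGroup(svc *elbv2.ELBV2, arn, name string) (*elbv2.TargetGroup, error) {
	input := &elbv2.DescribeTargetGroupsInput{}
	switch {
	case arn != "":
		input.TargetGroupArns = []*string{aws.String(arn)}
	case name != "":
		input.Names = []*string{aws.String(name)}
	}

	out, err := svc.DescribeTargetGroups(input)
	if err != nil {
		return nil, fmt.Errorf("error retrieving ALB Target Group: %s", err)
	}
	if len(out.TargetGroups) != 1 {
		return nil, fmt.Errorf("search returned %d results, please revise so only one is returned", len(out.TargetGroups))
	}
	return out.TargetGroups[0], nil
}

func main() {
	sess := session.Must(session.NewSession())
	tg, err := lookupTargetGroup(elbv2.New(sess), "", "my-target-group")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*tg.TargetGroupArn)
}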
Some files were not shown because too many files have changed in this diff.