Merge remote-tracking branch 'origin' into pr-10594

Jake Champlin 2017-04-21 15:37:39 -04:00
commit 9ef947b0c3
No known key found for this signature in database
GPG Key ID: DC31F41958EF4AC2
6447 changed files with 1179720 additions and 109547 deletions


@ -44,7 +44,7 @@ ensure Terraform remains stable.
We make the distinction between these two types of providers to help
highlight the vast amounts of community effort that goes in to making Terraform
great, and to help contributers better understand the role HashiCorp employees
great, and to help contributors better understand the role HashiCorp employees
play in the various areas of the code base.
## Issues
@ -52,7 +52,7 @@ play in the various areas of the code base.
### Issue Reporting Checklists
We welcome issues of all kinds including feature requests, bug reports, and
general questions. Below you'll find checklists with guidlines for well-formed
general questions. Below you'll find checklists with guidelines for well-formed
issues of each type.
#### Bug Reports
@ -201,6 +201,9 @@ Implementing a new resource is a good way to learn more about how Terraform
interacts with upstream APIs. There are plenty of examples to draw from in the
existing resources, but you still get to implement something completely new.
- [ ] __Minimal LOC__: It can be inefficient for both the reviewer
and author to go through long feedback cycles on a big PR with many
resources. We therefore encourage you to only submit **1 resource at a time**.
- [ ] __Acceptance tests__: New resources should include acceptance tests
covering their behavior. See [Writing Acceptance
Tests](#writing-acceptance-tests) below for a detailed guide on how to
@ -220,9 +223,14 @@ existing resources, but you still get to implement something completely new.
#### New Provider
Implementing a new provider gives Terraform the ability to manage resources in
a whole new API. It's a larger undertaking, but brings major new functionaliy
a whole new API. It's a larger undertaking, but brings major new functionality
into Terraform.
- [ ] __Minimal initial LOC__: Some providers may be big and it can be
inefficient for both reviewer & author to go through long feedback cycles
on a big PR with many resources. We encourage you to only submit
the necessary minimum in a single PR, ideally **just the first resource**
of the provider.
- [ ] __Acceptance tests__: Each provider should include an acceptance test
suite, with tests for each resource covering
its behavior. See [Writing Acceptance Tests](#writing-acceptance-tests) below
@ -368,12 +376,12 @@ point to the framework is the `resource.Test()` function.
Tests are divided into `TestStep`s. Each `TestStep` proceeds by applying some
Terraform configuration using the provider under test, and then verifying that
results are as expected by making assertions using the provider API. It is
common for a single test function to excercise both the creation of and updates
common for a single test function to exercise both the creation of and updates
to a single resource. Most tests follow a similar structure.
1. Pre-flight checks are made to ensure that sufficient provider configuration
is available to be able to proceed - for example in an acceptance test
targetting AWS, `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` must be set prior
targeting AWS, `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` must be set prior
to running acceptance tests. This is common to all tests exercising a single
provider.
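As a hedged sketch only (the `example_thing` resource, the config strings, and the `testAccPreCheck`/`testAccProviders` helpers are hypothetical placeholders for whatever the provider under test defines), a test function following this structure looks roughly like:

```go
func TestAccExampleThing_basic(t *testing.T) {
	resource.Test(t, resource.TestCase{
		// Pre-flight check that the required provider credentials are set.
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				// First step: apply an initial configuration and assert on
				// the attributes recorded in state.
				Config: testAccExampleThingConfig_basic,
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttr("example_thing.foo", "name", "bar"),
				),
			},
			{
				// Second step: apply an updated configuration to exercise
				// the resource's update path.
				Config: testAccExampleThingConfig_update,
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttr("example_thing.foo", "name", "baz"),
				),
			},
		},
	})
}
```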
@ -462,7 +470,7 @@ When executing the test, the following steps are taken for each `TestStep`:
the resource - though in this case it is necessary to split the ID into
constituent parts in order to use the provider API. For computed properties,
we instead assert that the value saved in the Terraform state was the
expected value if possible. The testing framework providers helper functions
expected value if possible. The testing framework provides helper functions
for several common types of check - for example:
```go
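// Illustrative sketch only -- the resource address "example_thing.foo" is a
// hypothetical placeholder, not taken from this commit.
Check: resource.ComposeTestCheckFunc(
	// Exact-match assertion against an attribute value saved in state.
	resource.TestCheckResourceAttr("example_thing.foo", "name", "bar"),
	// Assert that a computed attribute was set to some non-empty value.
	resource.TestCheckResourceAttrSet("example_thing.foo", "id"),
),
```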

4
.gitignore vendored

@ -25,3 +25,7 @@ website/node_modules
*.iml
website/vendor
# Test exclusions
!command/test-fixtures/**/*.tfstate
!command/test-fixtures/**/.terraform/


@ -1,16 +1,32 @@
dist: trusty
sudo: false
language: go
go:
- 1.7.4
- 1.8beta1
- 1.8
# add TF_CONSUL_TEST=1 to run consul tests
# they were causing timeouts in Travis
env:
- CONSUL_VERSION=0.7.5 GOMAXPROCS=4
# Fetch consul for the backend and provider tests
before_install:
- curl -sLo consul.zip https://releases.hashicorp.com/consul/${CONSUL_VERSION}/consul_${CONSUL_VERSION}_linux_amd64.zip
- unzip consul.zip
- mkdir ~/bin
- mv consul ~/bin
- export PATH="~/bin:$PATH"
install:
# This script is used by the Travis build to install a cookie for
# go.googlesource.com so rate limits are higher when using `go get` to fetch
# packages that live there.
# See: https://github.com/golang/go/issues/12933
- bash scripts/gogetcookie.sh
- go get github.com/kardianos/govendor
script:
- make test vet
- make vet vendor-status test
- GOOS=windows go build
branches:
only:
- master

File diff suppressed because it is too large


@ -1,5 +1,4 @@
TEST?=$$(go list ./... | grep -v '/terraform/vendor/' | grep -v '/builtin/bins/')
VETARGS?=-all
GOFMT_FILES?=$$(find . -name '*.go' | grep -v vendor)
default: test vet
@ -40,7 +39,9 @@ plugin-dev: generate
# test runs the unit tests
test: fmtcheck errcheck generate
TF_ACC= go test $(TEST) $(TESTARGS) -timeout=30s -parallel=4
go test -i $(TEST) || exit 1
echo $(TEST) | \
xargs -t -n4 go test $(TESTARGS) -timeout=60s -parallel=4
# testacc runs acceptance tests
testacc: fmtcheck generate
@ -74,8 +75,8 @@ cover:
# vet runs the Go source code static analysis tool `vet` to find
# any common errors.
vet:
@echo "go tool vet $(VETARGS) ."
@go tool vet $(VETARGS) $$(ls -d */ | grep -v vendor) ; if [ $$? -eq 1 ]; then \
@echo "go vet ."
@go vet $$(go list ./... | grep -v vendor/) ; if [ $$? -eq 1 ]; then \
echo ""; \
echo "Vet found suspicious constructs. Please check the reported constructs"; \
echo "and fix them if necessary before submitting the code for review."; \
@ -100,4 +101,12 @@ fmtcheck:
errcheck:
@sh -c "'$(CURDIR)/scripts/errcheck.sh'"
.PHONY: bin default generate test vet fmt fmtcheck tools
vendor-status:
@govendor status
# disallow any parallelism (-j) for Make. This is necessary since some
# commands during the build process create temporary files that collide
# under parallel conditions.
.NOTPARALLEL:
.PHONY: bin core-dev core-test cover default dev errcheck fmt fmtcheck generate plugin-dev quickdev test-compile test testacc testrace tools vendor-status vet


@ -1,11 +1,11 @@
Terraform
=========
- Website: http://www.terraform.io
- Website: https://www.terraform.io
- [![Gitter chat](https://badges.gitter.im/hashicorp-terraform/Lobby.png)](https://gitter.im/hashicorp-terraform/Lobby)
- Mailing list: [Google Groups](http://groups.google.com/group/terraform-tool)
![Terraform](https://raw.githubusercontent.com/hashicorp/terraform/master/website/source/assets/images/readme.png)
![Terraform](https://rawgithub.com/hashicorp/terraform/master/website/source/assets/images/logo-hashicorp.svg)
Terraform is a tool for building, changing, and versioning infrastructure safely and efficiently. Terraform can manage existing and popular service providers as well as custom in-house solutions.
@ -29,14 +29,14 @@ All documentation is available on the [Terraform website](http://www.terraform.i
Developing Terraform
--------------------
If you wish to work on Terraform itself or any of its built-in providers, you'll first need [Go](http://www.golang.org) installed on your machine (version 1.7+ is *required*). Alternatively, you can use the Vagrantfile in the root of this repo to stand up a virtual machine with the appropriate dev tooling already set up for you.
If you wish to work on Terraform itself or any of its built-in providers, you'll first need [Go](http://www.golang.org) installed on your machine (version 1.8+ is *required*). Alternatively, you can use the Vagrantfile in the root of this repo to stand up a virtual machine with the appropriate dev tooling already set up for you.
For local dev first make sure Go is properly installed, including setting up a [GOPATH](http://golang.org/doc/code.html#GOPATH). You will also need to add `$GOPATH/bin` to your `$PATH`.
Next, using [Git](https://git-scm.com/), clone this repository into `$GOPATH/src/github.com/hashicorp/terraform`. All the necessary dependencies are either vendored or automatically installed, so you just need to type `make`. This will compile the code and then run the tests. If this exits with exit status 0, then everything is working!
```sh
$ cd $GOPATH/src/github.com/hashicorp/terraform
$ cd "$GOPATH/src/github.com/hashicorp/terraform"
$ make
```
@ -88,7 +88,7 @@ Assuming your work is on a branch called `my-feature-branch`, the steps look lik
go get github.com/hashicorp/my-project
```
2. Add the new package to your vendor/ directory:
2. Add the new package to your `vendor/` directory:
```bash
govendor add github.com/hashicorp/my-project/package
@ -152,3 +152,13 @@ $ tree ./pkg/ -P "terraform|*.zip"
```
_Note: Cross-compilation uses [gox](https://github.com/mitchellh/gox), which requires toolchains to be built with versions of Go prior to 1.5. In order to successfully cross-compile with older versions of Go, you will need to run `gox -build-toolchain` before running the commands detailed above._
#### Docker
When using Docker you don't need any of the Go development tools installed, and you can clone terraform to any location on disk (it doesn't have to be inside your `$GOPATH`). This is useful for users who want to build `master` or a specific branch for testing without setting up a full Go environment.
For example, run the following command to build terraform for macOS in a Linux-based container.
```sh
docker run --rm -v $(pwd):/go/src/github.com/hashicorp/terraform -w /go/src/github.com/hashicorp/terraform -e XC_OS=darwin -e XC_ARCH=amd64 golang:latest bash -c "apt-get update && apt-get install -y zip && make bin"
```

31
Vagrantfile vendored

@ -4,8 +4,16 @@
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
# Software version variables
GOVERSION = "1.8"
UBUNTUVERSION = "16.04"
# CPU and RAM can be adjusted depending on your system
CPUCOUNT = "2"
RAM = "4096"
$script = <<SCRIPT
GOVERSION="1.7.4"
GOVERSION="#{GOVERSION}"
SRCROOT="/opt/go"
SRCPATH="/opt/gopath"
@ -38,13 +46,14 @@ mkdir -p "$SRCPATH"
chown -R vagrant:vagrant "$SRCPATH" 2>/dev/null || true
# ^^ silencing errors here because we expect this to fail for the shared folder
install -m0755 /dev/stdin /etc/profile.d/gopath.sh <<EOF
cat >/etc/profile.d/gopath.sh <<EOF
export GOPATH="$SRCPATH"
export GOROOT="$SRCROOT"
export PATH="$SRCROOT/bin:$SRCPATH/bin:\$PATH"
EOF
chmod 755 /etc/profile.d/gopath.sh
cat >>/home/vagrant/.bashrc <<EOF
grep -q -F 'cd /opt/gopath/src/github.com/hashicorp/terraform' /home/vagrant/.bashrc || cat >>/home/vagrant/.bashrc <<EOF
## After login, change to terraform directory
cd /opt/gopath/src/github.com/hashicorp/terraform
@ -53,7 +62,7 @@ EOF
SCRIPT
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.box = "bento/ubuntu-14.04"
config.vm.box = "bento/ubuntu-#{UBUNTUVERSION}"
config.vm.hostname = "terraform"
config.vm.provision "prepare-shell", type: "shell", inline: "sudo sed -i '/tty/!s/mesg n/tty -s \\&\\& mesg n/' /root/.profile", privileged: false
@ -61,23 +70,23 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.synced_folder '.', '/opt/gopath/src/github.com/hashicorp/terraform'
config.vm.provider "docker" do |v, override|
override.vm.box = "tknerr/baseimage-ubuntu-14.04"
override.vm.box = "tknerr/baseimage-ubuntu-#{UBUNTUVERSION}"
end
["vmware_fusion", "vmware_workstation"].each do |p|
config.vm.provider p do |v|
v.vmx["memsize"] = "4096"
v.vmx["numvcpus"] = "2"
v.vmx["memsize"] = "#{RAM}"
v.vmx["numvcpus"] = "#{CPUCOUNT}"
end
end
config.vm.provider "virtualbox" do |v|
v.memory = 4096
v.cpus = 2
v.memory = "#{RAM}"
v.cpus = "#{CPUCOUNT}"
end
config.vm.provider "parallels" do |prl|
prl.memory = 4096
prl.cpus = 2
prl.memory = "#{RAM}"
prl.cpus = "#{CPUCOUNT}"
end
end

163
backend/atlas/backend.go Normal file

@ -0,0 +1,163 @@
package atlas
import (
"context"
"fmt"
"net/url"
"os"
"strings"
"sync"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/state/remote"
"github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/cli"
"github.com/mitchellh/colorstring"
)
// Backend is an implementation of EnhancedBackend that performs all operations
// in Atlas. State must currently also be stored in Atlas, although it is worth
// investigating in the future if state storage can be external as well.
type Backend struct {
// CLI and Colorize control the CLI output. If CLI is nil then no CLI
// output will be done. If CLIColor is nil then no coloring will be done.
CLI cli.Ui
CLIColor *colorstring.Colorize
// ContextOpts are the base context options to set when initializing a
// Terraform context. Many of these will be overridden or merged by
// Operation. See Operation for more details.
ContextOpts *terraform.ContextOpts
//---------------------------------------------------------------
// Internal fields, do not set
//---------------------------------------------------------------
// stateClient is the legacy state client, set up in Configure
stateClient *stateClient
// schema is the schema for configuration, set by init
schema *schema.Backend
once sync.Once
// opLock locks operations
opLock sync.Mutex
}
func (b *Backend) Input(
ui terraform.UIInput, c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
b.once.Do(b.init)
return b.schema.Input(ui, c)
}
func (b *Backend) Validate(c *terraform.ResourceConfig) ([]string, []error) {
b.once.Do(b.init)
return b.schema.Validate(c)
}
func (b *Backend) Configure(c *terraform.ResourceConfig) error {
b.once.Do(b.init)
return b.schema.Configure(c)
}
func (b *Backend) States() ([]string, error) {
return nil, backend.ErrNamedStatesNotSupported
}
func (b *Backend) DeleteState(name string) error {
return backend.ErrNamedStatesNotSupported
}
func (b *Backend) State(name string) (state.State, error) {
if name != backend.DefaultStateName {
return nil, backend.ErrNamedStatesNotSupported
}
return &remote.State{Client: b.stateClient}, nil
}
// Colorize returns the Colorize structure that can be used for colorizing
// output. This is guaranteed to always return a non-nil value and so is useful
// as a helper to wrap any potentially colored strings.
func (b *Backend) Colorize() *colorstring.Colorize {
if b.CLIColor != nil {
return b.CLIColor
}
return &colorstring.Colorize{
Colors: colorstring.DefaultColors,
Disable: true,
}
}
func (b *Backend) init() {
b.schema = &schema.Backend{
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
Description: schemaDescriptions["name"],
},
"access_token": &schema.Schema{
Type: schema.TypeString,
Required: true,
Description: schemaDescriptions["access_token"],
DefaultFunc: schema.EnvDefaultFunc("ATLAS_TOKEN", nil),
},
"address": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: schemaDescriptions["address"],
DefaultFunc: schema.EnvDefaultFunc("ATLAS_ADDRESS", defaultAtlasServer),
},
},
ConfigureFunc: b.schemaConfigure,
}
}
func (b *Backend) schemaConfigure(ctx context.Context) error {
d := schema.FromContextBackendConfig(ctx)
// Parse the address
addr := d.Get("address").(string)
addrUrl, err := url.Parse(addr)
if err != nil {
return fmt.Errorf("Error parsing 'address': %s", err)
}
// Parse the org/env
name := d.Get("name").(string)
parts := strings.Split(name, "/")
if len(parts) != 2 {
return fmt.Errorf("malformed name '%s', expected format '<org>/<name>'", name)
}
org := parts[0]
env := parts[1]
// Setup the client
b.stateClient = &stateClient{
Server: addr,
ServerURL: addrUrl,
AccessToken: d.Get("access_token").(string),
User: org,
Name: env,
// This is optionally set during Atlas Terraform runs.
RunId: os.Getenv("ATLAS_RUN_ID"),
}
return nil
}
var schemaDescriptions = map[string]string{
"name": "Full name of the environment in Atlas, such as 'hashicorp/myenv'",
"access_token": "Access token to use to access Atlas. If ATLAS_TOKEN is set then\n" +
"this will override any saved value for this.",
"address": "Address to your Atlas installation. This defaults to the publicly\n" +
"hosted version at 'https://atlas.hashicorp.com/'. This address\n" +
"should contain the full HTTP scheme to use.",
}


@ -0,0 +1,49 @@
package atlas
import (
"os"
"testing"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/terraform"
)
func TestImpl(t *testing.T) {
var _ backend.Backend = new(Backend)
var _ backend.CLI = new(Backend)
}
func TestConfigure_envAddr(t *testing.T) {
defer os.Setenv("ATLAS_ADDRESS", os.Getenv("ATLAS_ADDRESS"))
os.Setenv("ATLAS_ADDRESS", "http://foo.com")
b := &Backend{}
err := b.Configure(terraform.NewResourceConfig(config.TestRawConfig(t, map[string]interface{}{
"name": "foo/bar",
})))
if err != nil {
t.Fatalf("err: %s", err)
}
if b.stateClient.Server != "http://foo.com" {
t.Fatalf("bad: %#v", b.stateClient)
}
}
func TestConfigure_envToken(t *testing.T) {
defer os.Setenv("ATLAS_TOKEN", os.Getenv("ATLAS_TOKEN"))
os.Setenv("ATLAS_TOKEN", "foo")
b := &Backend{}
err := b.Configure(terraform.NewResourceConfig(config.TestRawConfig(t, map[string]interface{}{
"name": "foo/bar",
})))
if err != nil {
t.Fatalf("err: %s", err)
}
if b.stateClient.AccessToken != "foo" {
t.Fatalf("bad: %#v", b.stateClient)
}
}

13
backend/atlas/cli.go Normal file

@ -0,0 +1,13 @@
package atlas
import (
"github.com/hashicorp/terraform/backend"
)
// backend.CLI impl.
func (b *Backend) CLIInit(opts *backend.CLIOpts) error {
b.CLI = opts.CLI
b.CLIColor = opts.CLIColor
b.ContextOpts = opts.ContextOpts
return nil
}


@ -0,0 +1,319 @@
package atlas
import (
"bytes"
"crypto/md5"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"fmt"
"io"
"log"
"net/http"
"net/url"
"os"
"path"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-retryablehttp"
"github.com/hashicorp/go-rootcerts"
"github.com/hashicorp/terraform/state/remote"
"github.com/hashicorp/terraform/terraform"
)
const (
// defaultAtlasServer is used when no address is given
defaultAtlasServer = "https://atlas.hashicorp.com/"
atlasTokenHeader = "X-Atlas-Token"
)
// stateClient implements the remote.Client interface for an Atlas-compatible server.
type stateClient struct {
Server string
ServerURL *url.URL
User string
Name string
AccessToken string
RunId string
HTTPClient *retryablehttp.Client
conflictHandlingAttempted bool
}
func (c *stateClient) Get() (*remote.Payload, error) {
// Make the HTTP request
req, err := retryablehttp.NewRequest("GET", c.url().String(), nil)
if err != nil {
return nil, fmt.Errorf("Failed to make HTTP request: %v", err)
}
req.Header.Set(atlasTokenHeader, c.AccessToken)
// Request the url
client, err := c.http()
if err != nil {
return nil, err
}
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
// Handle the common status codes
switch resp.StatusCode {
case http.StatusOK:
// Handled after
case http.StatusNoContent:
return nil, nil
case http.StatusNotFound:
return nil, nil
case http.StatusUnauthorized:
return nil, fmt.Errorf("HTTP remote state endpoint requires auth")
case http.StatusForbidden:
return nil, fmt.Errorf("HTTP remote state endpoint invalid auth")
case http.StatusInternalServerError:
return nil, fmt.Errorf("HTTP remote state internal server error")
default:
return nil, fmt.Errorf(
"Unexpected HTTP response code: %d\n\nBody: %s",
resp.StatusCode, c.readBody(resp.Body))
}
// Read in the body
buf := bytes.NewBuffer(nil)
if _, err := io.Copy(buf, resp.Body); err != nil {
return nil, fmt.Errorf("Failed to read remote state: %v", err)
}
// Create the payload
payload := &remote.Payload{
Data: buf.Bytes(),
}
if len(payload.Data) == 0 {
return nil, nil
}
// Check for the MD5
if raw := resp.Header.Get("Content-MD5"); raw != "" {
md5, err := base64.StdEncoding.DecodeString(raw)
if err != nil {
return nil, fmt.Errorf("Failed to decode Content-MD5 '%s': %v", raw, err)
}
payload.MD5 = md5
} else {
// Generate the MD5
hash := md5.Sum(payload.Data)
payload.MD5 = hash[:]
}
return payload, nil
}
func (c *stateClient) Put(state []byte) error {
// Get the target URL
base := c.url()
// Generate the MD5
hash := md5.Sum(state)
b64 := base64.StdEncoding.EncodeToString(hash[:])
// Make the HTTP client and request
req, err := retryablehttp.NewRequest("PUT", base.String(), bytes.NewReader(state))
if err != nil {
return fmt.Errorf("Failed to make HTTP request: %v", err)
}
// Prepare the request
req.Header.Set(atlasTokenHeader, c.AccessToken)
req.Header.Set("Content-MD5", b64)
req.Header.Set("Content-Type", "application/json")
req.ContentLength = int64(len(state))
// Make the request
client, err := c.http()
if err != nil {
return err
}
resp, err := client.Do(req)
if err != nil {
return fmt.Errorf("Failed to upload state: %v", err)
}
defer resp.Body.Close()
// Handle the error codes
switch resp.StatusCode {
case http.StatusOK:
return nil
case http.StatusConflict:
return c.handleConflict(c.readBody(resp.Body), state)
default:
return fmt.Errorf(
"HTTP error: %d\n\nBody: %s",
resp.StatusCode, c.readBody(resp.Body))
}
}
func (c *stateClient) Delete() error {
// Make the HTTP request
req, err := retryablehttp.NewRequest("DELETE", c.url().String(), nil)
if err != nil {
return fmt.Errorf("Failed to make HTTP request: %v", err)
}
req.Header.Set(atlasTokenHeader, c.AccessToken)
// Make the request
client, err := c.http()
if err != nil {
return err
}
resp, err := client.Do(req)
if err != nil {
return fmt.Errorf("Failed to delete state: %v", err)
}
defer resp.Body.Close()
// Handle the error codes
switch resp.StatusCode {
case http.StatusOK:
return nil
case http.StatusNoContent:
return nil
case http.StatusNotFound:
return nil
default:
return fmt.Errorf(
"HTTP error: %d\n\nBody: %s",
resp.StatusCode, c.readBody(resp.Body))
}
}
func (c *stateClient) readBody(b io.Reader) string {
var buf bytes.Buffer
if _, err := io.Copy(&buf, b); err != nil {
return fmt.Sprintf("Error reading body: %s", err)
}
result := buf.String()
if result == "" {
result = "<empty>"
}
return result
}
func (c *stateClient) url() *url.URL {
values := url.Values{}
values.Add("atlas_run_id", c.RunId)
return &url.URL{
Scheme: c.ServerURL.Scheme,
Host: c.ServerURL.Host,
Path: path.Join("api/v1/terraform/state", c.User, c.Name),
RawQuery: values.Encode(),
}
}
func (c *stateClient) http() (*retryablehttp.Client, error) {
if c.HTTPClient != nil {
return c.HTTPClient, nil
}
tlsConfig := &tls.Config{}
err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{
CAFile: os.Getenv("ATLAS_CAFILE"),
CAPath: os.Getenv("ATLAS_CAPATH"),
})
if err != nil {
return nil, err
}
rc := retryablehttp.NewClient()
rc.CheckRetry = func(resp *http.Response, err error) (bool, error) {
if err != nil {
// don't bother retrying if the certs don't match
if err, ok := err.(*url.Error); ok {
if _, ok := err.Err.(x509.UnknownAuthorityError); ok {
return false, nil
}
}
// continue retrying
return true, nil
}
return retryablehttp.DefaultRetryPolicy(resp, err)
}
t := cleanhttp.DefaultTransport()
t.TLSClientConfig = tlsConfig
rc.HTTPClient.Transport = t
c.HTTPClient = rc
return rc, nil
}
// Atlas returns an HTTP 409 - Conflict if the pushed state reports the same
// Serial number but the checksum of the raw content differs. This can
// sometimes happen when Terraform changes state representation internally
// between versions in a way that's semantically neutral but affects the JSON
// output and therefore the checksum.
//
// Here we detect and handle this situation by ticking the serial and retrying
// iff for the previous state and the proposed state:
//
// * the serials match
// * the parsed states are Equal (semantically equivalent)
//
// In other words, in this situation Terraform can override Atlas's detected
// conflict by asserting that the state it is pushing is indeed correct.
func (c *stateClient) handleConflict(msg string, state []byte) error {
log.Printf("[DEBUG] Handling Atlas conflict response: %s", msg)
if c.conflictHandlingAttempted {
log.Printf("[DEBUG] Already attempted conflict resolution; returning conflict.")
} else {
c.conflictHandlingAttempted = true
log.Printf("[DEBUG] Atlas reported conflict, checking for equivalent states.")
payload, err := c.Get()
if err != nil {
return conflictHandlingError(err)
}
currentState, err := terraform.ReadState(bytes.NewReader(payload.Data))
if err != nil {
return conflictHandlingError(err)
}
proposedState, err := terraform.ReadState(bytes.NewReader(state))
if err != nil {
return conflictHandlingError(err)
}
if statesAreEquivalent(currentState, proposedState) {
log.Printf("[DEBUG] States are equivalent, incrementing serial and retrying.")
proposedState.Serial++
var buf bytes.Buffer
if err := terraform.WriteState(proposedState, &buf); err != nil {
return conflictHandlingError(err)
}
return c.Put(buf.Bytes())
} else {
log.Printf("[DEBUG] States are not equivalent, returning conflict.")
}
}
return fmt.Errorf(
"Atlas detected a remote state conflict.\n\nMessage: %s", msg)
}
func conflictHandlingError(err error) error {
return fmt.Errorf(
"Error while handling a conflict response from Atlas: %s", err)
}
func statesAreEquivalent(current, proposed *terraform.State) bool {
return current.Serial == proposed.Serial && current.Equal(proposed)
}


@ -0,0 +1,384 @@
package atlas
import (
"bytes"
"crypto/md5"
"crypto/tls"
"crypto/x509"
"encoding/json"
"net/http"
"net/http/httptest"
"net/url"
"os"
"testing"
"time"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/state/remote"
"github.com/hashicorp/terraform/terraform"
)
func testStateClient(t *testing.T, c map[string]interface{}) remote.Client {
b := backend.TestBackendConfig(t, &Backend{}, c)
raw, err := b.State(backend.DefaultStateName)
if err != nil {
t.Fatalf("err: %s", err)
}
s := raw.(*remote.State)
return s.Client
}
func TestStateClient_impl(t *testing.T) {
var _ remote.Client = new(stateClient)
}
func TestStateClient(t *testing.T) {
acctest.RemoteTestPrecheck(t)
token := os.Getenv("ATLAS_TOKEN")
if token == "" {
t.Skipf("skipping, ATLAS_TOKEN must be set")
}
client := testStateClient(t, map[string]interface{}{
"access_token": token,
"name": "hashicorp/test-remote-state",
})
remote.TestClient(t, client)
}
func TestStateClient_noRetryOnBadCerts(t *testing.T) {
acctest.RemoteTestPrecheck(t)
client := testStateClient(t, map[string]interface{}{
"access_token": "NOT_REQUIRED",
"name": "hashicorp/test-remote-state",
})
ac := client.(*stateClient)
// trigger the StateClient to build the http client and assign HTTPClient
httpClient, err := ac.http()
if err != nil {
t.Fatal(err)
}
// remove the CA certs from the client
brokenCfg := &tls.Config{
RootCAs: new(x509.CertPool),
}
httpClient.HTTPClient.Transport.(*http.Transport).TLSClientConfig = brokenCfg
// Instrument CheckRetry to make sure we didn't retry
retries := 0
oldCheck := httpClient.CheckRetry
httpClient.CheckRetry = func(resp *http.Response, err error) (bool, error) {
if retries > 0 {
t.Fatal("retried after certificate error")
}
retries++
return oldCheck(resp, err)
}
_, err = client.Get()
if err != nil {
if err, ok := err.(*url.Error); ok {
if _, ok := err.Err.(x509.UnknownAuthorityError); ok {
return
}
}
}
t.Fatalf("expected x509.UnknownAuthorityError, got %v", err)
}
func TestStateClient_ReportedConflictEqualStates(t *testing.T) {
fakeAtlas := newFakeAtlas(t, testStateModuleOrderChange)
srv := fakeAtlas.Server()
defer srv.Close()
client := testStateClient(t, map[string]interface{}{
"access_token": "sometoken",
"name": "someuser/some-test-remote-state",
"address": srv.URL,
})
state, err := terraform.ReadState(bytes.NewReader(testStateModuleOrderChange))
if err != nil {
t.Fatalf("err: %s", err)
}
var stateJson bytes.Buffer
if err := terraform.WriteState(state, &stateJson); err != nil {
t.Fatalf("err: %s", err)
}
if err := client.Put(stateJson.Bytes()); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestStateClient_NoConflict(t *testing.T) {
fakeAtlas := newFakeAtlas(t, testStateSimple)
srv := fakeAtlas.Server()
defer srv.Close()
client := testStateClient(t, map[string]interface{}{
"access_token": "sometoken",
"name": "someuser/some-test-remote-state",
"address": srv.URL,
})
state, err := terraform.ReadState(bytes.NewReader(testStateSimple))
if err != nil {
t.Fatalf("err: %s", err)
}
fakeAtlas.NoConflictAllowed(true)
var stateJson bytes.Buffer
if err := terraform.WriteState(state, &stateJson); err != nil {
t.Fatalf("err: %s", err)
}
if err := client.Put(stateJson.Bytes()); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestStateClient_LegitimateConflict(t *testing.T) {
fakeAtlas := newFakeAtlas(t, testStateSimple)
srv := fakeAtlas.Server()
defer srv.Close()
client := testStateClient(t, map[string]interface{}{
"access_token": "sometoken",
"name": "someuser/some-test-remote-state",
"address": srv.URL,
})
state, err := terraform.ReadState(bytes.NewReader(testStateSimple))
if err != nil {
t.Fatalf("err: %s", err)
}
var buf bytes.Buffer
terraform.WriteState(state, &buf)
// Changing the state but not the serial. Should generate a conflict.
state.RootModule().Outputs["drift"] = &terraform.OutputState{
Type: "string",
Sensitive: false,
Value: "happens",
}
var stateJson bytes.Buffer
if err := terraform.WriteState(state, &stateJson); err != nil {
t.Fatalf("err: %s", err)
}
if err := client.Put(stateJson.Bytes()); err == nil {
t.Fatal("Expected error from state conflict, got none.")
}
}
func TestStateClient_UnresolvableConflict(t *testing.T) {
fakeAtlas := newFakeAtlas(t, testStateSimple)
// Something unexpected causes Atlas to conflict in a way that we can't fix.
fakeAtlas.AlwaysConflict(true)
srv := fakeAtlas.Server()
defer srv.Close()
client := testStateClient(t, map[string]interface{}{
"access_token": "sometoken",
"name": "someuser/some-test-remote-state",
"address": srv.URL,
})
state, err := terraform.ReadState(bytes.NewReader(testStateSimple))
if err != nil {
t.Fatalf("err: %s", err)
}
var stateJson bytes.Buffer
if err := terraform.WriteState(state, &stateJson); err != nil {
t.Fatalf("err: %s", err)
}
doneCh := make(chan struct{})
go func() {
defer close(doneCh)
if err := client.Put(stateJson.Bytes()); err == nil {
t.Fatal("Expected error from state conflict, got none.")
}
}()
select {
case <-doneCh:
// OK
case <-time.After(500 * time.Millisecond):
t.Fatalf("Timed out after 500ms, probably because retrying infinitely.")
}
}
// Stub Atlas HTTP API for a given state JSON string; does checksum-based
// conflict detection equivalent to Atlas's.
type fakeAtlas struct {
state []byte
t *testing.T
// Used to test that we only do the special conflict handling retry once.
alwaysConflict bool
// Used to fail the test immediately if a conflict happens.
noConflictAllowed bool
}
func newFakeAtlas(t *testing.T, state []byte) *fakeAtlas {
return &fakeAtlas{
state: state,
t: t,
}
}
func (f *fakeAtlas) Server() *httptest.Server {
return httptest.NewServer(http.HandlerFunc(f.handler))
}
func (f *fakeAtlas) CurrentState() *terraform.State {
// we read the state manually here, because terraform may alter state
// during read
currentState := &terraform.State{}
err := json.Unmarshal(f.state, currentState)
if err != nil {
f.t.Fatalf("err: %s", err)
}
return currentState
}
func (f *fakeAtlas) CurrentSerial() int64 {
return f.CurrentState().Serial
}
func (f *fakeAtlas) CurrentSum() [md5.Size]byte {
return md5.Sum(f.state)
}
func (f *fakeAtlas) AlwaysConflict(b bool) {
f.alwaysConflict = b
}
func (f *fakeAtlas) NoConflictAllowed(b bool) {
f.noConflictAllowed = b
}
func (f *fakeAtlas) handler(resp http.ResponseWriter, req *http.Request) {
// access tokens should only be sent as a header
if req.FormValue("access_token") != "" {
http.Error(resp, "access_token in request params", http.StatusBadRequest)
return
}
if req.Header.Get(atlasTokenHeader) == "" {
http.Error(resp, "missing access token", http.StatusBadRequest)
return
}
switch req.Method {
case "GET":
// Respond with the current stored state.
resp.Header().Set("Content-Type", "application/json")
resp.Write(f.state)
case "PUT":
var buf bytes.Buffer
buf.ReadFrom(req.Body)
sum := md5.Sum(buf.Bytes())
// we read the state manually here, because terraform may alter state
// during read
state := &terraform.State{}
err := json.Unmarshal(buf.Bytes(), state)
if err != nil {
f.t.Fatalf("err: %s", err)
}
conflict := f.CurrentSerial() == state.Serial && f.CurrentSum() != sum
conflict = conflict || f.alwaysConflict
if conflict {
if f.noConflictAllowed {
f.t.Fatal("Got conflict when NoConflictAllowed was set.")
}
http.Error(resp, "Conflict", 409)
} else {
f.state = buf.Bytes()
resp.WriteHeader(200)
}
}
}
// This is a tfstate file with the module order changed, which is a structural
// but not a semantic difference. Terraform will sort these modules as it
// loads the state.
var testStateModuleOrderChange = []byte(
`{
"version": 3,
"serial": 1,
"modules": [
{
"path": [
"root",
"child2",
"grandchild"
],
"outputs": {
"foo": {
"sensitive": false,
"type": "string",
"value": "bar"
}
},
"resources": null
},
{
"path": [
"root",
"child1",
"grandchild"
],
"outputs": {
"foo": {
"sensitive": false,
"type": "string",
"value": "bar"
}
},
"resources": null
}
]
}
`)
var testStateSimple = []byte(
`{
"version": 3,
"serial": 2,
"lineage": "c00ad9ac-9b35-42fe-846e-b06f0ef877e9",
"modules": [
{
"path": [
"root"
],
"outputs": {
"foo": {
"sensitive": false,
"type": "string",
"value": "bar"
}
},
"resources": {},
"depends_on": []
}
]
}
`)

166
backend/backend.go Normal file

@ -0,0 +1,166 @@
// Package backend provides interfaces that the CLI uses to interact with
// Terraform. A backend provides the abstraction that allows the same CLI
// to simultaneously support both local and remote operations for seamlessly
// using Terraform in a team environment.
package backend
import (
"context"
"errors"
"time"
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/terraform"
)
// This is the name of the default, initial state that every backend
// must have. This state cannot be deleted.
const DefaultStateName = "default"
// Error value to return when a named state operation isn't supported.
// This must be returned rather than a custom error so that the Terraform
// CLI can detect it and handle it appropriately.
var ErrNamedStatesNotSupported = errors.New("named states not supported")
// Backend is the minimal interface that must be implemented to enable Terraform.
type Backend interface {
// Ask for input and configure the backend. Similar to
// terraform.ResourceProvider.
Input(terraform.UIInput, *terraform.ResourceConfig) (*terraform.ResourceConfig, error)
Validate(*terraform.ResourceConfig) ([]string, []error)
Configure(*terraform.ResourceConfig) error
// State returns the current state for this environment. This state may
// not be loaded locally: the proper APIs should be called on state.State
// to load the state. If the state.State is a state.Locker, it's up to the
// caller to call Lock and Unlock as needed.
//
// If the named state doesn't exist it will be created. The "default" state
// is always assumed to exist.
State(name string) (state.State, error)
// DeleteState removes the named state if it exists. It is an error
// to delete the default state.
//
// DeleteState does not prevent deleting a state that is in use. It is the
// responsibility of the caller to hold a Lock on the state when calling
// this method.
DeleteState(name string) error
// States returns a list of configured named states.
States() ([]string, error)
}
// Enhanced implements additional behavior on top of a normal backend.
//
// Enhanced backends allow customizing the behavior of Terraform operations.
// This allows Terraform to potentially run operations remotely, load
// configurations from external sources, etc.
type Enhanced interface {
Backend
// Operation performs a Terraform operation such as refresh, plan, apply.
// It is up to the implementation to determine what "performing" means.
// This DOES NOT BLOCK. The context returned as part of RunningOperation
// should be used to block for completion.
// If the state used in the operation can be locked, it is the
// responsibility of the Backend to lock the state for the duration of the
// running operation.
Operation(context.Context, *Operation) (*RunningOperation, error)
}
// Local implements additional behavior on a Backend that allows local
// operations in addition to remote operations.
//
// This enables more behaviors of Terraform that require more data such
// as `console`, `import`, `graph`. These require direct access to
// configurations, variables, and more. Not all backends may support this
// so we separate it out into its own optional interface.
type Local interface {
// Context returns a runnable terraform Context. The operation parameter
// doesn't need a Type set but it needs other options set such as Module.
Context(*Operation) (*terraform.Context, state.State, error)
}
// An operation represents an operation for Terraform to execute.
//
// Note that not all fields are supported by all backends and can result
// in an error if set. All backend implementations should show user-friendly
// errors explaining any incorrectly set values. For example, the local
// backend doesn't support a PlanId being set.
//
// The operation options are purposely designed to have maximal compatibility
// between Terraform and Terraform Servers (a commercial product offered by
HashiCorp). Therefore, it isn't expected that other implementations support
// every possible option. The struct here is generalized in order to allow
// even partial implementations to exist in the open, without walling off
// remote functionality 100% behind a commercial wall. Anyone can implement
// against this interface and have Terraform interact with it just as it
// would with HashiCorp-provided Terraform Servers.
type Operation struct {
// Type is the operation to perform.
Type OperationType
// PlanId is an opaque value that backends can use to execute a specific
// plan for an apply operation.
//
// PlanOutBackend is the backend to store with the plan. This is the
// backend that will be used when applying the plan.
PlanId string
PlanRefresh bool // PlanRefresh will do a refresh before a plan
PlanOutPath string // PlanOutPath is the path to save the plan
PlanOutBackend *terraform.BackendState
// Module settings specify the root module to use for operations.
Module *module.Tree
// Plan is a plan that was passed as an argument. This is valid for
// plan and apply arguments but may not work for all backends.
Plan *terraform.Plan
// The options below are more self-explanatory and affect the runtime
// behavior of the operation.
Destroy bool
Targets []string
Variables map[string]interface{}
// Input/output/control options.
UIIn terraform.UIInput
UIOut terraform.UIOutput
// If LockState is true, the Operation must Lock any
// state.Lockers for its duration, and Unlock when complete.
LockState bool
// The duration to retry obtaining a State lock.
StateLockTimeout time.Duration
// Environment is the named state that should be loaded from the Backend.
Environment string
}
// RunningOperation is the result of starting an operation.
type RunningOperation struct {
// Context should be used to track Done and Err for errors.
//
// For implementers of a backend, this context should not wrap the
// passed in context. Otherwise, canceling the parent context will
// immediately mark this context as "done" but those aren't the semantics
// we want: we want this context to be done only when the operation itself
// is fully done.
context.Context
// Err is the error of the operation. This is populated after
// the operation has completed.
Err error
// PlanEmpty is populated after a Plan operation completes without error
// to note whether a plan is empty or has changes.
PlanEmpty bool
// State is the final state after the operation completed. Persisting
// this state is managed by the backend. This should only be read
// after the operation completes to avoid read/write races.
State *terraform.State
}
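To make the `Backend` interface above concrete, here is a hedged usage sketch (not part of this commit) that configures the local backend introduced in this change set and reads its default state; the `path` value is an arbitrary example:

```go
package main

import (
	"log"

	"github.com/hashicorp/terraform/backend"
	backendlocal "github.com/hashicorp/terraform/backend/local"
	"github.com/hashicorp/terraform/config"
	"github.com/hashicorp/terraform/terraform"
)

func main() {
	var b backend.Backend = &backendlocal.Local{}

	// Build the backend's own configuration as a ResourceConfig.
	raw, err := config.NewRawConfig(map[string]interface{}{
		"path": "terraform.tfstate", // example path, not prescriptive
	})
	if err != nil {
		log.Fatal(err)
	}
	conf := terraform.NewResourceConfig(raw)

	// Validate and Configure before asking the backend for state.
	if _, errs := b.Validate(conf); len(errs) > 0 {
		log.Fatal(errs[0])
	}
	if err := b.Configure(conf); err != nil {
		log.Fatal(err)
	}

	// Load the default state. If the returned state.State is a state.Locker,
	// the caller is responsible for Lock/Unlock.
	s, err := b.State(backend.DefaultStateName)
	if err != nil {
		log.Fatal(err)
	}
	if err := s.RefreshState(); err != nil {
		log.Fatal(err)
	}
	tfState := s.State() // may be nil if no state has been written yet
	log.Printf("loaded state: %v", tfState)
}
```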

74
backend/cli.go Normal file

@ -0,0 +1,74 @@
package backend
import (
"github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/cli"
"github.com/mitchellh/colorstring"
)
// CLI is an optional interface that can be implemented to be initialized
// with information from the Terraform CLI. If this is implemented, this
// initialization function will be called with data to help interact better
// with a CLI.
//
// This interface was created to improve backend interaction with the
// official Terraform CLI while making it optional for API users to have
// to provide full CLI interaction to every backend.
//
// If you're implementing a Backend, it is acceptable to require CLI
// initialization. In this case, your backend should be coded to error
// on other methods (such as State, Operation) if CLI initialization was not
// done with all required fields.
type CLI interface {
Backend
// CLIInit is called once with options. The options passed to this
// function may not be modified after calling this since they can be
// read/written at any time by the Backend implementation.
//
// This may be called before or after Configure is called, so if settings
// here affect configurable settings, care should be taken to handle
// whether they should be overwritten or not.
CLIInit(*CLIOpts) error
}
// CLIOpts are the options passed into CLIInit for the CLI interface.
//
// These options represent the functionality the CLI exposes and often
// maps to meta-flags available on every CLI (such as -input).
//
// When implementing a backend, it isn't expected that every option applies.
// Your backend should be documented clearly to explain to end users what
options have an effect and which won't. In some cases, it may even make sense
// to error in your backend when an option is set so that users don't make
// a critically incorrect assumption about behavior.
type CLIOpts struct {
// CLI and Colorize control the CLI output. If CLI is nil then no CLI
// output will be done. If CLIColor is nil then no coloring will be done.
CLI cli.Ui
CLIColor *colorstring.Colorize
// StatePath is the local path where state is read from.
//
// StateOutPath is the local path where the state will be written.
// If this is empty, it will default to StatePath.
//
// StateBackupPath is the local path where a backup file will be written.
// If this is empty, no backup will be taken.
StatePath string
StateOutPath string
StateBackupPath string
// ContextOpts are the base context options to set when initializing a
// Terraform context. Many of these will be overridden or merged by
// Operation. See Operation for more details.
ContextOpts *terraform.ContextOpts
// Input will ask for necessary input prior to performing any operations.
//
// Validation will perform validation prior to running an operation. The
// variable naming doesn't match the style of others since we have a func
// Validate.
Input bool
Validation bool
}

73
backend/init/init.go Normal file

@ -0,0 +1,73 @@
// Package init contains the list of backends that can be initialized and
// basic helper functions for initializing those backends.
package init
import (
"sync"
"github.com/hashicorp/terraform/backend"
backendatlas "github.com/hashicorp/terraform/backend/atlas"
backendlegacy "github.com/hashicorp/terraform/backend/legacy"
backendlocal "github.com/hashicorp/terraform/backend/local"
backendconsul "github.com/hashicorp/terraform/backend/remote-state/consul"
backendinmem "github.com/hashicorp/terraform/backend/remote-state/inmem"
backendS3 "github.com/hashicorp/terraform/backend/remote-state/s3"
)
// backends is the list of available backends. This is a global variable
// because backends are currently hardcoded into Terraform and can't be
// modified without recompilation.
//
// To read an available backend, use the Backend function. This ensures
// safe concurrent read access to the list of built-in backends.
//
// Backends are hardcoded into Terraform because the API for backends uses
// complex structures and supporting that over the plugin system is currently
// prohibitively difficult. For those wanting to implement a custom backend,
// they can do so with recompilation.
var backends map[string]func() backend.Backend
var backendsLock sync.Mutex
func init() {
// Our hardcoded backends. We don't need to acquire a lock here
// since init() code is serial and can't spawn goroutines.
backends = map[string]func() backend.Backend{
"atlas": func() backend.Backend { return &backendatlas.Backend{} },
"local": func() backend.Backend { return &backendlocal.Local{} },
"consul": func() backend.Backend { return backendconsul.New() },
"inmem": func() backend.Backend { return backendinmem.New() },
"s3": func() backend.Backend { return backendS3.New() },
}
// Add the legacy remote backends that haven't yet been converted to
// the new backend API.
backendlegacy.Init(backends)
}
// Backend returns the initialization factory for the given backend, or
// nil if none exists.
func Backend(name string) func() backend.Backend {
backendsLock.Lock()
defer backendsLock.Unlock()
return backends[name]
}
// Set sets a new backend in the list of backends. If f is nil then the
// backend will be removed from the map. If this backend already exists
// then it will be overwritten.
//
// This method sets this backend globally and care should be taken to do
// this only before Terraform is executing to prevent odd behavior of backends
// changing mid-execution.
func Set(name string, f func() backend.Backend) {
backendsLock.Lock()
defer backendsLock.Unlock()
if f == nil {
delete(backends, name)
return
}
backends[name] = f
}
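As a hedged illustration of the registry above (not part of this commit), a caller can look up a built-in backend by name and register a custom factory before Terraform starts executing; the `mylocal` name and `custom.tfstate` path are invented for the example:

```go
package main

import (
	"log"

	"github.com/hashicorp/terraform/backend"
	backendinit "github.com/hashicorp/terraform/backend/init"
	backendlocal "github.com/hashicorp/terraform/backend/local"
)

func main() {
	// Look up a built-in factory; a nil result means the name is unknown.
	f := backendinit.Backend("consul")
	if f == nil {
		log.Fatal("no backend named consul")
	}
	b := f()
	_ = b // configure and use b as a backend.Backend

	// Register (or override) a backend. Do this before Terraform is
	// executing, since the registry is global.
	backendinit.Set("mylocal", func() backend.Backend {
		return &backendlocal.Local{StatePath: "custom.tfstate"}
	})
}
```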

75
backend/legacy/backend.go Normal file

@ -0,0 +1,75 @@
package legacy
import (
"fmt"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/state/remote"
"github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/mapstructure"
)
// Backend is an implementation of backend.Backend for legacy remote state
// clients.
type Backend struct {
// Type is the type of remote state client to support
Type string
// client is set after Configure is called and client is initialized.
client remote.Client
}
func (b *Backend) Input(
ui terraform.UIInput, c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
// Return the config as-is, legacy doesn't support input
return c, nil
}
func (b *Backend) Validate(*terraform.ResourceConfig) ([]string, []error) {
// No validation was supported for old clients
return nil, nil
}
func (b *Backend) Configure(c *terraform.ResourceConfig) error {
// Legacy remote state was only map[string]string config
var conf map[string]string
if err := mapstructure.Decode(c.Raw, &conf); err != nil {
return fmt.Errorf(
"Failed to decode %q configuration: %s\n\n"+
"This backend expects all configuration keys and values to be\n"+
"strings. Please verify your configuration and try again.",
b.Type, err)
}
client, err := remote.NewClient(b.Type, conf)
if err != nil {
return fmt.Errorf(
"Failed to configure remote backend %q: %s",
b.Type, err)
}
// Set our client
b.client = client
return nil
}
func (b *Backend) State(name string) (state.State, error) {
if name != backend.DefaultStateName {
return nil, backend.ErrNamedStatesNotSupported
}
if b.client == nil {
panic("State called with nil remote state client")
}
return &remote.State{Client: b.client}, nil
}
func (b *Backend) States() ([]string, error) {
return nil, backend.ErrNamedStatesNotSupported
}
func (b *Backend) DeleteState(string) error {
return backend.ErrNamedStatesNotSupported
}


@ -0,0 +1,49 @@
package legacy
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/terraform"
)
func TestBackend_impl(t *testing.T) {
var _ backend.Backend = new(Backend)
}
func TestBackend(t *testing.T) {
td, err := ioutil.TempDir("", "tf")
if err != nil {
t.Fatalf("err: %s", err)
}
defer os.RemoveAll(td)
b := &Backend{Type: "local"}
conf := terraform.NewResourceConfig(config.TestRawConfig(t, map[string]interface{}{
"path": filepath.Join(td, "data"),
}))
// Config
if err := b.Configure(conf); err != nil {
t.Fatalf("err: %s", err)
}
// Grab state
s, err := b.State(backend.DefaultStateName)
if err != nil {
t.Fatalf("err: %s", err)
}
if s == nil {
t.Fatalf("state is nil")
}
// Test it
s.WriteState(state.TestStateInitial())
s.PersistState()
state.TestState(t, s)
}

28
backend/legacy/legacy.go Normal file

@ -0,0 +1,28 @@
// Package legacy contains a backend implementation that can be used
// with the legacy remote state clients.
package legacy
import (
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/state/remote"
)
// Init updates the backend/init package map of initializers to support
// all the remote state types.
//
// If a type is already in the map, it will not be added. This will allow
// us to slowly convert the legacy types to first-class backends.
func Init(m map[string]func() backend.Backend) {
for k, _ := range remote.BuiltinClients {
if _, ok := m[k]; !ok {
// Copy the "k" value since the variable "k" is reused for
// each key (address doesn't change).
typ := k
// Build the factory function to return a backend of typ
m[k] = func() backend.Backend {
return &Backend{Type: typ}
}
}
}
}


@ -0,0 +1,34 @@
package legacy
import (
"testing"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/state/remote"
)
func TestInit(t *testing.T) {
m := make(map[string]func() backend.Backend)
Init(m)
for k, _ := range remote.BuiltinClients {
b, ok := m[k]
if !ok {
t.Fatalf("missing: %s", k)
}
if typ := b().(*Backend).Type; typ != k {
t.Fatalf("bad type: %s", typ)
}
}
}
func TestInit_ignoreExisting(t *testing.T) {
m := make(map[string]func() backend.Backend)
m["local"] = nil
Init(m)
if v, ok := m["local"]; !ok || v != nil {
t.Fatalf("bad: %#v", m)
}
}

393
backend/local/backend.go Normal file

@ -0,0 +1,393 @@
package local
import (
"context"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/cli"
"github.com/mitchellh/colorstring"
)
const (
DefaultEnvDir = "terraform.tfstate.d"
DefaultEnvFile = "environment"
DefaultStateFilename = "terraform.tfstate"
DefaultDataDir = ".terraform"
DefaultBackupExtension = ".backup"
)
// Local is an implementation of EnhancedBackend that performs all operations
// locally. This is the "default" backend and implements normal Terraform
// behavior as it is well known.
type Local struct {
// CLI and Colorize control the CLI output. If CLI is nil then no CLI
// output will be done. If CLIColor is nil then no coloring will be done.
CLI cli.Ui
CLIColor *colorstring.Colorize
// The State* paths are set from the CLI options, and may be left blank to
// use the defaults. If the actual paths for the local backend state are
// needed, use the StatePaths method.
//
// StatePath is the local path where state is read from.
//
// StateOutPath is the local path where the state will be written.
// If this is empty, it will default to StatePath.
//
// StateBackupPath is the local path where a backup file will be written.
// Set this to "-" to disable state backup.
//
// StateEnvPath is the path to the folder containing environments. This
// defaults to DefaultEnvDir if not set.
StatePath string
StateOutPath string
StateBackupPath string
StateEnvDir string
// We only want to create a single instance of a local state, so store them
// here as they're loaded.
states map[string]state.State
// Terraform context. Many of these will be overridden or merged by
// Operation. See Operation for more details.
ContextOpts *terraform.ContextOpts
// OpInput will ask for necessary input prior to performing any operations.
//
// OpValidation will perform validation prior to running an operation. The
// variable naming doesn't match the style of others since we have a func
// Validate.
OpInput bool
OpValidation bool
// Backend, if non-nil, will use this backend for non-enhanced behavior.
// This allows local behavior with remote state storage. It is a way to
// "upgrade" a non-enhanced backend to an enhanced backend with typical
// behavior.
//
// If this is nil, local performs normal state loading and storage.
Backend backend.Backend
schema *schema.Backend
opLock sync.Mutex
once sync.Once
}
func (b *Local) Input(
ui terraform.UIInput, c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
b.once.Do(b.init)
f := b.schema.Input
if b.Backend != nil {
f = b.Backend.Input
}
return f(ui, c)
}
func (b *Local) Validate(c *terraform.ResourceConfig) ([]string, []error) {
b.once.Do(b.init)
f := b.schema.Validate
if b.Backend != nil {
f = b.Backend.Validate
}
return f(c)
}
func (b *Local) Configure(c *terraform.ResourceConfig) error {
b.once.Do(b.init)
f := b.schema.Configure
if b.Backend != nil {
f = b.Backend.Configure
}
return f(c)
}
func (b *Local) States() ([]string, error) {
// If we have a backend handling state, defer to that.
if b.Backend != nil {
return b.Backend.States()
}
// the listing always starts with "default"
envs := []string{backend.DefaultStateName}
entries, err := ioutil.ReadDir(b.stateEnvDir())
// no error if there's no envs configured
if os.IsNotExist(err) {
return envs, nil
}
if err != nil {
return nil, err
}
var listed []string
for _, entry := range entries {
if entry.IsDir() {
listed = append(listed, filepath.Base(entry.Name()))
}
}
sort.Strings(listed)
envs = append(envs, listed...)
return envs, nil
}
// DeleteState removes a named state.
// The "default" state cannot be removed.
func (b *Local) DeleteState(name string) error {
// If we have a backend handling state, defer to that.
if b.Backend != nil {
return b.Backend.DeleteState(name)
}
if name == "" {
return errors.New("empty state name")
}
if name == backend.DefaultStateName {
return errors.New("cannot delete default state")
}
delete(b.states, name)
return os.RemoveAll(filepath.Join(b.stateEnvDir(), name))
}
func (b *Local) State(name string) (state.State, error) {
// If we have a backend handling state, defer to that.
if b.Backend != nil {
return b.Backend.State(name)
}
if s, ok := b.states[name]; ok {
return s, nil
}
if err := b.createState(name); err != nil {
return nil, err
}
statePath, stateOutPath, backupPath := b.StatePaths(name)
// Otherwise, we need to load the state.
var s state.State = &state.LocalState{
Path: statePath,
PathOut: stateOutPath,
}
// If we are backing up the state, wrap it
if backupPath != "" {
s = &state.BackupState{
Real: s,
Path: backupPath,
}
}
if b.states == nil {
b.states = map[string]state.State{}
}
b.states[name] = s
return s, nil
}
// Operation implements backend.Enhanced
//
// This will initialize an in-memory terraform.Context to perform the
// operation within this process.
//
// The given operation parameter will be merged with the ContextOpts on
// the structure with the following rules. If a rule isn't specified and the
// name conflicts, assume that the field is overwritten if set.
func (b *Local) Operation(ctx context.Context, op *backend.Operation) (*backend.RunningOperation, error) {
// Determine the function to call for our operation
var f func(context.Context, *backend.Operation, *backend.RunningOperation)
switch op.Type {
case backend.OperationTypeRefresh:
f = b.opRefresh
case backend.OperationTypePlan:
f = b.opPlan
case backend.OperationTypeApply:
f = b.opApply
default:
return nil, fmt.Errorf(
"Unsupported operation type: %s\n\n"+
"This is a bug in Terraform and should be reported. The local backend\n"+
"is built-in to Terraform and should always support all operations.",
op.Type)
}
// Lock
b.opLock.Lock()
// Build our running operation
runningCtx, runningCtxCancel := context.WithCancel(context.Background())
runningOp := &backend.RunningOperation{Context: runningCtx}
// Do it
go func() {
defer b.opLock.Unlock()
defer runningCtxCancel()
f(ctx, op, runningOp)
}()
// Return
return runningOp, nil
}
// Colorize returns the Colorize structure that can be used for colorizing
// output. This is guaranteed to always return a non-nil value and so is useful
// as a helper to wrap any potentially colored strings.
func (b *Local) Colorize() *colorstring.Colorize {
if b.CLIColor != nil {
return b.CLIColor
}
return &colorstring.Colorize{
Colors: colorstring.DefaultColors,
Disable: true,
}
}
func (b *Local) init() {
b.schema = &schema.Backend{
Schema: map[string]*schema.Schema{
"path": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Default: "",
},
"environment_dir": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Default: "",
},
},
ConfigureFunc: b.schemaConfigure,
}
}
func (b *Local) schemaConfigure(ctx context.Context) error {
d := schema.FromContextBackendConfig(ctx)
// Set the path if it is set
pathRaw, ok := d.GetOk("path")
if ok {
path := pathRaw.(string)
if path == "" {
return fmt.Errorf("configured path is empty")
}
b.StatePath = path
b.StateOutPath = path
}
if raw, ok := d.GetOk("environment_dir"); ok {
path := raw.(string)
if path != "" {
b.StateEnvDir = path
}
}
return nil
}
// StatePaths returns the StatePath, StateOutPath, and StateBackupPath as
// configured from the CLI.
func (b *Local) StatePaths(name string) (string, string, string) {
statePath := b.StatePath
stateOutPath := b.StateOutPath
backupPath := b.StateBackupPath
if name == "" {
name = backend.DefaultStateName
}
if name == backend.DefaultStateName {
if statePath == "" {
statePath = DefaultStateFilename
}
} else {
statePath = filepath.Join(b.stateEnvDir(), name, DefaultStateFilename)
}
if stateOutPath == "" {
stateOutPath = statePath
}
switch backupPath {
case "-":
backupPath = ""
case "":
backupPath = stateOutPath + DefaultBackupExtension
}
return statePath, stateOutPath, backupPath
}
// this only ensures that the named directory exists
func (b *Local) createState(name string) error {
if name == backend.DefaultStateName {
return nil
}
stateDir := filepath.Join(b.stateEnvDir(), name)
s, err := os.Stat(stateDir)
if err == nil && s.IsDir() {
// no need to check for os.IsNotExist, since that is covered by os.MkdirAll
// which will catch the other possible errors as well.
return nil
}
err = os.MkdirAll(stateDir, 0755)
if err != nil {
return err
}
return nil
}
// stateEnvDir returns the directory where state environments are stored.
func (b *Local) stateEnvDir() string {
if b.StateEnvDir != "" {
return b.StateEnvDir
}
return DefaultEnvDir
}
// currentStateName returns the name of the current named state as set in the
// configuration files.
// If there are no configured environments, currentStateName returns "default"
func (b *Local) currentStateName() (string, error) {
contents, err := ioutil.ReadFile(filepath.Join(DefaultDataDir, DefaultEnvFile))
if os.IsNotExist(err) {
return backend.DefaultStateName, nil
}
if err != nil {
return "", err
}
if fromFile := strings.TrimSpace(string(contents)); fromFile != "" {
return fromFile, nil
}
return backend.DefaultStateName, nil
}
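
As a rough, standalone sketch of how the named-state methods above compose (assuming no delegated Backend and the default on-disk layout; the "dev" name is hypothetical and directories are created relative to the current working directory):

package main

import (
    "fmt"
    "log"

    "github.com/hashicorp/terraform/backend/local"
)

func main() {
    b := &local.Local{}

    // State creates the directory for a named state on first use and
    // returns a state manager wrapped with backups.
    if _, err := b.State("dev"); err != nil {
        log.Fatal(err)
    }

    // The listing always starts with "default", followed by the sorted
    // entries found under the environment directory.
    names, err := b.States()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(names) // e.g. [default dev]

    // Named states can be deleted; "default" cannot.
    if err := b.DeleteState("dev"); err != nil {
        log.Fatal(err)
    }
}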

@ -0,0 +1,196 @@
package local
import (
"context"
"fmt"
"log"
"strings"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/command/clistate"
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/terraform"
)
func (b *Local) opApply(
ctx context.Context,
op *backend.Operation,
runningOp *backend.RunningOperation) {
log.Printf("[INFO] backend/local: starting Apply operation")
// A local apply requires either a plan or a module; fail early if we were
// given neither (unless this is a destroy).
if op.Plan == nil && op.Module == nil && !op.Destroy {
runningOp.Err = fmt.Errorf(strings.TrimSpace(applyErrNoConfig))
return
}
// If we have a nil module at this point, then set it to an empty tree
// to avoid any potential crashes.
if op.Module == nil {
op.Module = module.NewEmptyTree()
}
// Setup our count hook that keeps track of resource changes
countHook := new(CountHook)
stateHook := new(StateHook)
if b.ContextOpts == nil {
b.ContextOpts = new(terraform.ContextOpts)
}
old := b.ContextOpts.Hooks
defer func() { b.ContextOpts.Hooks = old }()
b.ContextOpts.Hooks = append(b.ContextOpts.Hooks, countHook, stateHook)
// Get our context
tfCtx, opState, err := b.context(op)
if err != nil {
runningOp.Err = err
return
}
if op.LockState {
lockCtx, cancel := context.WithTimeout(ctx, op.StateLockTimeout)
defer cancel()
lockInfo := state.NewLockInfo()
lockInfo.Operation = op.Type.String()
lockID, err := clistate.Lock(lockCtx, opState, lockInfo, b.CLI, b.Colorize())
if err != nil {
runningOp.Err = errwrap.Wrapf("Error locking state: {{err}}", err)
return
}
defer func() {
if err := clistate.Unlock(opState, lockID, b.CLI, b.Colorize()); err != nil {
runningOp.Err = multierror.Append(runningOp.Err, err)
}
}()
}
// Setup the state
runningOp.State = tfCtx.State()
// If we weren't given a plan, then we refresh/plan
if op.Plan == nil {
// If we're refreshing before apply, perform that
if op.PlanRefresh {
log.Printf("[INFO] backend/local: apply calling Refresh")
_, err := tfCtx.Refresh()
if err != nil {
runningOp.Err = errwrap.Wrapf("Error refreshing state: {{err}}", err)
return
}
}
// Perform the plan
log.Printf("[INFO] backend/local: apply calling Plan")
if _, err := tfCtx.Plan(); err != nil {
runningOp.Err = errwrap.Wrapf("Error running plan: {{err}}", err)
return
}
}
// Setup our hook for continuous state updates
stateHook.State = opState
// Start the apply in a goroutine so that we can be interrupted.
var applyState *terraform.State
var applyErr error
doneCh := make(chan struct{})
go func() {
defer close(doneCh)
_, applyErr = tfCtx.Apply()
// we always want the state, even if apply failed
applyState = tfCtx.State()
/*
// Record any shadow errors for later
if err := ctx.ShadowError(); err != nil {
shadowErr = multierror.Append(shadowErr, multierror.Prefix(
err, "apply operation:"))
}
*/
}()
// Wait for the apply to finish or for us to be interrupted so
// we can handle it properly.
err = nil
select {
case <-ctx.Done():
if b.CLI != nil {
b.CLI.Output("Interrupt received. Gracefully shutting down...")
}
// Stop execution
go tfCtx.Stop()
// Wait for completion still
<-doneCh
case <-doneCh:
}
// Store the final state
runningOp.State = applyState
// Persist the state
if err := opState.WriteState(applyState); err != nil {
runningOp.Err = fmt.Errorf("Failed to save state: %s", err)
return
}
if err := opState.PersistState(); err != nil {
runningOp.Err = fmt.Errorf("Failed to save state: %s", err)
return
}
if applyErr != nil {
runningOp.Err = fmt.Errorf(
"Error applying plan:\n\n"+
"%s\n\n"+
"Terraform does not automatically rollback in the face of errors.\n"+
"Instead, your Terraform state file has been partially updated with\n"+
"any resources that successfully completed. Please address the error\n"+
"above and apply again to incrementally change your infrastructure.",
multierror.Flatten(applyErr))
return
}
// If we have a UI, output the results
if b.CLI != nil {
if op.Destroy {
b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
"[reset][bold][green]\n"+
"Destroy complete! Resources: %d destroyed.",
countHook.Removed)))
} else {
b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
"[reset][bold][green]\n"+
"Apply complete! Resources: %d added, %d changed, %d destroyed.",
countHook.Added,
countHook.Changed,
countHook.Removed)))
}
if countHook.Added > 0 || countHook.Changed > 0 {
b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
"[reset]\n"+
"The state of your infrastructure has been saved to the path\n"+
"below. This state is required to modify and destroy your\n"+
"infrastructure, so keep it safe. To inspect the complete state\n"+
"use the `terraform show` command.\n\n"+
"State path: %s",
b.StateOutPath)))
}
}
}
const applyErrNoConfig = `
No configuration files found!
Apply requires configuration to be present. Applying without a configuration
would mark everything for destruction, which is normally not what is desired.
If you would like to destroy everything, please run 'terraform destroy' instead
which does not require any configuration files.
`
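
The interrupt handling in opApply above (run the work in a goroutine, select on cancellation, ask the work to stop, then still wait for completion so the final state can be captured) is a general pattern. A minimal, hypothetical sketch of the same shape using only the standard library:

package main

import (
    "context"
    "fmt"
    "time"
)

// runCancellable starts work in a goroutine and waits for either completion
// or cancellation; on cancellation it asks the work to stop and still waits
// for the goroutine to finish.
func runCancellable(ctx context.Context, work, stop func()) {
    doneCh := make(chan struct{})
    go func() {
        defer close(doneCh)
        work()
    }()

    select {
    case <-ctx.Done():
        fmt.Println("Interrupt received. Gracefully shutting down...")
        go stop()
        <-doneCh // wait for completion anyway
    case <-doneCh:
    }
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    go func() {
        time.Sleep(10 * time.Millisecond)
        cancel()
    }()

    stopped := make(chan struct{})
    runCancellable(ctx,
        func() { <-stopped },      // stand-in for tfCtx.Apply()
        func() { close(stopped) }, // stand-in for tfCtx.Stop()
    )
}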

@ -0,0 +1,185 @@
package local
import (
"context"
"fmt"
"os"
"sync"
"testing"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/terraform"
)
func TestLocal_applyBasic(t *testing.T) {
b := TestLocal(t)
p := TestLocalProvider(t, b, "test")
p.ApplyReturn = &terraform.InstanceState{ID: "yes"}
mod, modCleanup := module.TestTree(t, "./test-fixtures/apply")
defer modCleanup()
op := testOperationApply()
op.Module = mod
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if run.Err != nil {
t.Fatalf("err: %s", err)
}
if p.RefreshCalled {
t.Fatal("refresh should not be called")
}
if !p.DiffCalled {
t.Fatal("diff should be called")
}
if !p.ApplyCalled {
t.Fatal("apply should be called")
}
checkState(t, b.StateOutPath, `
test_instance.foo:
ID = yes
`)
}
func TestLocal_applyEmptyDir(t *testing.T) {
b := TestLocal(t)
p := TestLocalProvider(t, b, "test")
p.ApplyReturn = &terraform.InstanceState{ID: "yes"}
op := testOperationApply()
op.Module = nil
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if run.Err == nil {
t.Fatal("should error")
}
if p.ApplyCalled {
t.Fatal("apply should not be called")
}
if _, err := os.Stat(b.StateOutPath); err == nil {
t.Fatal("should not exist")
}
}
func TestLocal_applyEmptyDirDestroy(t *testing.T) {
b := TestLocal(t)
p := TestLocalProvider(t, b, "test")
p.ApplyReturn = nil
op := testOperationApply()
op.Module = nil
op.Destroy = true
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if run.Err != nil {
t.Fatalf("err: %s", err)
}
if p.ApplyCalled {
t.Fatal("apply should not be called")
}
checkState(t, b.StateOutPath, `<no state>`)
}
func TestLocal_applyError(t *testing.T) {
b := TestLocal(t)
p := TestLocalProvider(t, b, "test")
var lock sync.Mutex
errored := false
p.ApplyFn = func(
info *terraform.InstanceInfo,
s *terraform.InstanceState,
d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
lock.Lock()
defer lock.Unlock()
if !errored && info.Id == "test_instance.bar" {
errored = true
return nil, fmt.Errorf("error")
}
return &terraform.InstanceState{ID: "foo"}, nil
}
p.DiffFn = func(
*terraform.InstanceInfo,
*terraform.InstanceState,
*terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
return &terraform.InstanceDiff{
Attributes: map[string]*terraform.ResourceAttrDiff{
"ami": &terraform.ResourceAttrDiff{
New: "bar",
},
},
}, nil
}
mod, modCleanup := module.TestTree(t, "./test-fixtures/apply-error")
defer modCleanup()
op := testOperationApply()
op.Module = mod
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if run.Err == nil {
t.Fatal("should error")
}
checkState(t, b.StateOutPath, `
test_instance.foo:
ID = foo
`)
}
func testOperationApply() *backend.Operation {
return &backend.Operation{
Type: backend.OperationTypeApply,
}
}
// testApplyState is just a common state that we use for testing apply.
func testApplyState() *terraform.State {
return &terraform.State{
Version: 2,
Modules: []*terraform.ModuleState{
&terraform.ModuleState{
Path: []string{"root"},
Resources: map[string]*terraform.ResourceState{
"test_instance.foo": &terraform.ResourceState{
Type: "test_instance",
Primary: &terraform.InstanceState{
ID: "bar",
},
},
},
},
},
}
}

@ -0,0 +1,114 @@
package local
import (
"fmt"
"log"
"strings"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/terraform"
)
// backend.Local implementation.
func (b *Local) Context(op *backend.Operation) (*terraform.Context, state.State, error) {
// Force the operation type to invalid; the shared context builder below uses
// this as a signal not to ask for input or validate.
op.Type = backend.OperationTypeInvalid
return b.context(op)
}
func (b *Local) context(op *backend.Operation) (*terraform.Context, state.State, error) {
// Get the state.
s, err := b.State(op.Environment)
if err != nil {
return nil, nil, errwrap.Wrapf("Error loading state: {{err}}", err)
}
if err := s.RefreshState(); err != nil {
return nil, nil, errwrap.Wrapf("Error loading state: {{err}}", err)
}
// Initialize our context options
var opts terraform.ContextOpts
if v := b.ContextOpts; v != nil {
opts = *v
}
// Copy set options from the operation
opts.Destroy = op.Destroy
opts.Module = op.Module
opts.Targets = op.Targets
opts.UIInput = op.UIIn
if op.Variables != nil {
opts.Variables = op.Variables
}
// Load our state
opts.State = s.State()
// Build the context
var tfCtx *terraform.Context
if op.Plan != nil {
tfCtx, err = op.Plan.Context(&opts)
} else {
tfCtx, err = terraform.NewContext(&opts)
}
if err != nil {
return nil, nil, err
}
// If we have an operation, then we automatically do the input/validate
// here since every operation requires this.
if op.Type != backend.OperationTypeInvalid {
// If input asking is enabled, then do that
if op.Plan == nil && b.OpInput {
mode := terraform.InputModeProvider
mode |= terraform.InputModeVar
mode |= terraform.InputModeVarUnset
if err := tfCtx.Input(mode); err != nil {
return nil, nil, errwrap.Wrapf("Error asking for user input: {{err}}", err)
}
}
// If validation is enabled, validate
if b.OpValidation {
// We ignore warnings here on purpose. We expect users to be listening
// to the terraform.Hook called after a validation.
ws, es := tfCtx.Validate()
if len(ws) > 0 {
// Log just in case the CLI isn't enabled
log.Printf("[WARN] backend/local: %d warnings: %v", len(ws), ws)
// If we have a CLI, output the warnings
if b.CLI != nil {
b.CLI.Warn(strings.TrimSpace(validateWarnHeader) + "\n")
for _, w := range ws {
b.CLI.Warn(fmt.Sprintf(" * %s", w))
}
// Make a newline before continuing
b.CLI.Output("")
}
}
if len(es) > 0 {
return nil, nil, multierror.Append(nil, es...)
}
}
}
return tfCtx, s, nil
}
const validateWarnHeader = `
There are warnings related to your configuration. If no errors occurred,
Terraform will continue despite these warnings. It is a good idea to resolve
these warnings in the near future.
Warnings:
`
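
For callers that want to drive the context directly instead of going through Operation, a hedged sketch of using the backend.Local interface implemented above (method names are taken from this diff; error handling kept minimal):

package example

import (
    "github.com/hashicorp/terraform/backend"
)

// planAndPersist asks the backend for a configured terraform.Context plus
// its state manager, runs a plan, and writes the context's state back
// through the manager.
func planAndPersist(b backend.Local, op *backend.Operation) error {
    tfCtx, st, err := b.Context(op)
    if err != nil {
        return err
    }
    if _, err := tfCtx.Plan(); err != nil {
        return err
    }
    if err := st.WriteState(tfCtx.State()); err != nil {
        return err
    }
    return st.PersistState()
}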

@ -0,0 +1,211 @@
package local
import (
"context"
"fmt"
"log"
"os"
"strings"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/command/clistate"
"github.com/hashicorp/terraform/command/format"
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/terraform"
)
func (b *Local) opPlan(
ctx context.Context,
op *backend.Operation,
runningOp *backend.RunningOperation) {
log.Printf("[INFO] backend/local: starting Plan operation")
if b.CLI != nil && op.Plan != nil {
b.CLI.Output(b.Colorize().Color(
"[reset][bold][yellow]" +
"The plan command received a saved plan file as input. This command\n" +
"will output the saved plan. This will not modify the already-existing\n" +
"plan. If you wish to generate a new plan, please pass in a configuration\n" +
"directory as an argument.\n\n"))
}
// A local plan requires either a plan or a module
if op.Plan == nil && op.Module == nil && !op.Destroy {
runningOp.Err = fmt.Errorf(strings.TrimSpace(planErrNoConfig))
return
}
// If we have a nil module at this point, then set it to an empty tree
// to avoid any potential crashes.
if op.Module == nil {
op.Module = module.NewEmptyTree()
}
// Setup our count hook that keeps track of resource changes
countHook := new(CountHook)
if b.ContextOpts == nil {
b.ContextOpts = new(terraform.ContextOpts)
}
old := b.ContextOpts.Hooks
defer func() { b.ContextOpts.Hooks = old }()
b.ContextOpts.Hooks = append(b.ContextOpts.Hooks, countHook)
// Get our context
tfCtx, opState, err := b.context(op)
if err != nil {
runningOp.Err = err
return
}
if op.LockState {
lockCtx, cancel := context.WithTimeout(ctx, op.StateLockTimeout)
defer cancel()
lockInfo := state.NewLockInfo()
lockInfo.Operation = op.Type.String()
lockID, err := clistate.Lock(lockCtx, opState, lockInfo, b.CLI, b.Colorize())
if err != nil {
runningOp.Err = errwrap.Wrapf("Error locking state: {{err}}", err)
return
}
defer func() {
if err := clistate.Unlock(opState, lockID, b.CLI, b.Colorize()); err != nil {
runningOp.Err = multierror.Append(runningOp.Err, err)
}
}()
}
// Setup the state
runningOp.State = tfCtx.State()
// If we're refreshing before plan, perform that
if op.PlanRefresh {
log.Printf("[INFO] backend/local: plan calling Refresh")
if b.CLI != nil {
b.CLI.Output(b.Colorize().Color(strings.TrimSpace(planRefreshing) + "\n"))
}
_, err := tfCtx.Refresh()
if err != nil {
runningOp.Err = errwrap.Wrapf("Error refreshing state: {{err}}", err)
return
}
}
// Perform the plan
log.Printf("[INFO] backend/local: plan calling Plan")
plan, err := tfCtx.Plan()
if err != nil {
runningOp.Err = errwrap.Wrapf("Error running plan: {{err}}", err)
return
}
// Record state
runningOp.PlanEmpty = plan.Diff.Empty()
// Save the plan to disk
if path := op.PlanOutPath; path != "" {
// Write the backend if we have one
plan.Backend = op.PlanOutBackend
// This works around a bug (#12871) that can no longer be triggered, but
// may still be present in state corrupted by earlier upgrades.
if plan.Backend != nil && plan.State != nil {
plan.State.Remote = nil
}
log.Printf("[INFO] backend/local: writing plan output to: %s", path)
f, err := os.Create(path)
if err == nil {
err = terraform.WritePlan(plan, f)
}
f.Close()
if err != nil {
runningOp.Err = fmt.Errorf("Error writing plan file: %s", err)
return
}
}
// Perform some output tasks if we have a CLI to output to.
if b.CLI != nil {
if plan.Diff.Empty() {
b.CLI.Output(b.Colorize().Color(strings.TrimSpace(planNoChanges)))
return
}
if path := op.PlanOutPath; path == "" {
b.CLI.Output(strings.TrimSpace(planHeaderNoOutput) + "\n")
} else {
b.CLI.Output(fmt.Sprintf(
strings.TrimSpace(planHeaderYesOutput)+"\n",
path))
}
b.CLI.Output(format.Plan(&format.PlanOpts{
Plan: plan,
Color: b.Colorize(),
ModuleDepth: -1,
}))
b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
"[reset][bold]Plan:[reset] "+
"%d to add, %d to change, %d to destroy.",
countHook.ToAdd+countHook.ToRemoveAndAdd,
countHook.ToChange,
countHook.ToRemove+countHook.ToRemoveAndAdd)))
}
}
const planErrNoConfig = `
No configuration files found!
Plan requires configuration to be present. Planning without a configuration
would mark everything for destruction, which is normally not what is desired.
If you would like to destroy everything, please run plan with the "-destroy"
flag or create a single empty configuration file. Otherwise, please create
a Terraform configuration file in the path being executed and try again.
`
const planHeaderNoOutput = `
The Terraform execution plan has been generated and is shown below.
Resources are shown in alphabetical order for quick scanning. Green resources
will be created (or destroyed and then created if an existing resource
exists), yellow resources are being changed in-place, and red resources
will be destroyed. Cyan entries are data sources to be read.
Note: You didn't specify an "-out" parameter to save this plan, so when
"apply" is called, Terraform can't guarantee this is what will execute.
`
const planHeaderYesOutput = `
The Terraform execution plan has been generated and is shown below.
Resources are shown in alphabetical order for quick scanning. Green resources
will be created (or destroyed and then created if an existing resource
exists), yellow resources are being changed in-place, and red resources
will be destroyed. Cyan entries are data sources to be read.
Your plan was also saved to the path below. Call the "apply" subcommand
with this plan file and Terraform will exactly execute this execution
plan.
Path: %s
`
const planNoChanges = `
[reset][bold][green]No changes. Infrastructure is up-to-date.[reset][green]
This means that Terraform did not detect any differences between your
configuration and real physical resources that exist. As a result, Terraform
doesn't need to do anything.
`
const planRefreshing = `
[reset][bold]Refreshing Terraform state in-memory prior to plan...[reset]
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.
`
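
A hedged sketch of how a caller is expected to drive this code path, mirroring the tests that follow (the "example.tfplan" output path is hypothetical):

package example

import (
    "context"
    "fmt"

    "github.com/hashicorp/terraform/backend"
    "github.com/hashicorp/terraform/backend/local"
    "github.com/hashicorp/terraform/config/module"
)

// runPlan builds a plan Operation, hands it to the backend, and waits on the
// returned RunningOperation, exactly as the CLI layer would.
func runPlan(b *local.Local, mod *module.Tree) error {
    op := &backend.Operation{
        Type:        backend.OperationTypePlan,
        Module:      mod,
        PlanRefresh: true,
        PlanOutPath: "example.tfplan",
    }

    run, err := b.Operation(context.Background(), op)
    if err != nil {
        return err
    }

    <-run.Done() // the operation runs asynchronously
    if run.Err != nil {
        return run.Err
    }
    if run.PlanEmpty {
        fmt.Println("no changes")
    }
    return nil
}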

@ -0,0 +1,249 @@
package local
import (
"context"
"os"
"path/filepath"
"strings"
"testing"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/terraform"
)
func TestLocal_planBasic(t *testing.T) {
b := TestLocal(t)
p := TestLocalProvider(t, b, "test")
mod, modCleanup := module.TestTree(t, "./test-fixtures/plan")
defer modCleanup()
op := testOperationPlan()
op.Module = mod
op.PlanRefresh = true
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if run.Err != nil {
t.Fatalf("err: %s", err)
}
if !p.DiffCalled {
t.Fatal("diff should be called")
}
}
func TestLocal_planNoConfig(t *testing.T) {
b := TestLocal(t)
TestLocalProvider(t, b, "test")
op := testOperationPlan()
op.Module = nil
op.PlanRefresh = true
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
err = run.Err
if err == nil {
t.Fatal("should error")
}
if !strings.Contains(err.Error(), "configuration") {
t.Fatalf("bad: %s", err)
}
}
func TestLocal_planRefreshFalse(t *testing.T) {
b := TestLocal(t)
p := TestLocalProvider(t, b, "test")
terraform.TestStateFile(t, b.StatePath, testPlanState())
mod, modCleanup := module.TestTree(t, "./test-fixtures/plan")
defer modCleanup()
op := testOperationPlan()
op.Module = mod
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if run.Err != nil {
t.Fatalf("err: %s", err)
}
if p.RefreshCalled {
t.Fatal("refresh should not be called")
}
if !run.PlanEmpty {
t.Fatal("plan should be empty")
}
}
func TestLocal_planDestroy(t *testing.T) {
b := TestLocal(t)
p := TestLocalProvider(t, b, "test")
terraform.TestStateFile(t, b.StatePath, testPlanState())
mod, modCleanup := module.TestTree(t, "./test-fixtures/plan")
defer modCleanup()
outDir := testTempDir(t)
defer os.RemoveAll(outDir)
planPath := filepath.Join(outDir, "plan.tfplan")
op := testOperationPlan()
op.Destroy = true
op.PlanRefresh = true
op.Module = mod
op.PlanOutPath = planPath
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if run.Err != nil {
t.Fatalf("err: %s", err)
}
if !p.RefreshCalled {
t.Fatal("refresh should be called")
}
if run.PlanEmpty {
t.Fatal("plan should not be empty")
}
plan := testReadPlan(t, planPath)
for _, m := range plan.Diff.Modules {
for _, r := range m.Resources {
if !r.Destroy {
t.Fatalf("bad: %#v", r)
}
}
}
}
func TestLocal_planDestroyNoConfig(t *testing.T) {
b := TestLocal(t)
p := TestLocalProvider(t, b, "test")
terraform.TestStateFile(t, b.StatePath, testPlanState())
outDir := testTempDir(t)
defer os.RemoveAll(outDir)
planPath := filepath.Join(outDir, "plan.tfplan")
op := testOperationPlan()
op.Destroy = true
op.PlanRefresh = true
op.Module = nil
op.PlanOutPath = planPath
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if run.Err != nil {
t.Fatalf("err: %s", err)
}
if !p.RefreshCalled {
t.Fatal("refresh should be called")
}
if run.PlanEmpty {
t.Fatal("plan should not be empty")
}
plan := testReadPlan(t, planPath)
for _, m := range plan.Diff.Modules {
for _, r := range m.Resources {
if !r.Destroy {
t.Fatalf("bad: %#v", r)
}
}
}
}
func TestLocal_planOutPathNoChange(t *testing.T) {
b := TestLocal(t)
TestLocalProvider(t, b, "test")
terraform.TestStateFile(t, b.StatePath, testPlanState())
mod, modCleanup := module.TestTree(t, "./test-fixtures/plan")
defer modCleanup()
outDir := testTempDir(t)
defer os.RemoveAll(outDir)
planPath := filepath.Join(outDir, "plan.tfplan")
op := testOperationPlan()
op.Module = mod
op.PlanOutPath = planPath
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if run.Err != nil {
t.Fatalf("err: %s", err)
}
plan := testReadPlan(t, planPath)
if !plan.Diff.Empty() {
t.Fatalf("expected empty plan to be written")
}
}
func testOperationPlan() *backend.Operation {
return &backend.Operation{
Type: backend.OperationTypePlan,
}
}
// testPlanState is just a common state that we use for testing plan.
func testPlanState() *terraform.State {
return &terraform.State{
Version: 2,
Modules: []*terraform.ModuleState{
&terraform.ModuleState{
Path: []string{"root"},
Resources: map[string]*terraform.ResourceState{
"test_instance.foo": &terraform.ResourceState{
Type: "test_instance",
Primary: &terraform.InstanceState{
ID: "bar",
},
},
},
},
},
}
}
func testReadPlan(t *testing.T, path string) *terraform.Plan {
f, err := os.Open(path)
if err != nil {
t.Fatalf("err: %s", err)
}
defer f.Close()
p, err := terraform.ReadPlan(f)
if err != nil {
t.Fatalf("err: %s", err)
}
return p
}

@ -0,0 +1,106 @@
package local
import (
"context"
"fmt"
"os"
"strings"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/command/clistate"
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/state"
)
func (b *Local) opRefresh(
ctx context.Context,
op *backend.Operation,
runningOp *backend.RunningOperation) {
// Check if our state exists if we're performing a refresh operation. We
// only do this if we're managing state with this backend.
if b.Backend == nil {
if _, err := os.Stat(b.StatePath); err != nil {
if os.IsNotExist(err) {
err = nil
}
if err != nil {
runningOp.Err = fmt.Errorf(
"There was an error reading the Terraform state that is needed\n"+
"for refreshing. The path and error are shown below.\n\n"+
"Path: %s\n\nError: %s",
b.StatePath, err)
return
}
}
}
// If no configuration module was given, create an empty tree to
// avoid crashes when the terraform.Context is initialized.
if op.Module == nil {
op.Module = module.NewEmptyTree()
}
// Get our context
tfCtx, opState, err := b.context(op)
if err != nil {
runningOp.Err = err
return
}
if op.LockState {
lockCtx, cancel := context.WithTimeout(ctx, op.StateLockTimeout)
defer cancel()
lockInfo := state.NewLockInfo()
lockInfo.Operation = op.Type.String()
lockID, err := clistate.Lock(lockCtx, opState, lockInfo, b.CLI, b.Colorize())
if err != nil {
runningOp.Err = errwrap.Wrapf("Error locking state: {{err}}", err)
return
}
defer func() {
if err := clistate.Unlock(opState, lockID, b.CLI, b.Colorize()); err != nil {
runningOp.Err = multierror.Append(runningOp.Err, err)
}
}()
}
// Set our state
runningOp.State = opState.State()
if runningOp.State.Empty() || !runningOp.State.HasResources() {
if b.CLI != nil {
b.CLI.Output(b.Colorize().Color(
strings.TrimSpace(refreshNoState) + "\n"))
}
}
// Perform operation and write the resulting state to the running op
newState, err := tfCtx.Refresh()
runningOp.State = newState
if err != nil {
runningOp.Err = errwrap.Wrapf("Error refreshing state: {{err}}", err)
return
}
// Write and persist the state
if err := opState.WriteState(newState); err != nil {
runningOp.Err = errwrap.Wrapf("Error writing state: {{err}}", err)
return
}
if err := opState.PersistState(); err != nil {
runningOp.Err = errwrap.Wrapf("Error saving state: {{err}}", err)
return
}
}
const refreshNoState = `
[reset][bold][yellow]Empty or non-existent state file.[reset][yellow]
Refresh will do nothing. Refresh does not error or return an erroneous
exit status because many automation scripts use refresh, plan, then apply
and may not have a state file yet for the first run.
`

@ -0,0 +1,201 @@
package local
import (
"context"
"fmt"
"testing"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/terraform"
)
func TestLocal_refresh(t *testing.T) {
b := TestLocal(t)
p := TestLocalProvider(t, b, "test")
terraform.TestStateFile(t, b.StatePath, testRefreshState())
p.RefreshFn = nil
p.RefreshReturn = &terraform.InstanceState{ID: "yes"}
mod, modCleanup := module.TestTree(t, "./test-fixtures/refresh")
defer modCleanup()
op := testOperationRefresh()
op.Module = mod
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if !p.RefreshCalled {
t.Fatal("refresh should be called")
}
checkState(t, b.StateOutPath, `
test_instance.foo:
ID = yes
`)
}
func TestLocal_refreshNilModule(t *testing.T) {
b := TestLocal(t)
p := TestLocalProvider(t, b, "test")
terraform.TestStateFile(t, b.StatePath, testRefreshState())
p.RefreshFn = nil
p.RefreshReturn = &terraform.InstanceState{ID: "yes"}
op := testOperationRefresh()
op.Module = nil
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if !p.RefreshCalled {
t.Fatal("refresh should be called")
}
checkState(t, b.StateOutPath, `
test_instance.foo:
ID = yes
`)
}
// GH-12174
func TestLocal_refreshNilModuleWithInput(t *testing.T) {
b := TestLocal(t)
p := TestLocalProvider(t, b, "test")
terraform.TestStateFile(t, b.StatePath, testRefreshState())
p.RefreshFn = nil
p.RefreshReturn = &terraform.InstanceState{ID: "yes"}
b.OpInput = true
op := testOperationRefresh()
op.Module = nil
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if !p.RefreshCalled {
t.Fatal("refresh should be called")
}
checkState(t, b.StateOutPath, `
test_instance.foo:
ID = yes
`)
}
func TestLocal_refreshInput(t *testing.T) {
b := TestLocal(t)
p := TestLocalProvider(t, b, "test")
terraform.TestStateFile(t, b.StatePath, testRefreshState())
p.ConfigureFn = func(c *terraform.ResourceConfig) error {
if v, ok := c.Get("value"); !ok || v != "bar" {
return fmt.Errorf("no value set")
}
return nil
}
p.RefreshFn = nil
p.RefreshReturn = &terraform.InstanceState{ID: "yes"}
mod, modCleanup := module.TestTree(t, "./test-fixtures/refresh-var-unset")
defer modCleanup()
// Enable input asking since it is disabled by default
b.OpInput = true
b.ContextOpts.UIInput = &terraform.MockUIInput{InputReturnString: "bar"}
op := testOperationRefresh()
op.Module = mod
op.UIIn = b.ContextOpts.UIInput
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if !p.RefreshCalled {
t.Fatal("refresh should be called")
}
checkState(t, b.StateOutPath, `
test_instance.foo:
ID = yes
`)
}
func TestLocal_refreshValidate(t *testing.T) {
b := TestLocal(t)
p := TestLocalProvider(t, b, "test")
terraform.TestStateFile(t, b.StatePath, testRefreshState())
p.RefreshFn = nil
p.RefreshReturn = &terraform.InstanceState{ID: "yes"}
mod, modCleanup := module.TestTree(t, "./test-fixtures/refresh")
defer modCleanup()
// Enable validation
b.OpValidation = true
op := testOperationRefresh()
op.Module = mod
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if !p.ValidateCalled {
t.Fatal("validate should be called")
}
checkState(t, b.StateOutPath, `
test_instance.foo:
ID = yes
`)
}
func testOperationRefresh() *backend.Operation {
return &backend.Operation{
Type: backend.OperationTypeRefresh,
}
}
// testRefreshState is just a common state that we use for testing refresh.
func testRefreshState() *terraform.State {
return &terraform.State{
Version: 2,
Modules: []*terraform.ModuleState{
&terraform.ModuleState{
Path: []string{"root"},
Resources: map[string]*terraform.ResourceState{
"test_instance.foo": &terraform.ResourceState{
Type: "test_instance",
Primary: &terraform.InstanceState{
ID: "bar",
},
},
},
Outputs: map[string]*terraform.OutputState{},
},
},
}
}

@ -0,0 +1,232 @@
package local
import (
"errors"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/terraform"
)
func TestLocal_impl(t *testing.T) {
var _ backend.Enhanced = new(Local)
var _ backend.Local = new(Local)
var _ backend.CLI = new(Local)
}
func TestLocal_backend(t *testing.T) {
defer testTmpDir(t)()
b := &Local{}
backend.TestBackend(t, b, b)
}
func checkState(t *testing.T, path, expected string) {
// Read the state
f, err := os.Open(path)
if err != nil {
t.Fatalf("err: %s", err)
}
state, err := terraform.ReadState(f)
f.Close()
if err != nil {
t.Fatalf("err: %s", err)
}
actual := strings.TrimSpace(state.String())
expected = strings.TrimSpace(expected)
if actual != expected {
t.Fatalf("state does not match! actual:\n%s\n\nexpected:\n%s", actual, expected)
}
}
func TestLocal_StatePaths(t *testing.T) {
b := &Local{}
// Test the defaults
path, out, back := b.StatePaths("")
if path != DefaultStateFilename {
t.Fatalf("expected %q, got %q", DefaultStateFilename, path)
}
if out != DefaultStateFilename {
t.Fatalf("expected %q, got %q", DefaultStateFilename, out)
}
dfltBackup := DefaultStateFilename + DefaultBackupExtension
if back != dfltBackup {
t.Fatalf("expected %q, got %q", dfltBackup, back)
}
// check with env
testEnv := "test_env"
path, out, back = b.StatePaths(testEnv)
expectedPath := filepath.Join(DefaultEnvDir, testEnv, DefaultStateFilename)
expectedOut := expectedPath
expectedBackup := expectedPath + DefaultBackupExtension
if path != expectedPath {
t.Fatalf("expected %q, got %q", expectedPath, path)
}
if out != expectedOut {
t.Fatalf("expected %q, got %q", expectedOut, out)
}
if back != expectedBackup {
t.Fatalf("expected %q, got %q", expectedBackup, back)
}
}
func TestLocal_addAndRemoveStates(t *testing.T) {
defer testTmpDir(t)()
dflt := backend.DefaultStateName
expectedStates := []string{dflt}
b := &Local{}
states, err := b.States()
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(states, expectedStates) {
t.Fatalf("expected []string{%q}, got %q", dflt, states)
}
expectedA := "test_A"
if _, err := b.State(expectedA); err != nil {
t.Fatal(err)
}
states, err = b.States()
if err != nil {
t.Fatal(err)
}
expectedStates = append(expectedStates, expectedA)
if !reflect.DeepEqual(states, expectedStates) {
t.Fatalf("expected %q, got %q", expectedStates, states)
}
expectedB := "test_B"
if _, err := b.State(expectedB); err != nil {
t.Fatal(err)
}
states, err = b.States()
if err != nil {
t.Fatal(err)
}
expectedStates = append(expectedStates, expectedB)
if !reflect.DeepEqual(states, expectedStates) {
t.Fatalf("expected %q, got %q", expectedStates, states)
}
if err := b.DeleteState(expectedA); err != nil {
t.Fatal(err)
}
states, err = b.States()
if err != nil {
t.Fatal(err)
}
expectedStates = []string{dflt, expectedB}
if !reflect.DeepEqual(states, expectedStates) {
t.Fatalf("expected %q, got %q", expectedStates, states)
}
if err := b.DeleteState(expectedB); err != nil {
t.Fatal(err)
}
states, err = b.States()
if err != nil {
t.Fatal(err)
}
expectedStates = []string{dflt}
if !reflect.DeepEqual(states, expectedStates) {
t.Fatalf("expected %q, got %q", expectedStates, states)
}
if err := b.DeleteState(dflt); err == nil {
t.Fatal("expected error deleting default state")
}
}
// testDelegateBackend is a local backend that returns sentinel errors from its
// named-state methods so tests can verify they are being called.
type testDelegateBackend struct {
*Local
}
var errTestDelegateState = errors.New("State called")
var errTestDelegateStates = errors.New("States called")
var errTestDelegateDeleteState = errors.New("Delete called")
func (b *testDelegateBackend) State(name string) (state.State, error) {
return nil, errTestDelegateState
}
func (b *testDelegateBackend) States() ([]string, error) {
return nil, errTestDelegateStates
}
func (b *testDelegateBackend) DeleteState(name string) error {
return errTestDelegateDeleteState
}
// verify that the MultiState methods are dispatched to the correct Backend.
func TestLocal_multiStateBackend(t *testing.T) {
// assign a delegate backend so we can verify the calls are forwarded to it
b := &Local{
Backend: &testDelegateBackend{},
}
if _, err := b.State("test"); err != errTestDelegateState {
t.Fatal("expected errTestDelegateState, got:", err)
}
if _, err := b.States(); err != errTestDelegateStates {
t.Fatal("expected errTestDelegateStates, got:", err)
}
if err := b.DeleteState("test"); err != errTestDelegateDeleteState {
t.Fatal("expected errTestDelegateDeleteState, got:", err)
}
}
// change into a tmp dir and return a deferrable func to change back and clean up
func testTmpDir(t *testing.T) func() {
tmp, err := ioutil.TempDir("", "tf")
if err != nil {
t.Fatal(err)
}
old, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
if err := os.Chdir(tmp); err != nil {
t.Fatal(err)
}
return func() {
// ignore errors and try to clean up
os.Chdir(old)
os.RemoveAll(tmp)
}
}

backend/local/cli.go
@ -0,0 +1,23 @@
package local
import (
"github.com/hashicorp/terraform/backend"
)
// backend.CLI impl.
func (b *Local) CLIInit(opts *backend.CLIOpts) error {
b.CLI = opts.CLI
b.CLIColor = opts.CLIColor
b.ContextOpts = opts.ContextOpts
b.OpInput = opts.Input
b.OpValidation = opts.Validation
// Only configure state paths if we didn't do so via the configure func.
if b.StatePath == "" {
b.StatePath = opts.StatePath
b.StateOutPath = opts.StateOutPath
b.StateBackupPath = opts.StateBackupPath
}
return nil
}
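
A minimal sketch of how the CLI layer might hand its options to the backend through CLIInit. Only fields that CLIInit reads are set, the CLI and color fields are left unset for brevity, and the state paths shown are the conventional defaults rather than anything mandated here:

package example

import (
    "github.com/hashicorp/terraform/backend"
    "github.com/hashicorp/terraform/backend/local"
    "github.com/hashicorp/terraform/terraform"
)

func wireCLI(b *local.Local) error {
    return b.CLIInit(&backend.CLIOpts{
        ContextOpts:     &terraform.ContextOpts{},
        Input:           true,
        Validation:      true,
        StatePath:       "terraform.tfstate",
        StateOutPath:    "terraform.tfstate",
        StateBackupPath: "terraform.tfstate.backup",
    })
}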

@ -0,0 +1,16 @@
// Code generated by "stringer -type=countHookAction hook_count_action.go"; DO NOT EDIT
package local
import "fmt"
const _countHookAction_name = "countHookActionAddcountHookActionChangecountHookActionRemove"
var _countHookAction_index = [...]uint8{0, 18, 39, 60}
func (i countHookAction) String() string {
if i >= countHookAction(len(_countHookAction_index)-1) {
return fmt.Sprintf("countHookAction(%d)", i)
}
return _countHookAction_name[_countHookAction_index[i]:_countHookAction_index[i+1]]
}

backend/local/hook_count.go
@ -0,0 +1,111 @@
package local
import (
"strings"
"sync"
"github.com/hashicorp/terraform/terraform"
)
// CountHook is a hook that counts the number of resources
// added, removed, changed during the course of an apply.
type CountHook struct {
Added int
Changed int
Removed int
ToAdd int
ToChange int
ToRemove int
ToRemoveAndAdd int
pending map[string]countHookAction
sync.Mutex
terraform.NilHook
}
func (h *CountHook) Reset() {
h.Lock()
defer h.Unlock()
h.pending = nil
h.Added = 0
h.Changed = 0
h.Removed = 0
}
func (h *CountHook) PreApply(
n *terraform.InstanceInfo,
s *terraform.InstanceState,
d *terraform.InstanceDiff) (terraform.HookAction, error) {
h.Lock()
defer h.Unlock()
if h.pending == nil {
h.pending = make(map[string]countHookAction)
}
action := countHookActionChange
if d.GetDestroy() {
action = countHookActionRemove
} else if s.ID == "" {
action = countHookActionAdd
}
h.pending[n.HumanId()] = action
return terraform.HookActionContinue, nil
}
func (h *CountHook) PostApply(
n *terraform.InstanceInfo,
s *terraform.InstanceState,
e error) (terraform.HookAction, error) {
h.Lock()
defer h.Unlock()
if h.pending != nil {
if a, ok := h.pending[n.HumanId()]; ok {
delete(h.pending, n.HumanId())
if e == nil {
switch a {
case countHookActionAdd:
h.Added += 1
case countHookActionChange:
h.Changed += 1
case countHookActionRemove:
h.Removed += 1
}
}
}
}
return terraform.HookActionContinue, nil
}
func (h *CountHook) PostDiff(
n *terraform.InstanceInfo, d *terraform.InstanceDiff) (
terraform.HookAction, error) {
h.Lock()
defer h.Unlock()
// We don't count anything for data sources
if strings.HasPrefix(n.Id, "data.") {
return terraform.HookActionContinue, nil
}
switch d.ChangeType() {
case terraform.DiffDestroyCreate:
h.ToRemoveAndAdd += 1
case terraform.DiffCreate:
h.ToAdd += 1
case terraform.DiffDestroy:
h.ToRemove += 1
case terraform.DiffUpdate:
h.ToChange += 1
}
return terraform.HookActionContinue, nil
}
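
A standalone, hypothetical tally that mirrors the behavior exercised by the PostDiff tests further below: a forced-new attribute counts toward ToAdd, Destroy counts toward ToRemove, and data sources are ignored.

package main

import (
    "fmt"

    "github.com/hashicorp/terraform/backend/local"
    "github.com/hashicorp/terraform/terraform"
)

func main() {
    h := new(local.CountHook)

    diffs := map[string]*terraform.InstanceDiff{
        "test_instance.add": &terraform.InstanceDiff{
            Attributes: map[string]*terraform.ResourceAttrDiff{
                "ami": &terraform.ResourceAttrDiff{RequiresNew: true},
            },
        },
        "test_instance.del":  &terraform.InstanceDiff{Destroy: true},
        "data.test_instance": &terraform.InstanceDiff{Destroy: true}, // skipped
    }
    for id, d := range diffs {
        h.PostDiff(&terraform.InstanceInfo{Id: id}, d)
    }

    fmt.Println(h.ToAdd, h.ToChange, h.ToRemove) // 1 0 1
}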

@ -0,0 +1,11 @@
package local
//go:generate stringer -type=countHookAction hook_count_action.go
type countHookAction byte
const (
countHookActionAdd countHookAction = iota
countHookActionChange
countHookActionRemove
)

@ -0,0 +1,243 @@
package local
import (
"reflect"
"testing"
"github.com/hashicorp/terraform/terraform"
)
func TestCountHook_impl(t *testing.T) {
var _ terraform.Hook = new(CountHook)
}
func TestCountHookPostDiff_DestroyDeposed(t *testing.T) {
h := new(CountHook)
resources := map[string]*terraform.InstanceDiff{
"lorem": &terraform.InstanceDiff{DestroyDeposed: true},
}
n := &terraform.InstanceInfo{} // TODO
for _, d := range resources {
h.PostDiff(n, d)
}
expected := new(CountHook)
expected.ToAdd = 0
expected.ToChange = 0
expected.ToRemoveAndAdd = 0
expected.ToRemove = 1
if !reflect.DeepEqual(expected, h) {
t.Fatalf("Expected %#v, got %#v instead.",
expected, h)
}
}
func TestCountHookPostDiff_DestroyOnly(t *testing.T) {
h := new(CountHook)
resources := map[string]*terraform.InstanceDiff{
"foo": &terraform.InstanceDiff{Destroy: true},
"bar": &terraform.InstanceDiff{Destroy: true},
"lorem": &terraform.InstanceDiff{Destroy: true},
"ipsum": &terraform.InstanceDiff{Destroy: true},
}
n := &terraform.InstanceInfo{} // TODO
for _, d := range resources {
h.PostDiff(n, d)
}
expected := new(CountHook)
expected.ToAdd = 0
expected.ToChange = 0
expected.ToRemoveAndAdd = 0
expected.ToRemove = 4
if !reflect.DeepEqual(expected, h) {
t.Fatalf("Expected %#v, got %#v instead.",
expected, h)
}
}
func TestCountHookPostDiff_AddOnly(t *testing.T) {
h := new(CountHook)
resources := map[string]*terraform.InstanceDiff{
"foo": &terraform.InstanceDiff{
Attributes: map[string]*terraform.ResourceAttrDiff{
"foo": &terraform.ResourceAttrDiff{RequiresNew: true},
},
},
"bar": &terraform.InstanceDiff{
Attributes: map[string]*terraform.ResourceAttrDiff{
"foo": &terraform.ResourceAttrDiff{RequiresNew: true},
},
},
"lorem": &terraform.InstanceDiff{
Attributes: map[string]*terraform.ResourceAttrDiff{
"foo": &terraform.ResourceAttrDiff{RequiresNew: true},
},
},
}
n := &terraform.InstanceInfo{}
for _, d := range resources {
h.PostDiff(n, d)
}
expected := new(CountHook)
expected.ToAdd = 3
expected.ToChange = 0
expected.ToRemoveAndAdd = 0
expected.ToRemove = 0
if !reflect.DeepEqual(expected, h) {
t.Fatalf("Expected %#v, got %#v instead.",
expected, h)
}
}
func TestCountHookPostDiff_ChangeOnly(t *testing.T) {
h := new(CountHook)
resources := map[string]*terraform.InstanceDiff{
"foo": &terraform.InstanceDiff{
Destroy: false,
Attributes: map[string]*terraform.ResourceAttrDiff{
"foo": &terraform.ResourceAttrDiff{},
},
},
"bar": &terraform.InstanceDiff{
Destroy: false,
Attributes: map[string]*terraform.ResourceAttrDiff{
"foo": &terraform.ResourceAttrDiff{},
},
},
"lorem": &terraform.InstanceDiff{
Destroy: false,
Attributes: map[string]*terraform.ResourceAttrDiff{
"foo": &terraform.ResourceAttrDiff{},
},
},
}
n := &terraform.InstanceInfo{}
for _, d := range resources {
h.PostDiff(n, d)
}
expected := new(CountHook)
expected.ToAdd = 0
expected.ToChange = 3
expected.ToRemoveAndAdd = 0
expected.ToRemove = 0
if !reflect.DeepEqual(expected, h) {
t.Fatalf("Expected %#v, got %#v instead.",
expected, h)
}
}
func TestCountHookPostDiff_Mixed(t *testing.T) {
h := new(CountHook)
resources := map[string]*terraform.InstanceDiff{
"foo": &terraform.InstanceDiff{
Destroy: true,
},
"bar": &terraform.InstanceDiff{},
"lorem": &terraform.InstanceDiff{
Destroy: false,
Attributes: map[string]*terraform.ResourceAttrDiff{
"foo": &terraform.ResourceAttrDiff{},
},
},
"ipsum": &terraform.InstanceDiff{Destroy: true},
}
n := &terraform.InstanceInfo{}
for _, d := range resources {
h.PostDiff(n, d)
}
expected := new(CountHook)
expected.ToAdd = 0
expected.ToChange = 1
expected.ToRemoveAndAdd = 0
expected.ToRemove = 2
if !reflect.DeepEqual(expected, h) {
t.Fatalf("Expected %#v, got %#v instead.",
expected, h)
}
}
func TestCountHookPostDiff_NoChange(t *testing.T) {
h := new(CountHook)
resources := map[string]*terraform.InstanceDiff{
"foo": &terraform.InstanceDiff{},
"bar": &terraform.InstanceDiff{},
"lorem": &terraform.InstanceDiff{},
"ipsum": &terraform.InstanceDiff{},
}
n := &terraform.InstanceInfo{}
for _, d := range resources {
h.PostDiff(n, d)
}
expected := new(CountHook)
expected.ToAdd = 0
expected.ToChange = 0
expected.ToRemoveAndAdd = 0
expected.ToRemove = 0
if !reflect.DeepEqual(expected, h) {
t.Fatalf("Expected %#v, got %#v instead.",
expected, h)
}
}
func TestCountHookPostDiff_DataSource(t *testing.T) {
h := new(CountHook)
resources := map[string]*terraform.InstanceDiff{
"data.foo": &terraform.InstanceDiff{
Destroy: true,
},
"data.bar": &terraform.InstanceDiff{},
"data.lorem": &terraform.InstanceDiff{
Destroy: false,
Attributes: map[string]*terraform.ResourceAttrDiff{
"foo": &terraform.ResourceAttrDiff{},
},
},
"data.ipsum": &terraform.InstanceDiff{Destroy: true},
}
for k, d := range resources {
n := &terraform.InstanceInfo{Id: k}
h.PostDiff(n, d)
}
expected := new(CountHook)
expected.ToAdd = 0
expected.ToChange = 0
expected.ToRemoveAndAdd = 0
expected.ToRemove = 0
if !reflect.DeepEqual(expected, h) {
t.Fatalf("Expected %#v, got %#v instead.",
expected, h)
}
}

@ -0,0 +1,33 @@
package local
import (
"sync"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/terraform"
)
// StateHook is a hook that continuously updates the state by calling
// WriteState on a state.State.
type StateHook struct {
terraform.NilHook
sync.Mutex
State state.State
}
func (h *StateHook) PostStateUpdate(
s *terraform.State) (terraform.HookAction, error) {
h.Lock()
defer h.Unlock()
if h.State != nil {
// Write the new state
if err := h.State.WriteState(s); err != nil {
return terraform.HookActionHalt, err
}
}
// Continue forth
return terraform.HookActionContinue, nil
}
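
A short sketch of how this hook is meant to be wired up, following what opApply does earlier in this diff (the InmemState in the usage note is just a convenient stand-in for a real state manager):

package example

import (
    "github.com/hashicorp/terraform/backend/local"
    "github.com/hashicorp/terraform/state"
    "github.com/hashicorp/terraform/terraform"
)

// wireStateHook points the hook at a state manager and appends it to the
// context hooks so every state update is written through immediately.
func wireStateHook(opts *terraform.ContextOpts, st state.State) {
    opts.Hooks = append(opts.Hooks, &local.StateHook{State: st})
}

// Example usage:
//
//	opts := &terraform.ContextOpts{}
//	wireStateHook(opts, &state.InmemState{})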

@ -0,0 +1,29 @@
package local
import (
"testing"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/terraform"
)
func TestStateHook_impl(t *testing.T) {
var _ terraform.Hook = new(StateHook)
}
func TestStateHook(t *testing.T) {
is := &state.InmemState{}
var hook terraform.Hook = &StateHook{State: is}
s := state.TestStateInitial()
action, err := hook.PostStateUpdate(s)
if err != nil {
t.Fatalf("err: %s", err)
}
if action != terraform.HookActionContinue {
t.Fatalf("bad: %v", action)
}
if !is.State().Equal(s) {
t.Fatalf("bad state: %#v", is.State())
}
}

@ -0,0 +1,25 @@
package local
import (
"flag"
"io/ioutil"
"log"
"os"
"testing"
"github.com/hashicorp/terraform/helper/logging"
)
func TestMain(m *testing.M) {
flag.Parse()
if testing.Verbose() {
// if we're verbose, use the logging requested by TF_LOG
logging.SetOutput()
} else {
// otherwise silence all logs
log.SetOutput(ioutil.Discard)
}
os.Exit(m.Run())
}

@ -0,0 +1 @@
This is an empty dir

@ -0,0 +1,7 @@
resource "test_instance" "foo" {
ami = "bar"
}
resource "test_instance" "bar" {
error = "true"
}

@ -0,0 +1,3 @@
resource "test_instance" "foo" {
ami = "bar"
}

@ -0,0 +1,9 @@
resource "test_instance" "foo" {
ami = "bar"
# This is here because at some point it caused a test failure
network_interface {
device_index = 0
description = "Main network interface"
}
}

@ -0,0 +1,9 @@
variable "should_ask" {}
provider "test" {
value = "${var.should_ask}"
}
resource "test_instance" "foo" {
foo = "bar"
}

@ -0,0 +1,3 @@
resource "test_instance" "foo" {
ami = "bar"
}

backend/local/testing.go
@ -0,0 +1,101 @@
package local
import (
"io/ioutil"
"path/filepath"
"testing"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/terraform"
)
// TestLocal returns a configured Local struct with temporary paths and
// in-memory ContextOpts.
//
// No operations will be called on the returned value, so you can still set
// public fields without any locks.
func TestLocal(t *testing.T) *Local {
tempDir := testTempDir(t)
return &Local{
StatePath: filepath.Join(tempDir, "state.tfstate"),
StateOutPath: filepath.Join(tempDir, "state.tfstate"),
StateBackupPath: filepath.Join(tempDir, "state.tfstate.bak"),
StateEnvDir: filepath.Join(tempDir, "state.tfstate.d"),
ContextOpts: &terraform.ContextOpts{},
}
}
// TestLocalProvider modifies the ContextOpts of the *Local parameter to
// have a provider with the given name.
func TestLocalProvider(t *testing.T, b *Local, name string) *terraform.MockResourceProvider {
// Build a mock resource provider for in-memory operations
p := new(terraform.MockResourceProvider)
p.DiffReturn = &terraform.InstanceDiff{}
p.RefreshFn = func(
info *terraform.InstanceInfo,
s *terraform.InstanceState) (*terraform.InstanceState, error) {
return s, nil
}
p.ResourcesReturn = []terraform.ResourceType{
terraform.ResourceType{
Name: "test_instance",
},
}
// Initialize the opts
if b.ContextOpts == nil {
b.ContextOpts = &terraform.ContextOpts{}
}
if b.ContextOpts.Providers == nil {
b.ContextOpts.Providers = make(map[string]terraform.ResourceProviderFactory)
}
// Setup our provider
b.ContextOpts.Providers[name] = func() (terraform.ResourceProvider, error) {
return p, nil
}
return p
}
// TestNewLocalSingle is a factory for creating a TestLocalSingleState.
// This function matches the signature required for backend/init.
func TestNewLocalSingle() backend.Backend {
return &TestLocalSingleState{}
}
// TestLocalSingleState is a backend implementation that wraps Local
// and modifies it to only support single states (returns
// ErrNamedStatesNotSupported for multi-state operations).
//
// This isn't an actual use case; it is exported just to provide an
// easy way to test that behavior.
type TestLocalSingleState struct {
Local
}
func (b *TestLocalSingleState) State(name string) (state.State, error) {
if name != backend.DefaultStateName {
return nil, backend.ErrNamedStatesNotSupported
}
return b.Local.State(name)
}
func (b *TestLocalSingleState) States() ([]string, error) {
return nil, backend.ErrNamedStatesNotSupported
}
func (b *TestLocalSingleState) DeleteState(string) error {
return backend.ErrNamedStatesNotSupported
}
func testTempDir(t *testing.T) string {
d, err := ioutil.TempDir("", "tf")
if err != nil {
t.Fatalf("err: %s", err)
}
return d
}

backend/nil.go
@ -0,0 +1,39 @@
package backend
import (
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/terraform"
)
// Nil is a no-op implementation of Backend.
//
// This is useful to embed within another struct to implement all of the
// backend interface for testing.
type Nil struct{}
func (Nil) Input(
ui terraform.UIInput,
c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
return c, nil
}
func (Nil) Validate(*terraform.ResourceConfig) ([]string, []error) {
return nil, nil
}
func (Nil) Configure(*terraform.ResourceConfig) error {
return nil
}
func (Nil) State(string) (state.State, error) {
// We have to return a non-nil state to adhere to the interface
return &state.InmemState{}, nil
}
func (Nil) DeleteState(string) error {
return nil
}
func (Nil) States() ([]string, error) {
return []string{DefaultStateName}, nil
}

backend/nil_test.go
@ -0,0 +1,9 @@
package backend
import (
"testing"
)
func TestNil_impl(t *testing.T) {
var _ Backend = new(Nil)
}

backend/operation_type.go
@ -0,0 +1,14 @@
package backend
//go:generate stringer -type=OperationType operation_type.go
// OperationType is an enum used with Operation to specify the operation
// type to perform for Terraform.
type OperationType uint
const (
OperationTypeInvalid OperationType = iota
OperationTypeRefresh
OperationTypePlan
OperationTypeApply
)
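
For reference, the generated Stringer in the next file turns these values into the names that end up in places like lock metadata (lockInfo.Operation = op.Type.String() in the local backend). A tiny sketch:

package main

import (
    "fmt"

    "github.com/hashicorp/terraform/backend"
)

func main() {
    for _, t := range []backend.OperationType{
        backend.OperationTypeRefresh,
        backend.OperationTypePlan,
        backend.OperationTypeApply,
    } {
        fmt.Println(t.String()) // OperationTypeRefresh, OperationTypePlan, OperationTypeApply
    }
}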

@ -0,0 +1,16 @@
// Code generated by "stringer -type=OperationType operation_type.go"; DO NOT EDIT
package backend
import "fmt"
const _OperationType_name = "OperationTypeInvalidOperationTypeRefreshOperationTypePlanOperationTypeApply"
var _OperationType_index = [...]uint8{0, 20, 40, 57, 75}
func (i OperationType) String() string {
if i >= OperationType(len(_OperationType_index)-1) {
return fmt.Sprintf("OperationType(%d)", i)
}
return _OperationType_name[_OperationType_index[i]:_OperationType_index[i+1]]
}

@ -0,0 +1,70 @@
// Package remotestate implements a Backend for remote state implementations
// from the state/remote package that also implement a backend schema for
// configuration.
package remotestate
import (
"context"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/state/remote"
"github.com/hashicorp/terraform/terraform"
)
// Backend implements backend.Backend for remote state backends.
//
// All exported fields should be set. This struct should only be used
// by implementers of backends, not by consumers. If you're consuming, please
// use a higher level package such as Consul backends.
type Backend struct {
// Backend should be set to the configuration schema. ConfigureFunc
// should not be set on the schema.
*schema.Backend
// ConfigureFunc takes the ctx from a schema.Backend and returns a
// fully configured remote client to use for state operations.
ConfigureFunc func(ctx context.Context) (remote.Client, error)
client remote.Client
}
func (b *Backend) Configure(rc *terraform.ResourceConfig) error {
// Set our configureFunc manually
b.Backend.ConfigureFunc = func(ctx context.Context) error {
c, err := b.ConfigureFunc(ctx)
if err != nil {
return err
}
// Set the client for later
b.client = c
return nil
}
// Run the normal configuration
return b.Backend.Configure(rc)
}
func (b *Backend) States() ([]string, error) {
return nil, backend.ErrNamedStatesNotSupported
}
func (b *Backend) DeleteState(name string) error {
return backend.ErrNamedStatesNotSupported
}
func (b *Backend) State(name string) (state.State, error) {
// This shouldn't happen
if b.client == nil {
panic("nil remote client")
}
if name != backend.DefaultStateName {
return nil, backend.ErrNamedStatesNotSupported
}
s := &remote.State{Client: b.client}
return s, nil
}
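
A hedged sketch of the embedding pattern this package is for: a concrete backend supplies its own schema plus a ConfigureFunc that turns the decoded config into a remote.Client. The "address" attribute and newExampleClient are illustrative assumptions, not part of any real backend.

package example

import (
    "context"
    "fmt"

    remotestate "github.com/hashicorp/terraform/backend/remote-state"
    "github.com/hashicorp/terraform/helper/schema"
    "github.com/hashicorp/terraform/state/remote"
)

func New() *remotestate.Backend {
    return &remotestate.Backend{
        Backend: &schema.Backend{
            Schema: map[string]*schema.Schema{
                "address": &schema.Schema{
                    Type:     schema.TypeString,
                    Required: true,
                },
            },
        },
        ConfigureFunc: func(ctx context.Context) (remote.Client, error) {
            data := schema.FromContextBackendConfig(ctx)
            return newExampleClient(data.Get("address").(string))
        },
    }
}

// newExampleClient is a hypothetical placeholder; a real backend would build
// a remote.Client for its storage service here.
func newExampleClient(addr string) (remote.Client, error) {
    return nil, fmt.Errorf("example only: no client for %s", addr)
}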

@ -0,0 +1,11 @@
package remotestate
import (
"testing"
"github.com/hashicorp/terraform/backend"
)
func TestBackend_impl(t *testing.T) {
var _ backend.Backend = new(Backend)
}

@ -0,0 +1,134 @@
package consul
import (
"context"
"strings"
consulapi "github.com/hashicorp/consul/api"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/helper/schema"
)
// New creates a new backend for Consul remote state.
func New() backend.Backend {
s := &schema.Backend{
Schema: map[string]*schema.Schema{
"path": &schema.Schema{
Type: schema.TypeString,
Required: true,
Description: "Path to store state in Consul",
},
"access_token": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "Access token for a Consul ACL",
Default: "", // To prevent input
},
"address": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "Address to the Consul Cluster",
Default: "", // To prevent input
},
"scheme": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "Scheme to communicate to Consul with",
Default: "", // To prevent input
},
"datacenter": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "Datacenter to communicate with",
Default: "", // To prevent input
},
"http_auth": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "HTTP Auth in the format of 'username:password'",
Default: "", // To prevent input
},
"gzip": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Description: "Compress the state data using gzip",
Default: false,
},
"lock": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Description: "Lock state access",
Default: true,
},
},
}
result := &Backend{Backend: s}
result.Backend.ConfigureFunc = result.configure
return result
}
type Backend struct {
*schema.Backend
// The fields below are set from configure
configData *schema.ResourceData
lock bool
}
func (b *Backend) configure(ctx context.Context) error {
// Grab the resource data
b.configData = schema.FromContextBackendConfig(ctx)
// Store the lock information
b.lock = b.configData.Get("lock").(bool)
// Initialize a client to test config
_, err := b.clientRaw()
return err
}
func (b *Backend) clientRaw() (*consulapi.Client, error) {
data := b.configData
// Configure the client
config := consulapi.DefaultConfig()
if v, ok := data.GetOk("access_token"); ok && v.(string) != "" {
config.Token = v.(string)
}
if v, ok := data.GetOk("address"); ok && v.(string) != "" {
config.Address = v.(string)
}
if v, ok := data.GetOk("scheme"); ok && v.(string) != "" {
config.Scheme = v.(string)
}
if v, ok := data.GetOk("datacenter"); ok && v.(string) != "" {
config.Datacenter = v.(string)
}
if v, ok := data.GetOk("http_auth"); ok && v.(string) != "" {
auth := v.(string)
var username, password string
if strings.Contains(auth, ":") {
split := strings.SplitN(auth, ":", 2)
username = split[0]
password = split[1]
} else {
username = auth
}
config.HttpAuth = &consulapi.HttpBasicAuth{
Username: username,
Password: password,
}
}
return consulapi.NewClient(config)
}

View File

@ -0,0 +1,167 @@
package consul
import (
"fmt"
"strings"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/state/remote"
"github.com/hashicorp/terraform/terraform"
)
const (
keyEnvPrefix = "-env:"
)
func (b *Backend) States() ([]string, error) {
// Get the Consul client
client, err := b.clientRaw()
if err != nil {
return nil, err
}
// List our raw path
prefix := b.configData.Get("path").(string) + keyEnvPrefix
keys, _, err := client.KV().Keys(prefix, "/", nil)
if err != nil {
return nil, err
}
// Find the envs, we use a map since we can get duplicates with
// path suffixes.
envs := map[string]struct{}{}
for _, key := range keys {
// Consul should ensure this but it doesn't hurt to check again
if strings.HasPrefix(key, prefix) {
key = strings.TrimPrefix(key, prefix)
// Ignore anything with a "/" in it since we store the state
// directly in a key not a directory.
if idx := strings.IndexRune(key, '/'); idx >= 0 {
continue
}
envs[key] = struct{}{}
}
}
result := make([]string, 1, len(envs)+1)
result[0] = backend.DefaultStateName
for k := range envs {
result = append(result, k)
}
return result, nil
}
func (b *Backend) DeleteState(name string) error {
if name == backend.DefaultStateName || name == "" {
return fmt.Errorf("can't delete default state")
}
// Get the Consul API client
client, err := b.clientRaw()
if err != nil {
return err
}
// Determine the path of the data
path := b.path(name)
// Delete it. We just delete it without any locking since
// the DeleteState API is documented as such.
_, err = client.KV().Delete(path, nil)
return err
}
func (b *Backend) State(name string) (state.State, error) {
// Get the Consul API client
client, err := b.clientRaw()
if err != nil {
return nil, err
}
// Determine the path of the data
path := b.path(name)
// Determine whether to gzip or not
gzip := b.configData.Get("gzip").(bool)
// Build the state client
var stateMgr state.State = &remote.State{
Client: &RemoteClient{
Client: client,
Path: path,
GZip: gzip,
},
}
// If we're not locking, disable it
if !b.lock {
stateMgr = &state.LockDisabled{Inner: stateMgr}
}
// Grab a lock; we use this to write an empty state if one doesn't
// exist already. We have to write an empty state as a sentinel value
// so States() knows it exists.
lockInfo := state.NewLockInfo()
lockInfo.Operation = "init"
lockId, err := stateMgr.Lock(lockInfo)
if err != nil {
return nil, fmt.Errorf("failed to lock state in Consul: %s", err)
}
// Local helper function so we can call it in multiple places
lockUnlock := func(parent error) error {
if err := stateMgr.Unlock(lockId); err != nil {
return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err)
}
return parent
}
// Grab the value
if err := stateMgr.RefreshState(); err != nil {
err = lockUnlock(err)
return nil, err
}
// If we have no state, we have to create an empty state
if v := stateMgr.State(); v == nil {
if err := stateMgr.WriteState(terraform.NewState()); err != nil {
err = lockUnlock(err)
return nil, err
}
if err := stateMgr.PersistState(); err != nil {
err = lockUnlock(err)
return nil, err
}
}
// Unlock, the state should now be initialized
if err := lockUnlock(nil); err != nil {
return nil, err
}
return stateMgr, nil
}
func (b *Backend) path(name string) string {
path := b.configData.Get("path").(string)
if name != backend.DefaultStateName {
path += fmt.Sprintf("%s%s", keyEnvPrefix, name)
}
return path
}
const errStateUnlock = `
Error unlocking Consul state. Lock ID: %s
Error: %s
You may have to force-unlock this state in order to use it again.
The Consul backend acquires a lock during initialization to ensure
the minimum required key/values are prepared.
`
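To make the key layout above concrete, here is a small standalone sketch (not part of the change itself) of how the default and named states map onto Consul KV keys; the base path "terraform/state" and the environment names are made-up example values.

package main

import (
    "fmt"
    "strings"
)

const keyEnvPrefix = "-env:"

// statePath mirrors Backend.path above: the default state lives at the
// configured path, named states at "<path>-env:<name>".
func statePath(base, name string) string {
    if name == "default" {
        return base
    }
    return base + keyEnvPrefix + name
}

func main() {
    base := "terraform/state"
    for _, name := range []string{"default", "dev", "prod"} {
        fmt.Printf("%-8s => %s\n", name, statePath(base, name))
    }

    // States() works in reverse: keys listed under the prefix are trimmed
    // back down to environment names.
    key := "terraform/state-env:dev"
    fmt.Println("env name:", strings.TrimPrefix(key, base+keyEnvPrefix))
}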

View File

@ -0,0 +1,94 @@
package consul
import (
"fmt"
"io/ioutil"
"os"
"testing"
"time"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/terraform/backend"
)
func TestBackend_impl(t *testing.T) {
var _ backend.Backend = new(Backend)
}
func newConsulTestServer(t *testing.T) *testutil.TestServer {
skip := os.Getenv("TF_ACC") == "" && os.Getenv("TF_CONSUL_TEST") == ""
if skip {
t.Log("consul server tests require setting TF_ACC or TF_CONSUL_TEST")
t.Skip()
}
srv := testutil.NewTestServerConfig(t, func(c *testutil.TestServerConfig) {
c.LogLevel = "warn"
if !testing.Verbose() {
c.Stdout = ioutil.Discard
c.Stderr = ioutil.Discard
}
})
return srv
}
func TestBackend(t *testing.T) {
srv := newConsulTestServer(t)
defer srv.Stop()
path := fmt.Sprintf("tf-unit/%s", time.Now().String())
// Get the backend. We need two to test locking.
b1 := backend.TestBackendConfig(t, New(), map[string]interface{}{
"address": srv.HTTPAddr,
"path": path,
})
b2 := backend.TestBackendConfig(t, New(), map[string]interface{}{
"address": srv.HTTPAddr,
"path": path,
})
// Test
backend.TestBackend(t, b1, b2)
}
func TestBackend_lockDisabled(t *testing.T) {
srv := newConsulTestServer(t)
defer srv.Stop()
path := fmt.Sprintf("tf-unit/%s", time.Now().String())
// Get the backend. We need two to test locking.
b1 := backend.TestBackendConfig(t, New(), map[string]interface{}{
"address": srv.HTTPAddr,
"path": path,
"lock": false,
})
b2 := backend.TestBackendConfig(t, New(), map[string]interface{}{
"address": srv.HTTPAddr,
"path": path + "different", // Diff so locking test would fail if it was locking
"lock": false,
})
// Test
backend.TestBackend(t, b1, b2)
}
func TestBackend_gzip(t *testing.T) {
srv := newConsulTestServer(t)
defer srv.Stop()
// Get the backend
b := backend.TestBackendConfig(t, New(), map[string]interface{}{
"address": srv.HTTPAddr,
"path": fmt.Sprintf("tf-unit/%s", time.Now().String()),
"gzip": true,
})
// Test
backend.TestBackend(t, b, nil)
}

View File

@ -0,0 +1,232 @@
package consul
import (
"bytes"
"compress/gzip"
"crypto/md5"
"encoding/json"
"errors"
"fmt"
"time"
consulapi "github.com/hashicorp/consul/api"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/state/remote"
)
const (
lockSuffix = "/.lock"
lockInfoSuffix = "/.lockinfo"
)
// RemoteClient is a remote client that stores data in Consul.
type RemoteClient struct {
Client *consulapi.Client
Path string
GZip bool
consulLock *consulapi.Lock
lockCh <-chan struct{}
}
func (c *RemoteClient) Get() (*remote.Payload, error) {
pair, _, err := c.Client.KV().Get(c.Path, nil)
if err != nil {
return nil, err
}
if pair == nil {
return nil, nil
}
payload := pair.Value
// If the payload starts with 0x1f, it's gzip, not json
if len(pair.Value) >= 1 && pair.Value[0] == '\x1f' {
if data, err := uncompressState(pair.Value); err == nil {
payload = data
} else {
return nil, err
}
}
md5 := md5.Sum(pair.Value)
return &remote.Payload{
Data: payload,
MD5: md5[:],
}, nil
}
func (c *RemoteClient) Put(data []byte) error {
payload := data
if c.GZip {
if compressedState, err := compressState(data); err == nil {
payload = compressedState
} else {
return err
}
}
kv := c.Client.KV()
_, err := kv.Put(&consulapi.KVPair{
Key: c.Path,
Value: payload,
}, nil)
return err
}
func (c *RemoteClient) Delete() error {
kv := c.Client.KV()
_, err := kv.Delete(c.Path, nil)
return err
}
func (c *RemoteClient) putLockInfo(info *state.LockInfo) error {
info.Path = c.Path
info.Created = time.Now().UTC()
kv := c.Client.KV()
_, err := kv.Put(&consulapi.KVPair{
Key: c.Path + lockInfoSuffix,
Value: info.Marshal(),
}, nil)
return err
}
func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) {
path := c.Path + lockInfoSuffix
pair, _, err := c.Client.KV().Get(path, nil)
if err != nil {
return nil, err
}
if pair == nil {
return nil, nil
}
li := &state.LockInfo{}
err = json.Unmarshal(pair.Value, li)
if err != nil {
return nil, fmt.Errorf("error unmarshaling lock info: %s", err)
}
return li, nil
}
func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) {
select {
case <-c.lockCh:
// We had a lock, but lost it.
// Since we typically only call lock once, we shouldn't ever see this.
return "", errors.New("lost consul lock")
default:
if c.lockCh != nil {
// we have an active lock already
return "", fmt.Errorf("state %q already locked", c.Path)
}
}
if c.consulLock == nil {
opts := &consulapi.LockOptions{
Key: c.Path + lockSuffix,
// only wait briefly, so terraform has the choice to fail fast or
// retry as needed.
LockWaitTime: time.Second,
LockTryOnce: true,
}
lock, err := c.Client.LockOpts(opts)
if err != nil {
return "", err
}
c.consulLock = lock
}
lockErr := &state.LockError{}
lockCh, err := c.consulLock.Lock(make(chan struct{}))
if err != nil {
lockErr.Err = err
return "", lockErr
}
if lockCh == nil {
lockInfo, e := c.getLockInfo()
if e != nil {
lockErr.Err = e
return "", lockErr
}
lockErr.Info = lockInfo
return "", lockErr
}
c.lockCh = lockCh
err = c.putLockInfo(info)
if err != nil {
if unlockErr := c.Unlock(info.ID); unlockErr != nil {
err = multierror.Append(err, unlockErr)
}
return "", err
}
return info.ID, nil
}
func (c *RemoteClient) Unlock(id string) error {
// this doesn't use the lock id, because the lock is tied to the consul client.
if c.consulLock == nil || c.lockCh == nil {
return nil
}
select {
case <-c.lockCh:
return errors.New("consul lock was lost")
default:
}
err := c.consulLock.Unlock()
c.lockCh = nil
// This is only cleanup, and will fail if the lock was immediately taken by
// another client, so we don't report an error to the user here.
c.consulLock.Destroy()
kv := c.Client.KV()
_, delErr := kv.Delete(c.Path+lockInfoSuffix, nil)
if delErr != nil {
err = multierror.Append(err, delErr)
}
return err
}
func compressState(data []byte) ([]byte, error) {
b := new(bytes.Buffer)
gz := gzip.NewWriter(b)
if _, err := gz.Write(data); err != nil {
return nil, err
}
if err := gz.Flush(); err != nil {
return nil, err
}
if err := gz.Close(); err != nil {
return nil, err
}
return b.Bytes(), nil
}
func uncompressState(data []byte) ([]byte, error) {
b := new(bytes.Buffer)
gz, err := gzip.NewReader(bytes.NewReader(data))
if err != nil {
return nil, err
}
if _, err := b.ReadFrom(gz); err != nil {
return nil, err
}
if err := gz.Close(); err != nil {
return nil, err
}
return b.Bytes(), nil
}
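The gzip handling above is easy to check in isolation. The following standalone sketch (not part of the change itself) compresses a made-up payload the way Put does when gzip is enabled, then applies the same leading-byte check Get uses to decide whether to decompress.

package main

import (
    "bytes"
    "compress/gzip"
    "fmt"
    "io/ioutil"
)

func main() {
    state := []byte(`{"version": 3}`) // made-up state payload

    // Compress, as Put does when GZip is set on the client.
    var buf bytes.Buffer
    gz := gzip.NewWriter(&buf)
    if _, err := gz.Write(state); err != nil {
        panic(err)
    }
    if err := gz.Close(); err != nil {
        panic(err)
    }
    stored := buf.Bytes()

    // Get uses the gzip magic byte (0x1f) to decide whether the stored value
    // needs to be uncompressed before it is returned.
    payload := stored
    if len(stored) >= 1 && stored[0] == '\x1f' {
        r, err := gzip.NewReader(bytes.NewReader(stored))
        if err != nil {
            panic(err)
        }
        payload, err = ioutil.ReadAll(r)
        if err != nil {
            panic(err)
        }
    }
    fmt.Println(string(payload)) // {"version": 3}
}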

View File

@ -0,0 +1,141 @@
package consul
import (
"fmt"
"testing"
"time"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/state/remote"
)
func TestRemoteClient_impl(t *testing.T) {
var _ remote.Client = new(RemoteClient)
var _ remote.ClientLocker = new(RemoteClient)
}
func TestRemoteClient(t *testing.T) {
srv := newConsulTestServer(t)
defer srv.Stop()
// Get the backend
b := backend.TestBackendConfig(t, New(), map[string]interface{}{
"address": srv.HTTPAddr,
"path": fmt.Sprintf("tf-unit/%s", time.Now().String()),
})
// Grab the client
state, err := b.State(backend.DefaultStateName)
if err != nil {
t.Fatalf("err: %s", err)
}
// Test
remote.TestClient(t, state.(*remote.State).Client)
}
// test the gzip functionality of the client
func TestRemoteClient_gzipUpgrade(t *testing.T) {
srv := newConsulTestServer(t)
defer srv.Stop()
statePath := fmt.Sprintf("tf-unit/%s", time.Now().String())
// Get the backend
b := backend.TestBackendConfig(t, New(), map[string]interface{}{
"address": srv.HTTPAddr,
"path": statePath,
})
// Grab the client
state, err := b.State(backend.DefaultStateName)
if err != nil {
t.Fatalf("err: %s", err)
}
// Test
remote.TestClient(t, state.(*remote.State).Client)
// create a new backend with gzip
b = backend.TestBackendConfig(t, New(), map[string]interface{}{
"address": srv.HTTPAddr,
"path": statePath,
"gzip": true,
})
// Grab the client
state, err = b.State(backend.DefaultStateName)
if err != nil {
t.Fatalf("err: %s", err)
}
// Test
remote.TestClient(t, state.(*remote.State).Client)
}
func TestConsul_stateLock(t *testing.T) {
srv := newConsulTestServer(t)
defer srv.Stop()
path := fmt.Sprintf("tf-unit/%s", time.Now().String())
// create 2 instances to get 2 remote.Clients
sA, err := backend.TestBackendConfig(t, New(), map[string]interface{}{
"address": srv.HTTPAddr,
"path": path,
}).State(backend.DefaultStateName)
if err != nil {
t.Fatal(err)
}
sB, err := backend.TestBackendConfig(t, New(), map[string]interface{}{
"address": srv.HTTPAddr,
"path": path,
}).State(backend.DefaultStateName)
if err != nil {
t.Fatal(err)
}
remote.TestRemoteLocks(t, sA.(*remote.State).Client, sB.(*remote.State).Client)
}
func TestConsul_destroyLock(t *testing.T) {
srv := newConsulTestServer(t)
defer srv.Stop()
// Get the backend
b := backend.TestBackendConfig(t, New(), map[string]interface{}{
"address": srv.HTTPAddr,
"path": fmt.Sprintf("tf-unit/%s", time.Now().String()),
})
// Grab the client
s, err := b.State(backend.DefaultStateName)
if err != nil {
t.Fatalf("err: %s", err)
}
c := s.(*remote.State).Client.(*RemoteClient)
info := state.NewLockInfo()
id, err := c.Lock(info)
if err != nil {
t.Fatal(err)
}
lockPath := c.Path + lockSuffix
if err := c.Unlock(id); err != nil {
t.Fatal(err)
}
// get the lock val
pair, _, err := c.Client.KV().Get(lockPath, nil)
if err != nil {
t.Fatal(err)
}
if pair != nil {
t.Fatalf("lock key not cleaned up at: %s", pair.Key)
}
}

View File

@ -0,0 +1,41 @@
package inmem
import (
"context"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/backend/remote-state"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/state/remote"
)
// New creates a new backend for Inmem remote state.
func New() backend.Backend {
return &remotestate.Backend{
ConfigureFunc: configure,
// Set the schema
Backend: &schema.Backend{
Schema: map[string]*schema.Schema{
"lock_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "initializes the state in a locked configuration",
},
},
},
}
}
func configure(ctx context.Context) (remote.Client, error) {
data := schema.FromContextBackendConfig(ctx)
if v, ok := data.GetOk("lock_id"); ok && v.(string) != "" {
info := state.NewLockInfo()
info.ID = v.(string)
info.Operation = "test"
info.Info = "test config"
return &RemoteClient{LockInfo: info}, nil
}
return &RemoteClient{}, nil
}

View File

@ -0,0 +1,79 @@
package inmem
import (
"crypto/md5"
"errors"
"time"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/state/remote"
)
// RemoteClient is a remote client that stores data in memory for testing.
type RemoteClient struct {
Data []byte
MD5 []byte
LockInfo *state.LockInfo
}
func (c *RemoteClient) Get() (*remote.Payload, error) {
if c.Data == nil {
return nil, nil
}
return &remote.Payload{
Data: c.Data,
MD5: c.MD5,
}, nil
}
func (c *RemoteClient) Put(data []byte) error {
md5 := md5.Sum(data)
c.Data = data
c.MD5 = md5[:]
return nil
}
func (c *RemoteClient) Delete() error {
c.Data = nil
c.MD5 = nil
return nil
}
func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) {
lockErr := &state.LockError{
Info: &state.LockInfo{},
}
if c.LockInfo != nil {
lockErr.Err = errors.New("state locked")
// make a copy of the lock info to avoid any testing shenanigans
*lockErr.Info = *c.LockInfo
return "", lockErr
}
info.Created = time.Now().UTC()
c.LockInfo = info
return c.LockInfo.ID, nil
}
func (c *RemoteClient) Unlock(id string) error {
if c.LockInfo == nil {
return errors.New("state not locked")
}
lockErr := &state.LockError{
Info: &state.LockInfo{},
}
if id != c.LockInfo.ID {
lockErr.Err = errors.New("invalid lock id")
*lockErr.Info = *c.LockInfo
return lockErr
}
c.LockInfo = nil
return nil
}
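Since the in-memory client exists mainly to exercise the shared backend tests, its lock semantics are worth spelling out. The example test below is a sketch (not part of the change itself; the function name is assumed) written in the same package: the first Lock wins, a competing Lock returns a *state.LockError describing the holder, and Unlock requires the matching ID.

package inmem

import (
    "fmt"

    "github.com/hashicorp/terraform/state"
)

func ExampleRemoteClient_locking() {
    c := &RemoteClient{}

    info := state.NewLockInfo()
    info.Operation = "example"

    // First lock succeeds and hands back the generated ID.
    id, err := c.Lock(info)
    fmt.Println(err == nil, id == info.ID)

    // A second lock attempt is refused with a *state.LockError.
    _, err = c.Lock(state.NewLockInfo())
    _, conflicted := err.(*state.LockError)
    fmt.Println(conflicted)

    // Unlocking with the original ID releases the lock.
    fmt.Println(c.Unlock(id) == nil)

    // Output:
    // true true
    // true
    // true
}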

View File

@ -0,0 +1,28 @@
package inmem
import (
"testing"
"github.com/hashicorp/terraform/backend"
remotestate "github.com/hashicorp/terraform/backend/remote-state"
"github.com/hashicorp/terraform/state/remote"
)
func TestRemoteClient_impl(t *testing.T) {
var _ remote.Client = new(RemoteClient)
var _ remote.ClientLocker = new(RemoteClient)
}
func TestRemoteClient(t *testing.T) {
b := backend.TestBackendConfig(t, New(), nil)
remotestate.TestClient(t, b)
}
func TestInmemLocks(t *testing.T) {
s, err := backend.TestBackendConfig(t, New(), nil).State(backend.DefaultStateName)
if err != nil {
t.Fatal(err)
}
remote.TestRemoteLocks(t, s.(*remote.State).Client, s.(*remote.State).Client)
}

View File

@ -0,0 +1,195 @@
package s3
import (
"context"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/helper/schema"
terraformAWS "github.com/hashicorp/terraform/builtin/providers/aws"
)
// New creates a new backend for S3 remote state.
func New() backend.Backend {
s := &schema.Backend{
Schema: map[string]*schema.Schema{
"bucket": {
Type: schema.TypeString,
Required: true,
Description: "The name of the S3 bucket",
},
"key": {
Type: schema.TypeString,
Required: true,
Description: "The path to the state file inside the bucket",
},
"region": {
Type: schema.TypeString,
Required: true,
Description: "The region of the S3 bucket.",
DefaultFunc: schema.EnvDefaultFunc("AWS_DEFAULT_REGION", nil),
},
"endpoint": {
Type: schema.TypeString,
Optional: true,
Description: "A custom endpoint for the S3 API",
DefaultFunc: schema.EnvDefaultFunc("AWS_S3_ENDPOINT", ""),
},
"encrypt": {
Type: schema.TypeBool,
Optional: true,
Description: "Whether to enable server side encryption of the state file",
Default: false,
},
"acl": {
Type: schema.TypeString,
Optional: true,
Description: "Canned ACL to be applied to the state file",
Default: "",
},
"access_key": {
Type: schema.TypeString,
Optional: true,
Description: "AWS access key",
Default: "",
},
"secret_key": {
Type: schema.TypeString,
Optional: true,
Description: "AWS secret key",
Default: "",
},
"kms_key_id": {
Type: schema.TypeString,
Optional: true,
Description: "The ARN of a KMS Key to use for encrypting the state",
Default: "",
},
"lock_table": {
Type: schema.TypeString,
Optional: true,
Description: "DynamoDB table for state locking",
Default: "",
},
"profile": {
Type: schema.TypeString,
Optional: true,
Description: "AWS profile name",
Default: "",
},
"shared_credentials_file": {
Type: schema.TypeString,
Optional: true,
Description: "Path to a shared credentials file",
Default: "",
},
"token": {
Type: schema.TypeString,
Optional: true,
Description: "MFA token",
Default: "",
},
"role_arn": {
Type: schema.TypeString,
Optional: true,
Description: "The role to be assumed",
Default: "",
},
"session_name": {
Type: schema.TypeString,
Optional: true,
Description: "The session name to use when assuming the role.",
Default: "",
},
"external_id": {
Type: schema.TypeString,
Optional: true,
Description: "The external ID to use when assuming the role",
Default: "",
},
"assume_role_policy": {
Type: schema.TypeString,
Optional: true,
Description: "The permissions applied when assuming a role.",
Default: "",
},
},
}
result := &Backend{Backend: s}
result.Backend.ConfigureFunc = result.configure
return result
}
type Backend struct {
*schema.Backend
// The fields below are set from configure
s3Client *s3.S3
dynClient *dynamodb.DynamoDB
bucketName string
keyName string
serverSideEncryption bool
acl string
kmsKeyID string
lockTable string
}
func (b *Backend) configure(ctx context.Context) error {
if b.s3Client != nil {
return nil
}
// Grab the resource data
data := schema.FromContextBackendConfig(ctx)
b.bucketName = data.Get("bucket").(string)
b.keyName = data.Get("key").(string)
b.serverSideEncryption = data.Get("encrypt").(bool)
b.acl = data.Get("acl").(string)
b.kmsKeyID = data.Get("kms_key_id").(string)
b.lockTable = data.Get("lock_table").(string)
cfg := &terraformAWS.Config{
AccessKey: data.Get("access_key").(string),
AssumeRoleARN: data.Get("role_arn").(string),
AssumeRoleExternalID: data.Get("external_id").(string),
AssumeRolePolicy: data.Get("assume_role_policy").(string),
AssumeRoleSessionName: data.Get("session_name").(string),
CredsFilename: data.Get("shared_credentials_file").(string),
Profile: data.Get("profile").(string),
Region: data.Get("region").(string),
S3Endpoint: data.Get("endpoint").(string),
SecretKey: data.Get("secret_key").(string),
Token: data.Get("token").(string),
}
client, err := cfg.Client()
if err != nil {
return err
}
b.s3Client = client.(*terraformAWS.AWSClient).S3()
b.dynClient = client.(*terraformAWS.AWSClient).DynamoDB()
return nil
}

View File

@ -0,0 +1,169 @@
package s3
import (
"errors"
"fmt"
"sort"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/state/remote"
"github.com/hashicorp/terraform/terraform"
)
const (
// This will be used as a directory name; the odd-looking colon is simply to
// reduce the chance of name conflicts with existing objects.
keyEnvPrefix = "env:"
)
func (b *Backend) States() ([]string, error) {
params := &s3.ListObjectsInput{
Bucket: &b.bucketName,
Prefix: aws.String(keyEnvPrefix + "/"),
}
resp, err := b.s3Client.ListObjects(params)
if err != nil {
return nil, err
}
envs := []string{backend.DefaultStateName}
for _, obj := range resp.Contents {
env := b.keyEnv(*obj.Key)
if env != "" {
envs = append(envs, env)
}
}
sort.Strings(envs[1:])
return envs, nil
}
// extract the env name from the S3 key
func (b *Backend) keyEnv(key string) string {
// we have 3 parts, the prefix, the env name, and the key name
parts := strings.SplitN(key, "/", 3)
if len(parts) < 3 {
// no env here
return ""
}
// shouldn't happen since we listed by prefix
if parts[0] != keyEnvPrefix {
return ""
}
// not our key, so don't include it in our listing
if parts[2] != b.keyName {
return ""
}
return parts[1]
}
func (b *Backend) DeleteState(name string) error {
if name == backend.DefaultStateName || name == "" {
return fmt.Errorf("can't delete default state")
}
params := &s3.DeleteObjectInput{
Bucket: &b.bucketName,
Key: aws.String(b.path(name)),
}
_, err := b.s3Client.DeleteObject(params)
if err != nil {
return err
}
return nil
}
func (b *Backend) State(name string) (state.State, error) {
if name == "" {
return nil, errors.New("missing state name")
}
client := &RemoteClient{
s3Client: b.s3Client,
dynClient: b.dynClient,
bucketName: b.bucketName,
path: b.path(name),
serverSideEncryption: b.serverSideEncryption,
acl: b.acl,
kmsKeyID: b.kmsKeyID,
lockTable: b.lockTable,
}
stateMgr := &remote.State{Client: client}
// if this isn't the default state name, we need to create the object so
// it's listed by States.
if name != backend.DefaultStateName {
// take a lock on this state while we write it
lockInfo := state.NewLockInfo()
lockInfo.Operation = "init"
lockId, err := client.Lock(lockInfo)
if err != nil {
return nil, fmt.Errorf("failed to lock s3 state: %s", err)
}
// Local helper function so we can call it in multiple places
lockUnlock := func(parent error) error {
if err := stateMgr.Unlock(lockId); err != nil {
return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err)
}
return parent
}
// Grab the value
if err := stateMgr.RefreshState(); err != nil {
err = lockUnlock(err)
return nil, err
}
// If we have no state, we have to create an empty state
if v := stateMgr.State(); v == nil {
if err := stateMgr.WriteState(terraform.NewState()); err != nil {
err = lockUnlock(err)
return nil, err
}
if err := stateMgr.PersistState(); err != nil {
err = lockUnlock(err)
return nil, err
}
}
// Unlock, the state should now be initialized
if err := lockUnlock(nil); err != nil {
return nil, err
}
}
return stateMgr, nil
}
func (b *Backend) client() *RemoteClient {
return &RemoteClient{}
}
func (b *Backend) path(name string) string {
if name == backend.DefaultStateName {
return b.keyName
}
return strings.Join([]string{keyEnvPrefix, name, b.keyName}, "/")
}
const errStateUnlock = `
Error unlocking S3 state. Lock ID: %s
Error: %s
You may have to force-unlock this state in order to use it again.
`
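To illustrate the object-key scheme used above, here is a standalone sketch (not part of the change itself) of the keyEnv split; the key name "path/to/terraform.tfstate" and the environment names are assumed example values.

package main

import (
    "fmt"
    "strings"
)

// keyEnv mirrors Backend.keyEnv above: an object key only yields an
// environment name when it carries the "env:" prefix and ends in our state key.
func keyEnv(key, stateKey string) string {
    parts := strings.SplitN(key, "/", 3)
    if len(parts) < 3 || parts[0] != "env:" || parts[2] != stateKey {
        return ""
    }
    return parts[1]
}

func main() {
    stateKey := "path/to/terraform.tfstate"

    fmt.Printf("%q\n", keyEnv("env:/dev/path/to/terraform.tfstate", stateKey)) // "dev"
    fmt.Printf("%q\n", keyEnv("env:/dev/other.tfstate", stateKey))             // "" (someone else's key)
    fmt.Printf("%q\n", keyEnv("path/to/terraform.tfstate", stateKey))          // "" (default state, no env prefix)
}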

View File

@ -0,0 +1,339 @@
package s3
import (
"fmt"
"os"
"reflect"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/state/remote"
"github.com/hashicorp/terraform/terraform"
)
// verify that we are doing ACC tests or the S3 tests specifically
func testACC(t *testing.T) {
skip := os.Getenv("TF_ACC") == "" && os.Getenv("TF_S3_TEST") == ""
if skip {
t.Log("s3 backend tests require setting TF_ACC or TF_S3_TEST")
t.Skip()
}
if os.Getenv("AWS_DEFAULT_REGION") == "" {
os.Setenv("AWS_DEFAULT_REGION", "us-west-2")
}
}
func TestBackend_impl(t *testing.T) {
var _ backend.Backend = new(Backend)
}
func TestBackendConfig(t *testing.T) {
testACC(t)
config := map[string]interface{}{
"region": "us-west-1",
"bucket": "tf-test",
"key": "state",
"encrypt": true,
"lock_table": "dynamoTable",
}
b := backend.TestBackendConfig(t, New(), config).(*Backend)
if *b.s3Client.Config.Region != "us-west-1" {
t.Fatalf("Incorrect region was populated")
}
if b.bucketName != "tf-test" {
t.Fatalf("Incorrect bucketName was populated")
}
if b.keyName != "state" {
t.Fatalf("Incorrect keyName was populated")
}
credentials, err := b.s3Client.Config.Credentials.Get()
if err != nil {
t.Fatalf("Error when requesting credentials")
}
if credentials.AccessKeyID == "" {
t.Fatalf("No Access Key Id was populated")
}
if credentials.SecretAccessKey == "" {
t.Fatalf("No Secret Access Key was populated")
}
}
func TestBackend(t *testing.T) {
testACC(t)
bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix())
keyName := "testState"
b := backend.TestBackendConfig(t, New(), map[string]interface{}{
"bucket": bucketName,
"key": keyName,
"encrypt": true,
}).(*Backend)
createS3Bucket(t, b.s3Client, bucketName)
defer deleteS3Bucket(t, b.s3Client, bucketName)
backend.TestBackend(t, b, nil)
}
func TestBackendLocked(t *testing.T) {
testACC(t)
bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix())
keyName := "test/state"
b1 := backend.TestBackendConfig(t, New(), map[string]interface{}{
"bucket": bucketName,
"key": keyName,
"encrypt": true,
"lock_table": bucketName,
}).(*Backend)
b2 := backend.TestBackendConfig(t, New(), map[string]interface{}{
"bucket": bucketName,
"key": keyName,
"encrypt": true,
"lock_table": bucketName,
}).(*Backend)
createS3Bucket(t, b1.s3Client, bucketName)
defer deleteS3Bucket(t, b1.s3Client, bucketName)
createDynamoDBTable(t, b1.dynClient, bucketName)
defer deleteDynamoDBTable(t, b1.dynClient, bucketName)
backend.TestBackend(t, b1, b2)
}
// add some extra junk in S3 to try and confuse the env listing.
func TestBackendExtraPaths(t *testing.T) {
testACC(t)
bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix())
keyName := "test/state/tfstate"
b := backend.TestBackendConfig(t, New(), map[string]interface{}{
"bucket": bucketName,
"key": keyName,
"encrypt": true,
}).(*Backend)
createS3Bucket(t, b.s3Client, bucketName)
defer deleteS3Bucket(t, b.s3Client, bucketName)
// put multiple states in old env paths.
s1 := terraform.NewState()
s2 := terraform.NewState()
// RemoteClient to Put things in various paths
client := &RemoteClient{
s3Client: b.s3Client,
dynClient: b.dynClient,
bucketName: b.bucketName,
path: b.path("s1"),
serverSideEncryption: b.serverSideEncryption,
acl: b.acl,
kmsKeyID: b.kmsKeyID,
lockTable: b.lockTable,
}
stateMgr := &remote.State{Client: client}
stateMgr.WriteState(s1)
if err := stateMgr.PersistState(); err != nil {
t.Fatal(err)
}
client.path = b.path("s2")
stateMgr.WriteState(s2)
if err := stateMgr.PersistState(); err != nil {
t.Fatal(err)
}
if err := checkStateList(b, []string{"default", "s1", "s2"}); err != nil {
t.Fatal(err)
}
// put a state in an env directory name
client.path = keyEnvPrefix + "/error"
stateMgr.WriteState(terraform.NewState())
if err := stateMgr.PersistState(); err != nil {
t.Fatal(err)
}
if err := checkStateList(b, []string{"default", "s1", "s2"}); err != nil {
t.Fatal(err)
}
// add state with the wrong key for an existing env
client.path = keyEnvPrefix + "/s2/notTestState"
stateMgr.WriteState(terraform.NewState())
if err := stateMgr.PersistState(); err != nil {
t.Fatal(err)
}
if err := checkStateList(b, []string{"default", "s1", "s2"}); err != nil {
t.Fatal(err)
}
// remove the state with extra subkey
if err := b.DeleteState("s2"); err != nil {
t.Fatal(err)
}
if err := checkStateList(b, []string{"default", "s1"}); err != nil {
t.Fatal(err)
}
// fetch that state again, which should produce a new lineage
s2Mgr, err := b.State("s2")
if err != nil {
t.Fatal(err)
}
if err := s2Mgr.RefreshState(); err != nil {
t.Fatal(err)
}
if s2Mgr.State().Lineage == s2.Lineage {
t.Fatal("state s2 was not deleted")
}
s2 = s2Mgr.State()
// add a state with a key that matches an existing environment dir name
client.path = keyEnvPrefix + "/s2/"
stateMgr.WriteState(terraform.NewState())
if err := stateMgr.PersistState(); err != nil {
t.Fatal(err)
}
// make sure s2 is OK
s2Mgr, err = b.State("s2")
if err != nil {
t.Fatal(err)
}
if err := s2Mgr.RefreshState(); err != nil {
t.Fatal(err)
}
if s2Mgr.State().Lineage != s2.Lineage {
t.Fatal("we got the wrong state for s2")
}
if err := checkStateList(b, []string{"default", "s1", "s2"}); err != nil {
t.Fatal(err)
}
}
func checkStateList(b backend.Backend, expected []string) error {
states, err := b.States()
if err != nil {
return err
}
if !reflect.DeepEqual(states, expected) {
return fmt.Errorf("incorrect states listed: %q", states)
}
return nil
}
func createS3Bucket(t *testing.T, s3Client *s3.S3, bucketName string) {
createBucketReq := &s3.CreateBucketInput{
Bucket: &bucketName,
}
// Be clear about what we're doing in case the user needs to clean
// this up later.
t.Logf("creating S3 bucket %s in %s", bucketName, *s3Client.Config.Region)
_, err := s3Client.CreateBucket(createBucketReq)
if err != nil {
t.Fatal("failed to create test S3 bucket:", err)
}
}
func deleteS3Bucket(t *testing.T, s3Client *s3.S3, bucketName string) {
warning := "WARNING: Failed to delete the test S3 bucket. It may have been left in your AWS account and may incur storage charges. (error was %s)"
// first we have to get rid of the env objects, or we can't delete the bucket
resp, err := s3Client.ListObjects(&s3.ListObjectsInput{Bucket: &bucketName})
if err != nil {
t.Logf(warning, err)
return
}
for _, obj := range resp.Contents {
if _, err := s3Client.DeleteObject(&s3.DeleteObjectInput{Bucket: &bucketName, Key: obj.Key}); err != nil {
// this will need cleanup no matter what, so just warn and exit
t.Logf(warning, err)
return
}
}
if _, err := s3Client.DeleteBucket(&s3.DeleteBucketInput{Bucket: &bucketName}); err != nil {
t.Logf(warning, err)
}
}
// create the dynamoDB table, and wait until we can query it.
func createDynamoDBTable(t *testing.T, dynClient *dynamodb.DynamoDB, tableName string) {
createInput := &dynamodb.CreateTableInput{
AttributeDefinitions: []*dynamodb.AttributeDefinition{
{
AttributeName: aws.String("LockID"),
AttributeType: aws.String("S"),
},
},
KeySchema: []*dynamodb.KeySchemaElement{
{
AttributeName: aws.String("LockID"),
KeyType: aws.String("HASH"),
},
},
ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
ReadCapacityUnits: aws.Int64(5),
WriteCapacityUnits: aws.Int64(5),
},
TableName: aws.String(tableName),
}
_, err := dynClient.CreateTable(createInput)
if err != nil {
t.Fatal(err)
}
// now wait until it's ACTIVE
start := time.Now()
time.Sleep(time.Second)
describeInput := &dynamodb.DescribeTableInput{
TableName: aws.String(tableName),
}
for {
resp, err := dynClient.DescribeTable(describeInput)
if err != nil {
t.Fatal(err)
}
if *resp.Table.TableStatus == "ACTIVE" {
return
}
if time.Since(start) > time.Minute {
t.Fatalf("timed out creating DynamoDB table %s", tableName)
}
time.Sleep(3 * time.Second)
}
}
func deleteDynamoDBTable(t *testing.T, dynClient *dynamodb.DynamoDB, tableName string) {
params := &dynamodb.DeleteTableInput{
TableName: aws.String(tableName),
}
_, err := dynClient.DeleteTable(params)
if err != nil {
t.Logf("WARNING: Failed to delete the test DynamoDB table %q. It has been left in your AWS account and may incur charges. (error was %s)", tableName, err)
}
}

View File

@ -0,0 +1,219 @@
package s3
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/s3"
multierror "github.com/hashicorp/go-multierror"
uuid "github.com/hashicorp/go-uuid"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/state/remote"
)
type RemoteClient struct {
s3Client *s3.S3
dynClient *dynamodb.DynamoDB
bucketName string
path string
serverSideEncryption bool
acl string
kmsKeyID string
lockTable string
}
func (c *RemoteClient) Get() (*remote.Payload, error) {
output, err := c.s3Client.GetObject(&s3.GetObjectInput{
Bucket: &c.bucketName,
Key: &c.path,
})
if err != nil {
// Use the two-value type assertion; a bare err.(awserr.Error) would panic
// if the error is not an awserr.Error.
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoSuchKey" {
return nil, nil
}
return nil, err
}
defer output.Body.Close()
buf := bytes.NewBuffer(nil)
if _, err := io.Copy(buf, output.Body); err != nil {
return nil, fmt.Errorf("Failed to read remote state: %s", err)
}
payload := &remote.Payload{
Data: buf.Bytes(),
}
// If there was no data, then return nil
if len(payload.Data) == 0 {
return nil, nil
}
return payload, nil
}
func (c *RemoteClient) Put(data []byte) error {
contentType := "application/json"
contentLength := int64(len(data))
i := &s3.PutObjectInput{
ContentType: &contentType,
ContentLength: &contentLength,
Body: bytes.NewReader(data),
Bucket: &c.bucketName,
Key: &c.path,
}
if c.serverSideEncryption {
if c.kmsKeyID != "" {
i.SSEKMSKeyId = &c.kmsKeyID
i.ServerSideEncryption = aws.String("aws:kms")
} else {
i.ServerSideEncryption = aws.String("AES256")
}
}
if c.acl != "" {
i.ACL = aws.String(c.acl)
}
log.Printf("[DEBUG] Uploading remote state to S3: %#v", i)
if _, err := c.s3Client.PutObject(i); err != nil {
return fmt.Errorf("Failed to upload state: %v", err)
}
return nil
}
func (c *RemoteClient) Delete() error {
_, err := c.s3Client.DeleteObject(&s3.DeleteObjectInput{
Bucket: &c.bucketName,
Key: &c.path,
})
return err
}
func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) {
if c.lockTable == "" {
return "", nil
}
info.Path = c.lockPath()
if info.ID == "" {
lockID, err := uuid.GenerateUUID()
if err != nil {
return "", err
}
info.ID = lockID
}
putParams := &dynamodb.PutItemInput{
Item: map[string]*dynamodb.AttributeValue{
"LockID": {S: aws.String(c.lockPath())},
"Info": {S: aws.String(string(info.Marshal()))},
},
TableName: aws.String(c.lockTable),
ConditionExpression: aws.String("attribute_not_exists(LockID)"),
}
_, err := c.dynClient.PutItem(putParams)
if err != nil {
lockInfo, infoErr := c.getLockInfo()
if infoErr != nil {
err = multierror.Append(err, infoErr)
}
lockErr := &state.LockError{
Err: err,
Info: lockInfo,
}
return "", lockErr
}
return info.ID, nil
}
func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) {
getParams := &dynamodb.GetItemInput{
Key: map[string]*dynamodb.AttributeValue{
"LockID": {S: aws.String(c.lockPath())},
},
ProjectionExpression: aws.String("LockID, Info"),
TableName: aws.String(c.lockTable),
}
resp, err := c.dynClient.GetItem(getParams)
if err != nil {
return nil, err
}
var infoData string
if v, ok := resp.Item["Info"]; ok && v.S != nil {
infoData = *v.S
}
lockInfo := &state.LockInfo{}
err = json.Unmarshal([]byte(infoData), lockInfo)
if err != nil {
return nil, err
}
return lockInfo, nil
}
func (c *RemoteClient) Unlock(id string) error {
if c.lockTable == "" {
return nil
}
lockErr := &state.LockError{}
// TODO: store the path and lock ID in separate fields, and use a proper
// condition expression so the delete only succeeds if both match, rather
// than checking the ID from the info field first.
lockInfo, err := c.getLockInfo()
if err != nil {
lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err)
return lockErr
}
lockErr.Info = lockInfo
if lockInfo.ID != id {
lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id)
return lockErr
}
params := &dynamodb.DeleteItemInput{
Key: map[string]*dynamodb.AttributeValue{
"LockID": {S: aws.String(c.lockPath())},
},
TableName: aws.String(c.lockTable),
}
_, err = c.dynClient.DeleteItem(params)
if err != nil {
lockErr.Err = err
return lockErr
}
return nil
}
func (c *RemoteClient) lockPath() string {
return fmt.Sprintf("%s/%s", c.bucketName, c.path)
}

View File

@ -0,0 +1,76 @@
package s3
import (
"fmt"
"testing"
"time"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/state/remote"
)
func TestRemoteClient_impl(t *testing.T) {
var _ remote.Client = new(RemoteClient)
var _ remote.ClientLocker = new(RemoteClient)
}
func TestRemoteClient(t *testing.T) {
testACC(t)
bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix())
keyName := "testState"
b := backend.TestBackendConfig(t, New(), map[string]interface{}{
"bucket": bucketName,
"key": keyName,
"encrypt": true,
}).(*Backend)
state, err := b.State(backend.DefaultStateName)
if err != nil {
t.Fatal(err)
}
createS3Bucket(t, b.s3Client, bucketName)
defer deleteS3Bucket(t, b.s3Client, bucketName)
remote.TestClient(t, state.(*remote.State).Client)
}
func TestRemoteClientLocks(t *testing.T) {
testACC(t)
bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix())
keyName := "testState"
b1 := backend.TestBackendConfig(t, New(), map[string]interface{}{
"bucket": bucketName,
"key": keyName,
"encrypt": true,
"lock_table": bucketName,
}).(*Backend)
b2 := backend.TestBackendConfig(t, New(), map[string]interface{}{
"bucket": bucketName,
"key": keyName,
"encrypt": true,
"lock_table": bucketName,
}).(*Backend)
s1, err := b1.State(backend.DefaultStateName)
if err != nil {
t.Fatal(err)
}
s2, err := b2.State(backend.DefaultStateName)
if err != nil {
t.Fatal(err)
}
createS3Bucket(t, b1.s3Client, bucketName)
defer deleteS3Bucket(t, b1.s3Client, bucketName)
createDynamoDBTable(t, b1.dynClient, bucketName)
defer deleteDynamoDBTable(t, b1.dynClient, bucketName)
remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client)
}

View File

@ -0,0 +1,17 @@
package remotestate
import (
"testing"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/state/remote"
)
func TestClient(t *testing.T, raw backend.Backend) {
b, ok := raw.(*Backend)
if !ok {
t.Fatalf("not Backend: %T", raw)
}
remote.TestClient(t, b.client)
}

backend/testing.go
View File

@ -0,0 +1,278 @@
package backend
import (
"reflect"
"sort"
"testing"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/terraform"
)
// TestBackendConfig validates and configures the backend with the
// given configuration.
func TestBackendConfig(t *testing.T, b Backend, c map[string]interface{}) Backend {
// Get the proper config structure
rc, err := config.NewRawConfig(c)
if err != nil {
t.Fatalf("bad: %s", err)
}
conf := terraform.NewResourceConfig(rc)
// Validate
warns, errs := b.Validate(conf)
if len(warns) > 0 {
t.Fatalf("warnings: %s", warns)
}
if len(errs) > 0 {
t.Fatalf("errors: %s", errs)
}
// Configure
if err := b.Configure(conf); err != nil {
t.Fatalf("err: %s", err)
}
return b
}
// TestBackend will test the functionality of a Backend. The backend is
// assumed to already be configured. This will test state functionality.
// If the backend reports it doesn't support multi-state by returning the
// error ErrNamedStatesNotSupported, then it will not test that.
//
// If you want to test locking, two backends must be given. If b2 is nil,
// then state locking won't be tested.
func TestBackend(t *testing.T, b1, b2 Backend) {
testBackendStates(t, b1)
if b2 != nil {
testBackendStateLock(t, b1, b2)
}
}
func testBackendStates(t *testing.T, b Backend) {
states, err := b.States()
if err == ErrNamedStatesNotSupported {
t.Logf("TestBackend: named states not supported in %T, skipping", b)
return
}
if err != nil {
t.Fatalf("error listing states: %s", err)
}
// Test it starts with only the default
if len(states) != 1 || states[0] != DefaultStateName {
t.Fatalf("should only have default to start: %#v", states)
}
// Create a couple states
foo, err := b.State("foo")
if err != nil {
t.Fatalf("error: %s", err)
}
if err := foo.RefreshState(); err != nil {
t.Fatalf("bad: %s", err)
}
if v := foo.State(); v.HasResources() {
t.Fatalf("should be empty: %s", v)
}
bar, err := b.State("bar")
if err != nil {
t.Fatalf("error: %s", err)
}
if err := bar.RefreshState(); err != nil {
t.Fatalf("bad: %s", err)
}
if v := bar.State(); v.HasResources() {
t.Fatalf("should be empty: %s", v)
}
// Verify they are distinct states that can be read back from storage
{
// start with a fresh state, and record the lineage being
// written to "bar"
barState := terraform.NewState()
barLineage := barState.Lineage
// the foo lineage should be distinct from bar, and unchanged after
// modifying bar
fooState := terraform.NewState()
fooLineage := fooState.Lineage
// write a known state to foo
if err := foo.WriteState(fooState); err != nil {
t.Fatal("error writing foo state:", err)
}
if err := foo.PersistState(); err != nil {
t.Fatal("error persisting foo state:", err)
}
// write a distinct known state to bar
if err := bar.WriteState(barState); err != nil {
t.Fatalf("bad: %s", err)
}
if err := bar.PersistState(); err != nil {
t.Fatalf("bad: %s", err)
}
// verify that foo is unchanged with the existing state manager
if err := foo.RefreshState(); err != nil {
t.Fatal("error refreshing foo:", err)
}
fooState = foo.State()
switch {
case fooState == nil:
t.Fatal("nil state read from foo")
case fooState.Lineage == barLineage:
t.Fatalf("bar lineage read from foo: %#v", fooState)
case fooState.Lineage != fooLineage:
t.Fatal("foo lineage alterred")
}
// fetch foo again from the backend
foo, err = b.State("foo")
if err != nil {
t.Fatal("error re-fetching state:", err)
}
if err := foo.RefreshState(); err != nil {
t.Fatal("error refreshing foo:", err)
}
fooState = foo.State()
switch {
case fooState == nil:
t.Fatal("nil state read from foo")
case fooState.Lineage != fooLineage:
t.Fatal("incorrect state returned from backend")
}
// fetch the bar again from the backend
bar, err = b.State("bar")
if err != nil {
t.Fatal("error re-fetching state:", err)
}
if err := bar.RefreshState(); err != nil {
t.Fatal("error refreshing bar:", err)
}
barState = bar.State()
switch {
case barState == nil:
t.Fatal("nil state read from bar")
case barState.Lineage != barLineage:
t.Fatal("incorrect state returned from backend")
}
}
// Verify we can now list them
{
// we determined earlier that named states are supported
states, err := b.States()
if err != nil {
t.Fatal(err)
}
sort.Strings(states)
expected := []string{"bar", "default", "foo"}
if !reflect.DeepEqual(states, expected) {
t.Fatalf("bad: %#v", states)
}
}
// Delete some states
if err := b.DeleteState("foo"); err != nil {
t.Fatalf("err: %s", err)
}
// Verify the default state can't be deleted
if err := b.DeleteState(DefaultStateName); err == nil {
t.Fatal("expected error")
}
// Verify deletion
{
states, err := b.States()
if err == ErrNamedStatesNotSupported {
t.Logf("TestBackend: named states not supported in %T, skipping", b)
return
}
sort.Strings(states)
expected := []string{"bar", "default"}
if !reflect.DeepEqual(states, expected) {
t.Fatalf("bad: %#v", states)
}
}
}
func testBackendStateLock(t *testing.T, b1, b2 Backend) {
// Get the default state for each
b1StateMgr, err := b1.State(DefaultStateName)
if err != nil {
t.Fatalf("error: %s", err)
}
if err := b1StateMgr.RefreshState(); err != nil {
t.Fatalf("bad: %s", err)
}
// Fast exit if this doesn't support locking at all
if _, ok := b1StateMgr.(state.Locker); !ok {
t.Logf("TestBackend: backend %T doesn't support state locking, not testing", b1)
return
}
t.Logf("TestBackend: testing state locking for %T", b1)
b2StateMgr, err := b2.State(DefaultStateName)
if err != nil {
t.Fatalf("error: %s", err)
}
if err := b2StateMgr.RefreshState(); err != nil {
t.Fatalf("bad: %s", err)
}
// Reassign so it's obvious what's happening
lockerA := b1StateMgr.(state.Locker)
lockerB := b2StateMgr.(state.Locker)
infoA := state.NewLockInfo()
infoA.Operation = "test"
infoA.Who = "clientA"
infoB := state.NewLockInfo()
infoB.Operation = "test"
infoB.Who = "clientB"
lockIDA, err := lockerA.Lock(infoA)
if err != nil {
t.Fatal("unable to get initial lock:", err)
}
// If the lock ID is blank, assume locking is disabled
if lockIDA == "" {
t.Logf("TestBackend: %T: empty string returned for lock, assuming disabled", b1)
return
}
_, err = lockerB.Lock(infoB)
if err == nil {
lockerA.Unlock(lockIDA)
t.Fatal("client B obtained lock while held by client A")
}
if err := lockerA.Unlock(lockIDA); err != nil {
t.Fatal("error unlocking client A", err)
}
lockIDB, err := lockerB.Lock(infoB)
if err != nil {
t.Fatal("unable to obtain lock from client B")
}
if lockIDB == lockIDA {
t.Fatalf("duplicate lock IDs: %q", lockIDB)
}
if err = lockerB.Unlock(lockIDB); err != nil {
t.Fatal("error unlocking client B:", err)
}
}

View File

@ -0,0 +1,12 @@
package main
import (
"github.com/hashicorp/terraform/builtin/providers/arukas"
"github.com/hashicorp/terraform/plugin"
)
func main() {
plugin.Serve(&plugin.ServeOpts{
ProviderFunc: arukas.Provider,
})
}

View File

@ -0,0 +1 @@
package main

View File

@ -0,0 +1,12 @@
package main
import (
"github.com/hashicorp/terraform/builtin/providers/dns"
"github.com/hashicorp/terraform/plugin"
)
func main() {
plugin.Serve(&plugin.ServeOpts{
ProviderFunc: dns.Provider,
})
}

View File

@ -0,0 +1 @@
package main

View File

@ -0,0 +1,12 @@
package main
import (
"github.com/hashicorp/terraform/builtin/providers/ignition"
"github.com/hashicorp/terraform/plugin"
)
func main() {
plugin.Serve(&plugin.ServeOpts{
ProviderFunc: ignition.Provider,
})
}

View File

@ -0,0 +1,12 @@
package main
import (
"github.com/hashicorp/terraform/builtin/providers/localfile"
"github.com/hashicorp/terraform/plugin"
)
func main() {
plugin.Serve(&plugin.ServeOpts{
ProviderFunc: localfile.Provider,
})
}

View File

@ -0,0 +1,12 @@
package main
import (
"github.com/hashicorp/terraform/builtin/providers/ns1"
"github.com/hashicorp/terraform/plugin"
)
func main() {
plugin.Serve(&plugin.ServeOpts{
ProviderFunc: ns1.Provider,
})
}

View File

@ -0,0 +1 @@
package main

View File

@ -0,0 +1,12 @@
package main
import (
"github.com/hashicorp/terraform/builtin/providers/opc"
"github.com/hashicorp/terraform/plugin"
)
func main() {
plugin.Serve(&plugin.ServeOpts{
ProviderFunc: opc.Provider,
})
}

View File

@ -0,0 +1,12 @@
package main
import (
"github.com/hashicorp/terraform/builtin/providers/opsgenie"
"github.com/hashicorp/terraform/plugin"
)
func main() {
plugin.Serve(&plugin.ServeOpts{
ProviderFunc: opsgenie.Provider,
})
}

View File

@ -0,0 +1,12 @@
package main
import (
"github.com/hashicorp/terraform/builtin/providers/profitbricks"
"github.com/hashicorp/terraform/plugin"
)
func main() {
plugin.Serve(&plugin.ServeOpts{
ProviderFunc: profitbricks.Provider,
})
}

View File

@ -0,0 +1,12 @@
package main
import (
"github.com/hashicorp/terraform/builtin/providers/spotinst"
"github.com/hashicorp/terraform/plugin"
)
func main() {
plugin.Serve(&plugin.ServeOpts{
ProviderFunc: spotinst.Provider,
})
}

View File

@ -3,13 +3,10 @@ package main
import (
"github.com/hashicorp/terraform/builtin/provisioners/file"
"github.com/hashicorp/terraform/plugin"
- "github.com/hashicorp/terraform/terraform"
)
func main() {
plugin.Serve(&plugin.ServeOpts{
- ProvisionerFunc: func() terraform.ResourceProvisioner {
- return new(file.ResourceProvisioner)
- },
+ ProvisionerFunc: file.Provisioner,
})
}

View File

@ -3,13 +3,10 @@ package main
import (
"github.com/hashicorp/terraform/builtin/provisioners/local-exec"
"github.com/hashicorp/terraform/plugin"
- "github.com/hashicorp/terraform/terraform"
)
func main() {
plugin.Serve(&plugin.ServeOpts{
- ProvisionerFunc: func() terraform.ResourceProvisioner {
- return new(localexec.ResourceProvisioner)
- },
+ ProvisionerFunc: localexec.Provisioner,
})
}

View File

@ -3,13 +3,10 @@ package main
import (
"github.com/hashicorp/terraform/builtin/provisioners/remote-exec"
"github.com/hashicorp/terraform/plugin"
- "github.com/hashicorp/terraform/terraform"
)
func main() {
plugin.Serve(&plugin.ServeOpts{
- ProvisionerFunc: func() terraform.ResourceProvisioner {
- return new(remoteexec.ResourceProvisioner)
- },
+ ProvisionerFunc: remoteexec.Provisioner,
})
}

View File

@ -0,0 +1,93 @@
package alicloud
import (
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/ecs"
"github.com/hashicorp/terraform/helper/schema"
)
type InstanceNetWork string
const (
ClassicNet = InstanceNetWork("classic")
VpcNet = InstanceNetWork("vpc")
)
// timeout for common products, e.g. ecs
const defaultTimeout = 120
// timeout for long-running operations, e.g. rds
const defaultLongTimeout = 1000
func getRegion(d *schema.ResourceData, meta interface{}) common.Region {
return meta.(*AliyunClient).Region
}
func notFoundError(err error) bool {
if e, ok := err.(*common.Error); ok &&
(e.StatusCode == 404 || e.ErrorResponse.Message == "Not found" || e.Code == InstanceNotfound) {
return true
}
return false
}
// Protocol represents network protocol
type Protocol string
// Constants of protocol definition
const (
Http = Protocol("http")
Https = Protocol("https")
Tcp = Protocol("tcp")
Udp = Protocol("udp")
)
// ValidProtocols network protocol list
var ValidProtocols = []Protocol{Http, Https, Tcp, Udp}
// simple array membership check; supports string values only
func isProtocolValid(value string) bool {
res := false
for _, v := range ValidProtocols {
if string(v) == value {
res = true
}
}
return res
}
var DefaultBusinessInfo = ecs.BusinessInfo{
Pack: "terraform",
}
// default region for all resources
const DEFAULT_REGION = "cn-beijing"
// default security IP for the db
const DEFAULT_DB_SECURITY_IP = "127.0.0.1"
// by default we only create a single instance
const DEFAULT_INSTANCE_COUNT = 1
// marker that identifies a multi-IZ (multi-zone) value
const MULTI_IZ_SYMBOL = "MAZ"
// default connection port for the db
const DB_DEFAULT_CONNECT_PORT = "3306"
const COMMA_SEPARATED = ","
const COLON_SEPARATED = ":"
const LOCAL_HOST_IP = "127.0.0.1"
// Takes the result of flatmap.Expand for an array of strings
// and returns a []string
func expandStringList(configured []interface{}) []string {
vs := make([]string, 0, len(configured))
for _, v := range configured {
vs = append(vs, v.(string))
}
return vs
}

View File

@ -0,0 +1,138 @@
package alicloud
import (
"fmt"
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/ecs"
"github.com/denverdino/aliyungo/ess"
"github.com/denverdino/aliyungo/rds"
"github.com/denverdino/aliyungo/slb"
)
// Config of aliyun
type Config struct {
AccessKey string
SecretKey string
Region common.Region
}
// AliyunClient of aliyun
type AliyunClient struct {
Region common.Region
ecsconn *ecs.Client
essconn *ess.Client
rdsconn *rds.Client
// use new version
ecsNewconn *ecs.Client
vpcconn *ecs.Client
slbconn *slb.Client
}
// Client for AliyunClient
func (c *Config) Client() (*AliyunClient, error) {
err := c.loadAndValidate()
if err != nil {
return nil, err
}
ecsconn, err := c.ecsConn()
if err != nil {
return nil, err
}
ecsNewconn, err := c.ecsConn()
if err != nil {
return nil, err
}
ecsNewconn.SetVersion(EcsApiVersion20160314)
rdsconn, err := c.rdsConn()
if err != nil {
return nil, err
}
slbconn, err := c.slbConn()
if err != nil {
return nil, err
}
vpcconn, err := c.vpcConn()
if err != nil {
return nil, err
}
essconn, err := c.essConn()
if err != nil {
return nil, err
}
return &AliyunClient{
Region: c.Region,
ecsconn: ecsconn,
ecsNewconn: ecsNewconn,
vpcconn: vpcconn,
slbconn: slbconn,
rdsconn: rdsconn,
essconn: essconn,
}, nil
}
const BusinessInfoKey = "Terraform"
func (c *Config) loadAndValidate() error {
err := c.validateRegion()
if err != nil {
return err
}
return nil
}
func (c *Config) validateRegion() error {
for _, valid := range common.ValidRegions {
if c.Region == valid {
return nil
}
}
return fmt.Errorf("Not a valid region: %s", c.Region)
}
func (c *Config) ecsConn() (*ecs.Client, error) {
client := ecs.NewECSClient(c.AccessKey, c.SecretKey, c.Region)
client.SetBusinessInfo(BusinessInfoKey)
_, err := client.DescribeRegions()
if err != nil {
return nil, err
}
return client, nil
}
func (c *Config) rdsConn() (*rds.Client, error) {
client := rds.NewRDSClient(c.AccessKey, c.SecretKey, c.Region)
client.SetBusinessInfo(BusinessInfoKey)
return client, nil
}
func (c *Config) slbConn() (*slb.Client, error) {
client := slb.NewSLBClient(c.AccessKey, c.SecretKey, c.Region)
client.SetBusinessInfo(BusinessInfoKey)
return client, nil
}
func (c *Config) vpcConn() (*ecs.Client, error) {
client := ecs.NewVPCClient(c.AccessKey, c.SecretKey, c.Region)
client.SetBusinessInfo(BusinessInfoKey)
return client, nil
}
func (c *Config) essConn() (*ess.Client, error) {
client := ess.NewESSClient(c.AccessKey, c.SecretKey, c.Region)
client.SetBusinessInfo(BusinessInfoKey)
return client, nil
}
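As a usage sketch (not part of the change itself), the acceptance-style test below shows how a Config is expected to be turned into an AliyunClient. The ALICLOUD_ACCESS_KEY / ALICLOUD_SECRET_KEY environment variable names and the test name are assumptions, and the test skips itself when they are unset.

package alicloud

import (
    "os"
    "testing"

    "github.com/denverdino/aliyungo/common"
)

func TestConfigClientSketch(t *testing.T) {
    access := os.Getenv("ALICLOUD_ACCESS_KEY")
    secret := os.Getenv("ALICLOUD_SECRET_KEY")
    if access == "" || secret == "" {
        t.Skip("ALICLOUD_ACCESS_KEY and ALICLOUD_SECRET_KEY must be set for this sketch")
    }

    cfg := &Config{
        AccessKey: access,
        SecretKey: secret,
        Region:    common.Region(DEFAULT_REGION),
    }

    // Client() validates the region and opens the ECS/RDS/SLB/VPC/ESS
    // connections shown above; ecsConn also issues DescribeRegions as a
    // credential check.
    client, err := cfg.Client()
    if err != nil {
        t.Fatal(err)
    }
    if client.Region != cfg.Region {
        t.Fatalf("unexpected region: %s", client.Region)
    }
}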

View File

@ -0,0 +1,18 @@
package alicloud
import (
"bytes"
"fmt"
"github.com/hashicorp/terraform/helper/hashcode"
)
// Generates a hash for the set hash function used by the ID
func dataResourceIdHash(ids []string) string {
var buf bytes.Buffer
for _, id := range ids {
buf.WriteString(fmt.Sprintf("%s-", id))
}
return fmt.Sprintf("%d", hashcode.String(buf.String()))
}

View File

@ -0,0 +1,337 @@
package alicloud
import (
"fmt"
"log"
"regexp"
"sort"
"time"
"github.com/denverdino/aliyungo/ecs"
"github.com/hashicorp/terraform/helper/schema"
)
func dataSourceAlicloudImages() *schema.Resource {
return &schema.Resource{
Read: dataSourceAlicloudImagesRead,
Schema: map[string]*schema.Schema{
"name_regex": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validateNameRegex,
},
"most_recent": {
Type: schema.TypeBool,
Optional: true,
Default: false,
ForceNew: true,
},
"owners": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validateImageOwners,
},
// Computed values.
"images": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"id": {
Type: schema.TypeString,
Computed: true,
},
"image_id": {
Type: schema.TypeString,
Computed: true,
},
"architecture": {
Type: schema.TypeString,
Computed: true,
},
"creation_time": {
Type: schema.TypeString,
Computed: true,
},
"description": {
Type: schema.TypeString,
Computed: true,
},
"image_owner_alias": {
Type: schema.TypeString,
Computed: true,
},
"os_type": {
Type: schema.TypeString,
Computed: true,
},
"os_name": {
Type: schema.TypeString,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
"platform": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"state": {
Type: schema.TypeString,
Computed: true,
},
"size": {
Type: schema.TypeInt,
Computed: true,
},
// Complex computed values
"disk_device_mappings": {
Type: schema.TypeList,
Computed: true,
//Set: imageDiskDeviceMappingHash,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"device": {
Type: schema.TypeString,
Computed: true,
},
"size": {
Type: schema.TypeString,
Computed: true,
},
"snapshot_id": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"product_code": {
Type: schema.TypeString,
Computed: true,
},
"is_self_shared": {
Type: schema.TypeString,
Computed: true,
},
"is_subscribed": {
Type: schema.TypeBool,
Computed: true,
},
"is_copied": {
Type: schema.TypeBool,
Computed: true,
},
"is_support_io_optimized": {
Type: schema.TypeBool,
Computed: true,
},
"image_version": {
Type: schema.TypeString,
Computed: true,
},
"progress": {
Type: schema.TypeString,
Computed: true,
},
"usage": {
Type: schema.TypeString,
Computed: true,
},
"tags": tagsSchema(),
},
},
},
},
}
}
// dataSourceAlicloudImagesDescriptionRead performs the Alicloud Image lookup.
func dataSourceAlicloudImagesRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AliyunClient).ecsconn
nameRegex, nameRegexOk := d.GetOk("name_regex")
owners, ownersOk := d.GetOk("owners")
mostRecent, mostRecentOk := d.GetOk("most_recent")
if !nameRegexOk && !ownersOk && !mostRecentOk {
return fmt.Errorf("One of name_regex, owners or most_recent must be assigned")
}
params := &ecs.DescribeImagesArgs{
RegionId: getRegion(d, meta),
}
if ownersOk {
params.ImageOwnerAlias = ecs.ImageOwnerAlias(owners.(string))
}
var allImages []ecs.ImageType
for {
images, paginationResult, err := conn.DescribeImages(params)
if err != nil {
break
}
allImages = append(allImages, images...)
pagination := paginationResult.NextPage()
if pagination == nil {
break
}
params.Pagination = *pagination
}
var filteredImages []ecs.ImageType
if nameRegexOk {
r := regexp.MustCompile(nameRegex.(string))
for _, image := range allImages {
// Check for the rare case where the response includes an image with no
// name. With nothing to match against, such images are skipped.
if image.ImageName == "" {
log.Printf("[WARN] Unable to find Image name to match against "+
"for image ID %q, nothing to do.",
image.ImageId)
continue
}
if r.MatchString(image.ImageName) {
filteredImages = append(filteredImages, image)
}
}
} else {
filteredImages = allImages[:]
}
var images []ecs.ImageType
if len(filteredImages) < 1 {
return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
}
log.Printf("[DEBUG] alicloud_image - multiple results found and `most_recent` is set to: %t", mostRecent.(bool))
if len(filteredImages) > 1 && mostRecent.(bool) {
// Multiple results and most_recent is set: keep only the most recent image.
images = append(images, mostRecentImage(filteredImages))
} else {
images = filteredImages
}
log.Printf("[DEBUG] alicloud_image - Images found: %#v", images)
return imagesDescriptionAttributes(d, images, meta)
}
// populate the numerous fields that the image description returns.
func imagesDescriptionAttributes(d *schema.ResourceData, images []ecs.ImageType, meta interface{}) error {
var ids []string
var s []map[string]interface{}
for _, image := range images {
mapping := map[string]interface{}{
"id": image.ImageId,
"architecture": image.Architecture,
"creation_time": image.CreationTime.String(),
"description": image.Description,
"image_id": image.ImageId,
"image_owner_alias": image.ImageOwnerAlias,
"os_name": image.OSName,
"os_type": image.OSType,
"name": image.ImageName,
"platform": image.Platform,
"status": image.Status,
"state": image.Status,
"size": image.Size,
"is_self_shared": image.IsSelfShared,
"is_subscribed": image.IsSubscribed,
"is_copied": image.IsCopied,
"is_support_io_optimized": image.IsSupportIoOptimized,
"image_version": image.ImageVersion,
"progress": image.Progress,
"usage": image.Usage,
"product_code": image.ProductCode,
// Complex types get their own functions
"disk_device_mappings": imageDiskDeviceMappings(image.DiskDeviceMappings.DiskDeviceMapping),
"tags": imageTagsMappings(d, image.ImageId, meta),
}
log.Printf("[DEBUG] alicloud_image - adding image mapping: %v", mapping)
ids = append(ids, image.ImageId)
s = append(s, mapping)
}
d.SetId(dataResourceIdHash(ids))
if err := d.Set("images", s); err != nil {
return err
}
return nil
}
// imageSort implements sort.Interface to order images by creation time (ascending).
type imageSort []ecs.ImageType
func (a imageSort) Len() int {
return len(a)
}
func (a imageSort) Swap(i, j int) {
a[i], a[j] = a[j], a[i]
}
func (a imageSort) Less(i, j int) bool {
itime, _ := time.Parse(time.RFC3339, a[i].CreationTime.String())
jtime, _ := time.Parse(time.RFC3339, a[j].CreationTime.String())
return itime.Unix() < jtime.Unix()
}
// Returns the most recent Image out of a slice of images.
func mostRecentImage(images []ecs.ImageType) ecs.ImageType {
sortedImages := images
sort.Sort(imageSort(sortedImages))
return sortedImages[len(sortedImages)-1]
}
// Returns a list of disk device mappings.
func imageDiskDeviceMappings(m []ecs.DiskDeviceMapping) []map[string]interface{} {
var s []map[string]interface{}
for _, v := range m {
mapping := map[string]interface{}{
"device": v.Device,
"size": v.Size,
"snapshot_id": v.SnapshotId,
}
log.Printf("[DEBUG] alicloud_image - adding disk device mapping: %v", mapping)
s = append(s, mapping)
}
return s
}
// Returns a map of image tags.
func imageTagsMappings(d *schema.ResourceData, imageId string, meta interface{}) map[string]string {
client := meta.(*AliyunClient)
conn := client.ecsconn
tags, _, err := conn.DescribeTags(&ecs.DescribeTagsArgs{
RegionId: getRegion(d, meta),
ResourceType: ecs.TagResourceImage,
ResourceId: imageId,
})
if err != nil {
log.Printf("[ERROR] DescribeTags for image got error: %#v", err)
return nil
}
log.Printf("[DEBUG] DescribeTags for image : %v", tags)
return tagsToMap(tags)
}

View File

@ -0,0 +1,155 @@
package alicloud
import (
"regexp"
"testing"
"github.com/hashicorp/terraform/helper/resource"
)
func TestAccAlicloudImagesDataSource_images(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
{
Config: testAccCheckAlicloudImagesDataSourceImagesConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAlicloudDataSourceID("data.alicloud_images.multi_image"),
resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.#", "2"),
resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.architecture", "x86_64"),
resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.disk_device_mappings.#", "0"),
resource.TestMatchResourceAttr("data.alicloud_images.multi_image", "images.0.creation_time", regexp.MustCompile("^20[0-9]{2}-")),
resource.TestMatchResourceAttr("data.alicloud_images.multi_image", "images.0.image_id", regexp.MustCompile("^centos_6\\w{1,5}[64]{1}.")),
resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.image_owner_alias", "system"),
resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.os_type", "linux"),
resource.TestMatchResourceAttr("data.alicloud_images.multi_image", "images.0.name", regexp.MustCompile("^centos_6[a-zA-Z0-9_]{1,5}[64]{1}.")),
resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.progress", "100%"),
resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.state", "Available"),
resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.status", "Available"),
resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.usage", "instance"),
resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.0.tags.%", "0"),
resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.architecture", "i386"),
resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.disk_device_mappings.#", "0"),
resource.TestMatchResourceAttr("data.alicloud_images.multi_image", "images.1.creation_time", regexp.MustCompile("^20[0-9]{2}-")),
resource.TestMatchResourceAttr("data.alicloud_images.multi_image", "images.1.image_id", regexp.MustCompile("^centos_6[a-zA-Z0-9_]{1,5}[32]{1}.")),
resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.image_owner_alias", "system"),
resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.os_type", "linux"),
resource.TestMatchResourceAttr("data.alicloud_images.multi_image", "images.1.name", regexp.MustCompile("^centos_6\\w{1,5}[32]{1}.")),
resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.progress", "100%"),
resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.state", "Available"),
resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.status", "Available"),
resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.usage", "instance"),
resource.TestCheckResourceAttr("data.alicloud_images.multi_image", "images.1.tags.%", "0"),
),
},
},
})
}
func TestAccAlicloudImagesDataSource_owners(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
{
Config: testAccCheckAlicloudImagesDataSourceOwnersConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAlicloudDataSourceID("data.alicloud_images.owners_filtered_image"),
),
},
},
})
}
func TestAccAlicloudImagesDataSource_ownersEmpty(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
{
Config: testAccCheckAlicloudImagesDataSourceEmptyOwnersConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAlicloudDataSourceID("data.alicloud_images.empty_owners_filtered_image"),
resource.TestCheckResourceAttr("data.alicloud_images.empty_owners_filtered_image", "most_recent", "true"),
),
},
},
})
}
func TestAccAlicloudImagesDataSource_nameRegexFilter(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
{
Config: testAccCheckAlicloudImagesDataSourceNameRegexConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAlicloudDataSourceID("data.alicloud_images.name_regex_filtered_image"),
resource.TestMatchResourceAttr("data.alicloud_images.name_regex_filtered_image", "images.0.image_id", regexp.MustCompile("^centos_")),
),
},
},
})
}
func TestAccAlicloudImagesDataSource_imageNotInFirstPage(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
{
Config: testAccCheckAlicloudImagesDataSourceImageNotInFirstPageConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAlicloudDataSourceID("data.alicloud_images.name_regex_filtered_image"),
resource.TestMatchResourceAttr("data.alicloud_images.name_regex_filtered_image", "images.0.image_id", regexp.MustCompile("^ubuntu_14")),
),
},
},
})
}
// Testing multiple image results - using CentOS images
const testAccCheckAlicloudImagesDataSourceImagesConfig = `
data "alicloud_images" "multi_image" {
owners = "system"
name_regex = "^centos_6"
}
`
// Testing owners parameter
const testAccCheckAlicloudImagesDataSourceOwnersConfig = `
data "alicloud_images" "owners_filtered_image" {
most_recent = true
owners = "system"
}
`
const testAccCheckAlicloudImagesDataSourceEmptyOwnersConfig = `
data "alicloud_images" "empty_owners_filtered_image" {
most_recent = true
owners = ""
}
`
// Testing name_regex parameter
const testAccCheckAlicloudImagesDataSourceNameRegexConfig = `
data "alicloud_images" "name_regex_filtered_image" {
most_recent = true
owners = "system"
name_regex = "^centos_6\\w{1,5}[64]{1}.*"
}
`
// Testing an image that is not in the first page of the response
const testAccCheckAlicloudImagesDataSourceImageNotInFirstPageConfig = `
data "alicloud_images" "name_regex_filtered_image" {
most_recent = true
owners = "system"
name_regex = "^ubuntu_14.*_64"
}
`

View File

@ -0,0 +1,127 @@
package alicloud
import (
"fmt"
"github.com/denverdino/aliyungo/ecs"
"github.com/hashicorp/terraform/helper/schema"
"log"
)
func dataSourceAlicloudInstanceTypes() *schema.Resource {
return &schema.Resource{
Read: dataSourceAlicloudInstanceTypesRead,
Schema: map[string]*schema.Schema{
"instance_type_family": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"cpu_core_count": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
},
"memory_size": {
Type: schema.TypeFloat,
Optional: true,
ForceNew: true,
},
// Computed values.
"instance_types": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"id": {
Type: schema.TypeString,
Computed: true,
},
"cpu_core_count": {
Type: schema.TypeInt,
Computed: true,
},
"memory_size": {
Type: schema.TypeFloat,
Computed: true,
},
"family": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
},
}
}
func dataSourceAlicloudInstanceTypesRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AliyunClient).ecsconn
cpu, _ := d.Get("cpu_core_count").(int)
mem, _ := d.Get("memory_size").(float64)
args, err := buildAliyunAlicloudInstanceTypesArgs(d, meta)
if err != nil {
return err
}
resp, err := conn.DescribeInstanceTypesNew(args)
if err != nil {
return err
}
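// Filter the returned instance types by cpu_core_count and memory_size on the client side.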
var instanceTypes []ecs.InstanceTypeItemType
for _, types := range resp {
if cpu > 0 && types.CpuCoreCount != cpu {
continue
}
if mem > 0 && types.MemorySize != mem {
continue
}
instanceTypes = append(instanceTypes, types)
}
if len(instanceTypes) < 1 {
return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
}
log.Printf("[DEBUG] alicloud_instance_type - Types found: %#v", instanceTypes)
return instanceTypesDescriptionAttributes(d, instanceTypes)
}
func instanceTypesDescriptionAttributes(d *schema.ResourceData, types []ecs.InstanceTypeItemType) error {
var ids []string
var s []map[string]interface{}
for _, t := range types {
mapping := map[string]interface{}{
"id": t.InstanceTypeId,
"cpu_core_count": t.CpuCoreCount,
"memory_size": t.MemorySize,
"family": t.InstanceTypeFamily,
}
log.Printf("[DEBUG] alicloud_instance_type - adding type mapping: %v", mapping)
ids = append(ids, t.InstanceTypeId)
s = append(s, mapping)
}
d.SetId(dataResourceIdHash(ids))
if err := d.Set("instance_types", s); err != nil {
return err
}
return nil
}
func buildAliyunAlicloudInstanceTypesArgs(d *schema.ResourceData, meta interface{}) (*ecs.DescribeInstanceTypesArgs, error) {
args := &ecs.DescribeInstanceTypesArgs{}
if v := d.Get("instance_type_family").(string); v != "" {
args.InstanceTypeFamily = v
}
return args, nil
}

View File

@ -0,0 +1,54 @@
package alicloud
import (
"github.com/hashicorp/terraform/helper/resource"
"testing"
)
func TestAccAlicloudInstanceTypesDataSource_basic(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() {
testAccPreCheck(t)
},
Providers: testAccProviders,
Steps: []resource.TestStep{
{
Config: testAccCheckAlicloudInstanceTypesDataSourceBasicConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAlicloudDataSourceID("data.alicloud_instance_types.4c8g"),
resource.TestCheckResourceAttr("data.alicloud_instance_types.4c8g", "instance_types.0.cpu_core_count", "4"),
resource.TestCheckResourceAttr("data.alicloud_instance_types.4c8g", "instance_types.0.memory_size", "8"),
resource.TestCheckResourceAttr("data.alicloud_instance_types.4c8g", "instance_types.0.id", "ecs.s3.large"),
),
},
resource.TestStep{
Config: testAccCheckAlicloudInstanceTypesDataSourceBasicConfigUpdate,
Check: resource.ComposeTestCheckFunc(
testAccCheckAlicloudDataSourceID("data.alicloud_instance_types.4c8g"),
resource.TestCheckResourceAttr("data.alicloud_instance_types.4c8g", "instance_types.#", "1"),
resource.TestCheckResourceAttr("data.alicloud_instance_types.4c8g", "instance_types.0.cpu_core_count", "4"),
resource.TestCheckResourceAttr("data.alicloud_instance_types.4c8g", "instance_types.0.memory_size", "8"),
),
},
},
})
}
const testAccCheckAlicloudInstanceTypesDataSourceBasicConfig = `
data "alicloud_instance_types" "4c8g" {
cpu_core_count = 4
memory_size = 8
}
`
const testAccCheckAlicloudInstanceTypesDataSourceBasicConfigUpdate = `
data "alicloud_instance_types" "4c8g" {
instance_type_family= "ecs.s3"
cpu_core_count = 4
memory_size = 8
}
`

View File

@ -0,0 +1,114 @@
package alicloud
import (
"fmt"
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/ecs"
"github.com/hashicorp/terraform/helper/schema"
"log"
)
func dataSourceAlicloudRegions() *schema.Resource {
return &schema.Resource{
Read: dataSourceAlicloudRegionsRead,
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"current": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Computed: true,
},
// Computed values.
"regions": &schema.Schema{
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"id": {
Type: schema.TypeString,
Computed: true,
},
"region_id": {
Type: schema.TypeString,
Computed: true,
},
"local_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
},
}
}
func dataSourceAlicloudRegionsRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AliyunClient).ecsconn
currentRegion := getRegion(d, meta)
resp, err := conn.DescribeRegions()
if err != nil {
return err
}
if resp == nil || len(resp) == 0 {
return fmt.Errorf("no matching regions found")
}
name, nameOk := d.GetOk("name")
current := d.Get("current").(bool)
var filterRegions []ecs.RegionType
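// When current is set, return only the provider's configured region; otherwise
// filter by name when one is given, or return all regions.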
for _, region := range resp {
if current {
if nameOk && common.Region(name.(string)) != currentRegion {
return fmt.Errorf("name doesn't match current region: %#v, please input again.", currentRegion)
}
if region.RegionId == currentRegion {
filterRegions = append(filterRegions, region)
break
}
continue
}
if nameOk {
if common.Region(name.(string)) == region.RegionId {
filterRegions = append(filterRegions, region)
break
}
continue
}
filterRegions = append(filterRegions, region)
}
if len(filterRegions) < 1 {
return fmt.Errorf("Your query region returned no results. Please change your search criteria and try again.")
}
return regionsDescriptionAttributes(d, filterRegions)
}
func regionsDescriptionAttributes(d *schema.ResourceData, regions []ecs.RegionType) error {
var ids []string
var s []map[string]interface{}
for _, region := range regions {
mapping := map[string]interface{}{
"id": region.RegionId,
"region_id": region.RegionId,
"local_name": region.LocalName,
}
log.Printf("[DEBUG] alicloud_regions - adding region mapping: %v", mapping)
ids = append(ids, string(region.RegionId))
s = append(s, mapping)
}
d.SetId(dataResourceIdHash(ids))
if err := d.Set("regions", s); err != nil {
return err
}
return nil
}

View File

@ -0,0 +1,109 @@
package alicloud
import (
"testing"
"github.com/hashicorp/terraform/helper/resource"
)
func TestAccAlicloudRegionsDataSource_regions(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
{
Config: testAccCheckAlicloudRegionsDataSourceRegionsConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAlicloudDataSourceID("data.alicloud_regions.region"),
resource.TestCheckResourceAttr("data.alicloud_regions.region", "name", "cn-beijing"),
resource.TestCheckResourceAttr("data.alicloud_regions.region", "current", "true"),
resource.TestCheckResourceAttr("data.alicloud_regions.region", "regions.#", "1"),
resource.TestCheckResourceAttr("data.alicloud_regions.region", "regions.0.id", "cn-beijing"),
resource.TestCheckResourceAttr("data.alicloud_regions.region", "regions.0.region_id", "cn-beijing"),
resource.TestCheckResourceAttr("data.alicloud_regions.region", "regions.0.local_name", "华北 2"),
),
},
},
})
}
func TestAccAlicloudRegionsDataSource_name(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
{
Config: testAccCheckAlicloudRegionsDataSourceNameConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAlicloudDataSourceID("data.alicloud_regions.name_filtered_region"),
resource.TestCheckResourceAttr("data.alicloud_regions.name_filtered_region", "name", "cn-hangzhou")),
},
},
})
}
func TestAccAlicloudRegionsDataSource_current(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
{
Config: testAccCheckAlicloudRegionsDataSourceCurrentConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAlicloudDataSourceID("data.alicloud_regions.current_filtered_region"),
resource.TestCheckResourceAttr("data.alicloud_regions.current_filtered_region", "current", "true"),
),
},
},
})
}
func TestAccAlicloudRegionsDataSource_empty(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
{
Config: testAccCheckAlicloudRegionsDataSourceEmptyConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAlicloudDataSourceID("data.alicloud_regions.empty_params_region"),
resource.TestCheckResourceAttr("data.alicloud_regions.empty_params_region", "regions.0.id", "cn-shenzhen"),
resource.TestCheckResourceAttr("data.alicloud_regions.empty_params_region", "regions.0.region_id", "cn-shenzhen"),
resource.TestCheckResourceAttr("data.alicloud_regions.empty_params_region", "regions.0.local_name", "华南 1"),
),
},
},
})
}
// Testing name and current parameters together
const testAccCheckAlicloudRegionsDataSourceRegionsConfig = `
data "alicloud_regions" "region" {
name = "cn-beijing"
current = true
}
`
// Testing name parameter
const testAccCheckAlicloudRegionsDataSourceNameConfig = `
data "alicloud_regions" "name_filtered_region" {
name = "cn-hangzhou"
}
`
// Testing current parameter
const testAccCheckAlicloudRegionsDataSourceCurrentConfig = `
data "alicloud_regions" "current_filtered_region" {
current = true
}
`
// Testing empty parameters
const testAccCheckAlicloudRegionsDataSourceEmptyConfig = `
data "alicloud_regions" "empty_params_region" {
}
`

View File

@ -0,0 +1,137 @@
package alicloud
import (
"fmt"
"github.com/denverdino/aliyungo/ecs"
"github.com/hashicorp/terraform/helper/schema"
"log"
"reflect"
)
func dataSourceAlicloudZones() *schema.Resource {
return &schema.Resource{
Read: dataSourceAlicloudZonesRead,
Schema: map[string]*schema.Schema{
"available_instance_type": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"available_resource_creation": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"available_disk_category": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
// Computed values.
"zones": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"id": {
Type: schema.TypeString,
Computed: true,
},
"local_name": {
Type: schema.TypeString,
Computed: true,
},
"available_instance_types": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"available_resource_creation": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"available_disk_categories": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
},
},
},
},
}
}
func dataSourceAlicloudZonesRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AliyunClient).ecsconn
insType, _ := d.Get("available_instance_type").(string)
resType, _ := d.Get("available_resource_creation").(string)
diskType, _ := d.Get("available_disk_category").(string)
resp, err := conn.DescribeZones(getRegion(d, meta))
if err != nil {
return err
}
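// Keep only the zones that satisfy the requested instance type, resource
// creation type and disk category filters.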
var zoneTypes []ecs.ZoneType
for _, types := range resp {
if insType != "" && !constraints(types.AvailableInstanceTypes.InstanceTypes, insType) {
continue
}
if resType != "" && !constraints(types.AvailableResourceCreation.ResourceTypes, resType) {
continue
}
if diskType != "" && !constraints(types.AvailableDiskCategories.DiskCategories, diskType) {
continue
}
zoneTypes = append(zoneTypes, types)
}
if len(zoneTypes) < 1 {
return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.")
}
log.Printf("[DEBUG] alicloud_zones - Zones found: %#v", zoneTypes)
return zonesDescriptionAttributes(d, zoneTypes)
}
// constraints reports whether the string v is present in the given slice.
func constraints(arr interface{}, v string) bool {
arrs := reflect.ValueOf(arr)
length := arrs.Len()
for i := 0; i < length; i++ {
if arrs.Index(i).String() == v {
return true
}
}
return false
}
func zonesDescriptionAttributes(d *schema.ResourceData, types []ecs.ZoneType) error {
var ids []string
var s []map[string]interface{}
for _, t := range types {
mapping := map[string]interface{}{
"id": t.ZoneId,
"local_name": t.LocalName,
"available_instance_types": t.AvailableInstanceTypes.InstanceTypes,
"available_resource_creation": t.AvailableResourceCreation.ResourceTypes,
"available_disk_categories": t.AvailableDiskCategories.DiskCategories,
}
log.Printf("[DEBUG] alicloud_zones - adding zone mapping: %v", mapping)
ids = append(ids, t.ZoneId)
s = append(s, mapping)
}
d.SetId(dataResourceIdHash(ids))
if err := d.Set("zones", s); err != nil {
return err
}
return nil
}

View File

@ -0,0 +1,132 @@
package alicloud
import (
"fmt"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"strconv"
"testing"
)
func TestAccAlicloudZonesDataSource_basic(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() {
testAccPreCheck(t)
},
Providers: testAccProviders,
Steps: []resource.TestStep{
{
Config: testAccCheckAlicloudZonesDataSourceBasicConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAlicloudDataSourceID("data.alicloud_zones.foo"),
),
},
},
})
}
func TestAccAlicloudZonesDataSource_filter(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() {
testAccPreCheck(t)
},
Providers: testAccProviders,
Steps: []resource.TestStep{
{
Config: testAccCheckAlicloudZonesDataSourceFilter,
Check: resource.ComposeTestCheckFunc(
testAccCheckAlicloudDataSourceID("data.alicloud_zones.foo"),
testCheckZoneLength("data.alicloud_zones.foo"),
),
},
resource.TestStep{
Config: testAccCheckAlicloudZonesDataSourceFilterIoOptimized,
Check: resource.ComposeTestCheckFunc(
testAccCheckAlicloudDataSourceID("data.alicloud_zones.foo"),
testCheckZoneLength("data.alicloud_zones.foo"),
),
},
},
})
}
func TestAccAlicloudZonesDataSource_unitRegion(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() {
testAccPreCheck(t)
},
Providers: testAccProviders,
Steps: []resource.TestStep{
{
Config: testAccCheckAlicloudZonesDataSource_unitRegion,
Check: resource.ComposeTestCheckFunc(
testAccCheckAlicloudDataSourceID("data.alicloud_zones.foo"),
),
},
},
})
}
// The number of available zones changes occasionally, so check that at least
// one zone is returned rather than asserting an exact count.
func testCheckZoneLength(name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
ms := s.RootModule()
rs, ok := ms.Resources[name]
if !ok {
return fmt.Errorf("Not found: %s", name)
}
is := rs.Primary
if is == nil {
return fmt.Errorf("No primary instance: %s", name)
}
i, err := strconv.Atoi(is.Attributes["zones.#"])
if err != nil {
return fmt.Errorf("convert zone length err: %#v", err)
}
if i <= 0 {
return fmt.Errorf("zone length expected greater than 0 got err: %d", i)
}
return nil
}
}
const testAccCheckAlicloudZonesDataSourceBasicConfig = `
data "alicloud_zones" "foo" {
}
`
const testAccCheckAlicloudZonesDataSourceFilter = `
data "alicloud_zones" "foo" {
available_instance_type= "ecs.c2.xlarge"
available_resource_creation= "VSwitch"
available_disk_category= "cloud_efficiency"
}
`
const testAccCheckAlicloudZonesDataSourceFilterIoOptimized = `
data "alicloud_zones" "foo" {
available_instance_type= "ecs.c2.xlarge"
available_resource_creation= "IoOptimized"
available_disk_category= "cloud"
}
`
const testAccCheckAlicloudZonesDataSource_unitRegion = `
provider "alicloud" {
alias = "northeast"
region = "ap-northeast-1"
}
data "alicloud_zones" "foo" {
provider = "alicloud.northeast"
available_resource_creation= "VSwitch"
}
`

View File

@ -0,0 +1,49 @@
package alicloud
import "github.com/denverdino/aliyungo/common"
const (
// common
Notfound = "Not found"
// ecs
InstanceNotfound = "Instance.Notfound"
// disk
DiskIncorrectStatus = "IncorrectDiskStatus"
DiskCreatingSnapshot = "DiskCreatingSnapshot"
InstanceLockedForSecurity = "InstanceLockedForSecurity"
SystemDiskNotFound = "SystemDiskNotFound"
// eip
EipIncorrectStatus = "IncorrectEipStatus"
InstanceIncorrectStatus = "IncorrectInstanceStatus"
HaVipIncorrectStatus = "IncorrectHaVipStatus"
// slb
LoadBalancerNotFound = "InvalidLoadBalancerId.NotFound"
// security_group
InvalidInstanceIdAlreadyExists = "InvalidInstanceId.AlreadyExists"
InvalidSecurityGroupIdNotFound = "InvalidSecurityGroupId.NotFound"
SgDependencyViolation = "DependencyViolation"
//Nat gateway
NatGatewayInvalidRegionId = "Invalid.RegionId"
DependencyViolationBandwidthPackages = "DependencyViolation.BandwidthPackages"
NotFindSnatEntryBySnatId = "NotFindSnatEntryBySnatId"
NotFindForwardEntryByForwardId = "NotFindForwardEntryByForwardId"
// vswitch
VswitcInvalidRegionId = "InvalidRegionId.NotFound"
// ess
InvalidScalingGroupIdNotFound = "InvalidScalingGroupId.NotFound"
IncorrectScalingConfigurationLifecycleState = "IncorrectScalingConfigurationLifecycleState"
)
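// GetNotFoundErrorFromString wraps a message in a common.Error carrying the
// InstanceNotfound code, so callers can treat it like a not-found API error.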
func GetNotFoundErrorFromString(str string) error {
return &common.Error{
ErrorResponse: common.ErrorResponse{
Code: InstanceNotfound,
Message: str,
},
StatusCode: -1,
}
}

View File

@ -0,0 +1,37 @@
package alicloud
type GroupRuleDirection string
const (
GroupRuleIngress = GroupRuleDirection("ingress")
GroupRuleEgress = GroupRuleDirection("egress")
)
type GroupRuleIpProtocol string
const (
GroupRuleTcp = GroupRuleIpProtocol("tcp")
GroupRuleUdp = GroupRuleIpProtocol("udp")
GroupRuleIcmp = GroupRuleIpProtocol("icmp")
GroupRuleGre = GroupRuleIpProtocol("gre")
GroupRuleAll = GroupRuleIpProtocol("all")
)
type GroupRuleNicType string
const (
GroupRuleInternet = GroupRuleNicType("internet")
GroupRuleIntranet = GroupRuleNicType("intranet")
)
type GroupRulePolicy string
const (
GroupRulePolicyAccept = GroupRulePolicy("accept")
GroupRulePolicyDrop = GroupRulePolicy("drop")
)
const (
EcsApiVersion20160314 = "2016-03-14"
EcsApiVersion20140526 = "2014-05-26"
)

View File

@ -0,0 +1,164 @@
package alicloud
import (
"fmt"
"strings"
"github.com/denverdino/aliyungo/slb"
)
type Listener struct {
slb.HTTPListenerType
InstancePort int
LoadBalancerPort int
Protocol string
// tcp & udp
PersistenceTimeout int
// https
SSLCertificateId string
// tcp
HealthCheckType slb.HealthCheckType
// API difference: http & https use HealthCheckTimeout, while tcp & udp use HealthCheckConnectTimeout
HealthCheckConnectTimeout int
}
type ListenerErr struct {
ErrType string
Err error
}
func (e *ListenerErr) Error() string {
return e.ErrType + " " + e.Err.Error()
}
const (
HealthCheckErrType = "healthCheckErrType"
StickySessionErrType = "stickySessionErrType"
CookieTimeOutErrType = "cookieTimeoutErrType"
CookieErrType = "cookieErrType"
)
// expandListeners takes the result of flatmap.Expand for an array of
// listeners and returns SLB API compatible objects.
func expandListeners(configured []interface{}) ([]*Listener, error) {
listeners := make([]*Listener, 0, len(configured))
// Loop over the configured listeners and create
// an array of aliyungo-compatible objects
for _, lRaw := range configured {
data := lRaw.(map[string]interface{})
ip := data["instance_port"].(int)
lp := data["lb_port"].(int)
l := &Listener{
InstancePort: ip,
LoadBalancerPort: lp,
Protocol: data["lb_protocol"].(string),
}
l.Bandwidth = data["bandwidth"].(int)
if v, ok := data["scheduler"]; ok {
l.Scheduler = slb.SchedulerType(v.(string))
}
if v, ok := data["ssl_certificate_id"]; ok {
l.SSLCertificateId = v.(string)
}
if v, ok := data["sticky_session"]; ok {
l.StickySession = slb.FlagType(v.(string))
}
if v, ok := data["sticky_session_type"]; ok {
l.StickySessionType = slb.StickySessionType(v.(string))
}
if v, ok := data["cookie_timeout"]; ok {
l.CookieTimeout = v.(int)
}
if v, ok := data["cookie"]; ok {
l.Cookie = v.(string)
}
if v, ok := data["persistence_timeout"]; ok {
l.PersistenceTimeout = v.(int)
}
if v, ok := data["health_check"]; ok {
l.HealthCheck = slb.FlagType(v.(string))
}
if v, ok := data["health_check_type"]; ok {
l.HealthCheckType = slb.HealthCheckType(v.(string))
}
if v, ok := data["health_check_domain"]; ok {
l.HealthCheckDomain = v.(string)
}
if v, ok := data["health_check_uri"]; ok {
l.HealthCheckURI = v.(string)
}
if v, ok := data["health_check_connect_port"]; ok {
l.HealthCheckConnectPort = v.(int)
}
if v, ok := data["healthy_threshold"]; ok {
l.HealthyThreshold = v.(int)
}
if v, ok := data["unhealthy_threshold"]; ok {
l.UnhealthyThreshold = v.(int)
}
if v, ok := data["health_check_timeout"]; ok {
l.HealthCheckTimeout = v.(int)
}
if v, ok := data["health_check_interval"]; ok {
l.HealthCheckInterval = v.(int)
}
if v, ok := data["health_check_http_code"]; ok {
l.HealthCheckHttpCode = slb.HealthCheckHttpCodeType(v.(string))
}
var valid bool
if l.SSLCertificateId != "" {
// validate the protocol is correct
for _, p := range []string{"https", "ssl"} {
if strings.ToLower(l.Protocol) == p {
valid = true
}
}
} else {
valid = true
}
if valid {
listeners = append(listeners, l)
} else {
return nil, fmt.Errorf("[ERR] SLB Listener: ssl_certificate_id may be set only when protocol is 'https' or 'ssl'")
}
}
return listeners, nil
}
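// expandBackendServers converts a list of instance IDs into SLB backend
// servers, skipping empty IDs and using the default weight of 100.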
func expandBackendServers(list []interface{}) []slb.BackendServerType {
result := make([]slb.BackendServerType, 0, len(list))
for _, i := range list {
if i.(string) != "" {
result = append(result, slb.BackendServerType{ServerId: i.(string), Weight: 100})
}
}
return result
}

View File

@ -0,0 +1,43 @@
package alicloud
import (
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/ecs"
)
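// Tag is a single key/value pair that can be attached to ECS resources such
// as images, instances, snapshots and disks.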
type Tag struct {
Key string
Value string
}
type AddTagsArgs struct {
ResourceId string
ResourceType ecs.TagResourceType //image, instance, snapshot or disk
RegionId common.Region
Tag []Tag
}
type RemoveTagsArgs struct {
ResourceId string
ResourceType ecs.TagResourceType //image, instance, snapshot or disk
RegionId common.Region
Tag []Tag
}
func AddTags(client *ecs.Client, args *AddTagsArgs) error {
response := ecs.AddTagsResponse{}
err := client.Invoke("AddTags", args, &response)
if err != nil {
return err
}
return err
}
func RemoveTags(client *ecs.Client, args *RemoveTagsArgs) error {
response := ecs.RemoveTagsResponse{}
err := client.Invoke("RemoveTags", args, &response)
if err != nil {
return err
}
return err
}

View File

@ -0,0 +1,95 @@
package alicloud
import (
"github.com/denverdino/aliyungo/common"
"github.com/hashicorp/terraform/helper/mutexkv"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
)
// Provider returns a schema.Provider for alicloud
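//
// Example provider configuration (illustrative; the credential values are placeholders):
//
//	provider "alicloud" {
//	  access_key = "<your-access-key>"
//	  secret_key = "<your-secret-key>"
//	  region     = "cn-beijing"
//	}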
func Provider() terraform.ResourceProvider {
return &schema.Provider{
Schema: map[string]*schema.Schema{
"access_key": &schema.Schema{
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ACCESS_KEY", nil),
Description: descriptions["access_key"],
},
"secret_key": &schema.Schema{
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_SECRET_KEY", nil),
Description: descriptions["secret_key"],
},
"region": &schema.Schema{
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_REGION", DEFAULT_REGION),
Description: descriptions["region"],
},
},
DataSourcesMap: map[string]*schema.Resource{
"alicloud_images": dataSourceAlicloudImages(),
"alicloud_regions": dataSourceAlicloudRegions(),
"alicloud_zones": dataSourceAlicloudZones(),
"alicloud_instance_types": dataSourceAlicloudInstanceTypes(),
},
ResourcesMap: map[string]*schema.Resource{
"alicloud_instance": resourceAliyunInstance(),
"alicloud_disk": resourceAliyunDisk(),
"alicloud_disk_attachment": resourceAliyunDiskAttachment(),
"alicloud_security_group": resourceAliyunSecurityGroup(),
"alicloud_security_group_rule": resourceAliyunSecurityGroupRule(),
"alicloud_db_instance": resourceAlicloudDBInstance(),
"alicloud_ess_scaling_group": resourceAlicloudEssScalingGroup(),
"alicloud_ess_scaling_configuration": resourceAlicloudEssScalingConfiguration(),
"alicloud_ess_scaling_rule": resourceAlicloudEssScalingRule(),
"alicloud_ess_schedule": resourceAlicloudEssSchedule(),
"alicloud_vpc": resourceAliyunVpc(),
"alicloud_nat_gateway": resourceAliyunNatGateway(),
// Both subnet and vswitch exist to stay compatible with older versions and with the AWS naming habit.
"alicloud_subnet": resourceAliyunSubnet(),
"alicloud_vswitch": resourceAliyunSubnet(),
"alicloud_route_entry": resourceAliyunRouteEntry(),
"alicloud_snat_entry": resourceAliyunSnatEntry(),
"alicloud_forward_entry": resourceAliyunForwardEntry(),
"alicloud_eip": resourceAliyunEip(),
"alicloud_eip_association": resourceAliyunEipAssociation(),
"alicloud_slb": resourceAliyunSlb(),
"alicloud_slb_attachment": resourceAliyunSlbAttachment(),
},
ConfigureFunc: providerConfigure,
}
}
func providerConfigure(d *schema.ResourceData) (interface{}, error) {
config := Config{
AccessKey: d.Get("access_key").(string),
SecretKey: d.Get("secret_key").(string),
Region: common.Region(d.Get("region").(string)),
}
client, err := config.Client()
if err != nil {
return nil, err
}
return client, nil
}
// This is a global MutexKV for use within this plugin.
var alicloudMutexKV = mutexkv.NewMutexKV()
var descriptions map[string]string
func init() {
descriptions = map[string]string{
"access_key": "Access key of alicloud",
"secret_key": "Secret key of alicloud",
"region": "Region of alicloud",
}
}

View File

@ -0,0 +1,59 @@
package alicloud
import (
"log"
"os"
"testing"
"fmt"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
)
var testAccProviders map[string]terraform.ResourceProvider
var testAccProvider *schema.Provider
func init() {
testAccProvider = Provider().(*schema.Provider)
testAccProviders = map[string]terraform.ResourceProvider{
"alicloud": testAccProvider,
}
}
func TestProvider(t *testing.T) {
if err := Provider().(*schema.Provider).InternalValidate(); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestProvider_impl(t *testing.T) {
var _ terraform.ResourceProvider = Provider()
}
func testAccPreCheck(t *testing.T) {
if v := os.Getenv("ALICLOUD_ACCESS_KEY"); v == "" {
t.Fatal("ALICLOUD_ACCESS_KEY must be set for acceptance tests")
}
if v := os.Getenv("ALICLOUD_SECRET_KEY"); v == "" {
t.Fatal("ALICLOUD_SECRET_KEY must be set for acceptance tests")
}
if v := os.Getenv("ALICLOUD_REGION"); v == "" {
log.Println("[INFO] Test: Using cn-beijing as test region")
os.Setenv("ALICLOUD_REGION", "cn-beijing")
}
}
func testAccCheckAlicloudDataSourceID(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Can't find data source: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("data source ID not set")
}
return nil
}
}

View File

@ -0,0 +1,550 @@
package alicloud
import (
"bytes"
"encoding/json"
"fmt"
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/rds"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"log"
"strconv"
"strings"
"time"
)
func resourceAlicloudDBInstance() *schema.Resource {
return &schema.Resource{
Create: resourceAlicloudDBInstanceCreate,
Read: resourceAlicloudDBInstanceRead,
Update: resourceAlicloudDBInstanceUpdate,
Delete: resourceAlicloudDBInstanceDelete,
Schema: map[string]*schema.Schema{
"engine": &schema.Schema{
Type: schema.TypeString,
ValidateFunc: validateAllowedStringValue([]string{"MySQL", "SQLServer", "PostgreSQL", "PPAS"}),
ForceNew: true,
Required: true,
},
"engine_version": &schema.Schema{
Type: schema.TypeString,
ValidateFunc: validateAllowedStringValue([]string{"5.5", "5.6", "5.7", "2008r2", "2012", "9.4", "9.3"}),
ForceNew: true,
Required: true,
},
"db_instance_class": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"db_instance_storage": &schema.Schema{
Type: schema.TypeInt,
Required: true,
},
"instance_charge_type": &schema.Schema{
Type: schema.TypeString,
ValidateFunc: validateAllowedStringValue([]string{string(rds.Postpaid), string(rds.Prepaid)}),
Optional: true,
ForceNew: true,
Default: rds.Postpaid,
},
"period": &schema.Schema{
Type: schema.TypeInt,
ValidateFunc: validateAllowedIntValue([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 24, 36}),
Optional: true,
ForceNew: true,
Default: 1,
},
"zone_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"multi_az": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
ForceNew: true,
},
"db_instance_net_type": &schema.Schema{
Type: schema.TypeString,
ValidateFunc: validateAllowedStringValue([]string{string(common.Internet), string(common.Intranet)}),
Optional: true,
},
"allocate_public_connection": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"instance_network_type": &schema.Schema{
Type: schema.TypeString,
ValidateFunc: validateAllowedStringValue([]string{string(common.VPC), string(common.Classic)}),
Optional: true,
Computed: true,
},
"vswitch_id": &schema.Schema{
Type: schema.TypeString,
ForceNew: true,
Optional: true,
},
"master_user_name": &schema.Schema{
Type: schema.TypeString,
ForceNew: true,
Optional: true,
},
"master_user_password": &schema.Schema{
Type: schema.TypeString,
ForceNew: true,
Optional: true,
Sensitive: true,
},
"preferred_backup_period": &schema.Schema{
Type: schema.TypeList,
Elem: &schema.Schema{Type: schema.TypeString},
// terraform does not support ValidateFunc of TypeList attr
// ValidateFunc: validateAllowedStringValue([]string{"Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"}),
Optional: true,
},
"preferred_backup_time": &schema.Schema{
Type: schema.TypeString,
ValidateFunc: validateAllowedStringValue(rds.BACKUP_TIME),
Optional: true,
},
"backup_retention_period": &schema.Schema{
Type: schema.TypeInt,
ValidateFunc: validateIntegerInRange(7, 730),
Optional: true,
},
"security_ips": &schema.Schema{
Type: schema.TypeList,
Elem: &schema.Schema{Type: schema.TypeString},
Computed: true,
Optional: true,
},
"port": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"connections": &schema.Schema{
Type: schema.TypeList,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"connection_string": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"ip_type": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"ip_address": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
},
},
Computed: true,
},
"db_mappings": &schema.Schema{
Type: schema.TypeSet,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"db_name": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"character_set_name": &schema.Schema{
Type: schema.TypeString,
ValidateFunc: validateAllowedStringValue(rds.CHARACTER_SET_NAME),
Required: true,
},
"db_description": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
},
},
Optional: true,
Set: resourceAlicloudDatabaseHash,
},
},
}
}
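// resourceAlicloudDatabaseHash builds the set hash for a db_mappings entry
// from its db_name, character_set_name and db_description.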
func resourceAlicloudDatabaseHash(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})
buf.WriteString(fmt.Sprintf("%s-", m["db_name"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["character_set_name"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["db_description"].(string)))
return hashcode.String(buf.String())
}
func resourceAlicloudDBInstanceCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*AliyunClient)
conn := client.rdsconn
args, err := buildDBCreateOrderArgs(d, meta)
if err != nil {
return err
}
resp, err := conn.CreateOrder(args)
if err != nil {
return fmt.Errorf("Error creating Alicloud db instance: %#v", err)
}
instanceId := resp.DBInstanceId
if instanceId == "" {
return fmt.Errorf("Error get Alicloud db instance id")
}
d.SetId(instanceId)
d.Set("instance_charge_type", d.Get("instance_charge_type"))
d.Set("period", d.Get("period"))
d.Set("period_type", d.Get("period_type"))
// wait for the instance status to change from Creating to Running
if err := conn.WaitForInstance(d.Id(), rds.Running, defaultLongTimeout); err != nil {
return fmt.Errorf("WaitForInstance %s got error: %#v", rds.Running, err)
}
if err := modifySecurityIps(d.Id(), d.Get("security_ips"), meta); err != nil {
return err
}
masterUserName := d.Get("master_user_name").(string)
masterUserPwd := d.Get("master_user_password").(string)
if masterUserName != "" && masterUserPwd != "" {
if err := client.CreateAccountByInfo(d.Id(), masterUserName, masterUserPwd); err != nil {
return fmt.Errorf("Create db account %s error: %v", masterUserName, err)
}
}
if d.Get("allocate_public_connection").(bool) {
if err := client.AllocateDBPublicConnection(d.Id(), DB_DEFAULT_CONNECT_PORT); err != nil {
return fmt.Errorf("Allocate public connection error: %v", err)
}
}
return resourceAlicloudDBInstanceUpdate(d, meta)
}
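// modifySecurityIps joins the configured security_ips into a comma-separated
// whitelist; when none are given it falls back to localhost so the instance
// stays closed to outside connections.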
func modifySecurityIps(id string, ips interface{}, meta interface{}) error {
client := meta.(*AliyunClient)
ipList := expandStringList(ips.([]interface{}))
ipstr := strings.Join(ipList[:], COMMA_SEPARATED)
// by default, disable connections from outside
if ipstr == "" {
ipstr = LOCAL_HOST_IP
}
if err := client.ModifyDBSecurityIps(id, ipstr); err != nil {
return fmt.Errorf("Error modify security ips %s: %#v", ipstr, err)
}
return nil
}
func resourceAlicloudDBInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*AliyunClient)
conn := client.rdsconn
d.Partial(true)
if d.HasChange("db_mappings") {
o, n := d.GetChange("db_mappings")
os := o.(*schema.Set)
ns := n.(*schema.Set)
var allDbs []string
remove := os.Difference(ns).List()
add := ns.Difference(os).List()
if len(remove) > 0 && len(add) > 0 {
return fmt.Errorf("Failure modify database, we neither support create and delete database simultaneous nor modify database attributes.")
}
if len(remove) > 0 {
for _, db := range remove {
dbm, _ := db.(map[string]interface{})
if err := conn.DeleteDatabase(d.Id(), dbm["db_name"].(string)); err != nil {
return fmt.Errorf("Failure delete database %s: %#v", dbm["db_name"].(string), err)
}
}
}
if len(add) > 0 {
for _, db := range add {
dbm, _ := db.(map[string]interface{})
dbName := dbm["db_name"].(string)
allDbs = append(allDbs, dbName)
if err := client.CreateDatabaseByInfo(d.Id(), dbName, dbm["character_set_name"].(string), dbm["db_description"].(string)); err != nil {
return fmt.Errorf("Failure create database %s: %#v", dbName, err)
}
}
}
if err := conn.WaitForAllDatabase(d.Id(), allDbs, rds.Running, 600); err != nil {
return fmt.Errorf("Failure create database %#v", err)
}
if user := d.Get("master_user_name").(string); user != "" {
for _, dbName := range allDbs {
if err := client.GrantDBPrivilege2Account(d.Id(), user, dbName); err != nil {
return fmt.Errorf("Failed to grant database %s readwrite privilege to account %s: %#v", dbName, user, err)
}
}
}
d.SetPartial("db_mappings")
}
if d.HasChange("preferred_backup_period") || d.HasChange("preferred_backup_time") || d.HasChange("backup_retention_period") {
period := d.Get("preferred_backup_period").([]interface{})
periodList := expandStringList(period)
time := d.Get("preferred_backup_time").(string)
retention := d.Get("backup_retention_period").(int)
if time == "" || retention == 0 || len(periodList) < 1 {
return fmt.Errorf("Both backup_time, backup_period and retention_period are required to set backup policy.")
}
ps := strings.Join(periodList[:], COMMA_SEPARATED)
if err := client.ConfigDBBackup(d.Id(), time, ps, retention); err != nil {
return fmt.Errorf("Error set backup policy: %#v", err)
}
d.SetPartial("preferred_backup_period")
d.SetPartial("preferred_backup_time")
d.SetPartial("backup_retention_period")
}
if d.HasChange("security_ips") {
if err := modifySecurityIps(d.Id(), d.Get("security_ips"), meta); err != nil {
return err
}
d.SetPartial("security_ips")
}
if d.HasChange("db_instance_class") || d.HasChange("db_instance_storage") {
co, cn := d.GetChange("db_instance_class")
so, sn := d.GetChange("db_instance_storage")
classOld := co.(string)
classNew := cn.(string)
storageOld := so.(int)
storageNew := sn.(int)
// skip this on the first apply, because the create function already sets the class and storage
if classOld != "" && storageOld != 0 {
chargeType := d.Get("instance_charge_type").(string)
if chargeType == string(rds.Prepaid) {
return fmt.Errorf("Prepaid db instance does not support modify db_instance_class or db_instance_storage")
}
if err := client.ModifyDBClassStorage(d.Id(), classNew, strconv.Itoa(storageNew)); err != nil {
return fmt.Errorf("Error modify db instance class or storage error: %#v", err)
}
}
}
d.Partial(false)
return resourceAlicloudDBInstanceRead(d, meta)
}
func resourceAlicloudDBInstanceRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*AliyunClient)
conn := client.rdsconn
instance, err := client.DescribeDBInstanceById(d.Id())
if err != nil {
if notFoundError(err) {
d.SetId("")
return nil
}
return fmt.Errorf("Error Describe DB InstanceAttribute: %#v", err)
}
args := rds.DescribeDatabasesArgs{
DBInstanceId: d.Id(),
}
resp, err := conn.DescribeDatabases(&args)
if err != nil {
return err
}
if resp.Databases.Database == nil {
d.SetId("")
return nil
}
d.Set("db_mappings", flattenDatabaseMappings(resp.Databases.Database))
argn := rds.DescribeDBInstanceNetInfoArgs{
DBInstanceId: d.Id(),
}
resn, err := conn.DescribeDBInstanceNetInfo(&argn)
if err != nil {
return err
}
d.Set("connections", flattenDBConnections(resn.DBInstanceNetInfos.DBInstanceNetInfo))
ips, err := client.GetSecurityIps(d.Id())
if err != nil {
log.Printf("Describe DB security ips error: %#v", err)
}
d.Set("security_ips", ips)
d.Set("engine", instance.Engine)
d.Set("engine_version", instance.EngineVersion)
d.Set("db_instance_class", instance.DBInstanceClass)
d.Set("port", instance.Port)
d.Set("db_instance_storage", instance.DBInstanceStorage)
d.Set("zone_id", instance.ZoneId)
d.Set("db_instance_net_type", instance.DBInstanceNetType)
d.Set("instance_network_type", instance.InstanceNetworkType)
return nil
}
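// resourceAlicloudDBInstanceDelete retries deletion for up to five minutes,
// treating the instance as gone once DescribeDBInstanceAttribute returns no items.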
func resourceAlicloudDBInstanceDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AliyunClient).rdsconn
return resource.Retry(5*time.Minute, func() *resource.RetryError {
err := conn.DeleteInstance(d.Id())
if err != nil {
return resource.RetryableError(fmt.Errorf("DB Instance in use - trying again while it is deleted."))
}
args := &rds.DescribeDBInstancesArgs{
DBInstanceId: d.Id(),
}
resp, err := conn.DescribeDBInstanceAttribute(args)
if err != nil {
return resource.NonRetryableError(err)
} else if len(resp.Items.DBInstanceAttribute) < 1 {
return nil
}
return resource.RetryableError(fmt.Errorf("DB in use - trying again while it is deleted."))
})
}
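// buildDBCreateOrderArgs translates the resource configuration into an
// rds.CreateOrderArgs request, resolving the zone from multi_az or from the
// vswitch when one is given.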
func buildDBCreateOrderArgs(d *schema.ResourceData, meta interface{}) (*rds.CreateOrderArgs, error) {
client := meta.(*AliyunClient)
args := &rds.CreateOrderArgs{
RegionId: getRegion(d, meta),
// This parameter is not exposed to the user: creating a prepaid instance
// stalls when auto_pay is set to false, so the instance info can never be
// fetched and the operation times out.
AutoPay: "true",
EngineVersion: d.Get("engine_version").(string),
Engine: rds.Engine(d.Get("engine").(string)),
DBInstanceStorage: d.Get("db_instance_storage").(int),
DBInstanceClass: d.Get("db_instance_class").(string),
Quantity: DEFAULT_INSTANCE_COUNT,
Resource: rds.DefaultResource,
}
bizStr, err := json.Marshal(DefaultBusinessInfo)
if err != nil {
return nil, fmt.Errorf("Failed to marshal business info %#v to JSON", DefaultBusinessInfo)
}
args.BusinessInfo = string(bizStr)
zoneId := d.Get("zone_id").(string)
args.ZoneId = zoneId
multiAZ := d.Get("multi_az").(bool)
if multiAZ {
if zoneId != "" {
return nil, fmt.Errorf("You cannot set the ZoneId parameter when the MultiAZ parameter is set to true")
}
izs, err := client.DescribeMultiIZByRegion()
if err != nil {
return nil, fmt.Errorf("Get multiAZ id error")
}
if len(izs) < 1 {
return nil, fmt.Errorf("Current region does not support MultiAZ.")
}
args.ZoneId = izs[0]
}
vswitchId := d.Get("vswitch_id").(string)
networkType := d.Get("instance_network_type").(string)
args.InstanceNetworkType = common.NetworkType(networkType)
if vswitchId != "" {
args.VSwitchId = vswitchId
// check InstanceNetworkType with vswitchId
if networkType == string(common.Classic) {
return nil, fmt.Errorf("When fill vswitchId, you shold set instance_network_type to VPC")
} else if networkType == "" {
args.InstanceNetworkType = common.VPC
}
// get vpcId
vpcId, err := client.GetVpcIdByVSwitchId(vswitchId)
if err != nil {
return nil, fmt.Errorf("VswitchId %s is not valid of current region", vswitchId)
}
// fill vpcId by vswitchId
args.VPCId = vpcId
// check vswitchId in zone
vsw, err := client.QueryVswitchById(vpcId, vswitchId)
if err != nil {
return nil, fmt.Errorf("VswitchId %s is not valid of current region", vswitchId)
}
if zoneId == "" {
args.ZoneId = vsw.ZoneId
} else if vsw.ZoneId != zoneId {
return nil, fmt.Errorf("VswitchId %s is not belong to the zone %s", vswitchId, zoneId)
}
}
if v := d.Get("db_instance_net_type").(string); v != "" {
args.DBInstanceNetType = common.NetType(v)
}
chargeType := d.Get("instance_charge_type").(string)
if chargeType != "" {
args.PayType = rds.DBPayType(chargeType)
} else {
args.PayType = rds.Postpaid
}
// if the charge type is postpaid, the commodity code must be set to bards
if chargeType == string(rds.Postpaid) {
args.CommodityCode = rds.Bards
} else {
args.CommodityCode = rds.Rds
}
period := d.Get("period").(int)
args.UsedTime, args.TimeType = TransformPeriod2Time(period, chargeType)
return args, nil
}

View File

@ -0,0 +1,765 @@
package alicloud
import (
"fmt"
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/rds"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"log"
"strings"
"testing"
)
func TestAccAlicloudDBInstance_basic(t *testing.T) {
var instance rds.DBInstanceAttribute
resource.Test(t, resource.TestCase{
PreCheck: func() {
testAccPreCheck(t)
},
// module name
IDRefreshName: "alicloud_db_instance.foo",
Providers: testAccProviders,
CheckDestroy: testAccCheckDBInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDBInstanceConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckDBInstanceExists(
"alicloud_db_instance.foo", &instance),
resource.TestCheckResourceAttr(
"alicloud_db_instance.foo",
"port",
"3306"),
resource.TestCheckResourceAttr(
"alicloud_db_instance.foo",
"db_instance_storage",
"10"),
resource.TestCheckResourceAttr(
"alicloud_db_instance.foo",
"instance_network_type",
"Classic"),
resource.TestCheckResourceAttr(
"alicloud_db_instance.foo",
"db_instance_net_type",
"Intranet"),
resource.TestCheckResourceAttr(
"alicloud_db_instance.foo",
"engine_version",
"5.6"),
resource.TestCheckResourceAttr(
"alicloud_db_instance.foo",
"engine",
"MySQL"),
),
},
},
})
}
func TestAccAlicloudDBInstance_vpc(t *testing.T) {
var instance rds.DBInstanceAttribute
resource.Test(t, resource.TestCase{
PreCheck: func() {
testAccPreCheck(t)
},
// module name
IDRefreshName: "alicloud_db_instance.foo",
Providers: testAccProviders,
CheckDestroy: testAccCheckDBInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDBInstance_vpc,
Check: resource.ComposeTestCheckFunc(
testAccCheckDBInstanceExists(
"alicloud_db_instance.foo", &instance),
resource.TestCheckResourceAttr(
"alicloud_db_instance.foo",
"port",
"3306"),
resource.TestCheckResourceAttr(
"alicloud_db_instance.foo",
"db_instance_storage",
"10"),
resource.TestCheckResourceAttr(
"alicloud_db_instance.foo",
"instance_network_type",
"VPC"),
resource.TestCheckResourceAttr(
"alicloud_db_instance.foo",
"db_instance_net_type",
"Intranet"),
resource.TestCheckResourceAttr(
"alicloud_db_instance.foo",
"engine_version",
"5.6"),
resource.TestCheckResourceAttr(
"alicloud_db_instance.foo",
"engine",
"MySQL"),
),
},
},
})
}
func TestC2CAlicloudDBInstance_prepaid_order(t *testing.T) {
var instance rds.DBInstanceAttribute
resource.Test(t, resource.TestCase{
PreCheck: func() {
testAccPreCheck(t)
},
// module name
IDRefreshName: "alicloud_db_instance.foo",
Providers: testAccProviders,
CheckDestroy: testAccCheckDBInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDBInstance_prepaid_order,
Check: resource.ComposeTestCheckFunc(
testAccCheckDBInstanceExists(
"alicloud_db_instance.foo", &instance),
resource.TestCheckResourceAttr(
"alicloud_db_instance.foo",
"port",
"3306"),
resource.TestCheckResourceAttr(
"alicloud_db_instance.foo",
"db_instance_storage",
"10"),
resource.TestCheckResourceAttr(
"alicloud_db_instance.foo",
"instance_network_type",
"VPC"),
resource.TestCheckResourceAttr(
"alicloud_db_instance.foo",
"db_instance_net_type",
"Intranet"),
resource.TestCheckResourceAttr(
"alicloud_db_instance.foo",
"engine_version",
"5.6"),
resource.TestCheckResourceAttr(
"alicloud_db_instance.foo",
"engine",
"MySQL"),
),
},
},
})
}
func TestAccAlicloudDBInstance_multiIZ(t *testing.T) {
var instance rds.DBInstanceAttribute
resource.Test(t, resource.TestCase{
PreCheck: func() {
testAccPreCheck(t)
},
// module name
IDRefreshName: "alicloud_db_instance.foo",
Providers: testAccProviders,
CheckDestroy: testAccCheckDBInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDBInstance_multiIZ,
Check: resource.ComposeTestCheckFunc(
testAccCheckDBInstanceExists(
"alicloud_db_instance.foo", &instance),
testAccCheckDBInstanceMultiIZ(&instance),
),
},
},
})
}
func TestAccAlicloudDBInstance_database(t *testing.T) {
var instance rds.DBInstanceAttribute
resource.Test(t, resource.TestCase{
PreCheck: func() {
testAccPreCheck(t)
},
// module name
IDRefreshName: "alicloud_db_instance.foo",
Providers: testAccProviders,
CheckDestroy: testAccCheckDBInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDBInstance_database,
Check: resource.ComposeTestCheckFunc(
testAccCheckDBInstanceExists(
"alicloud_db_instance.foo", &instance),
resource.TestCheckResourceAttr("alicloud_db_instance.foo", "db_mappings.#", "2"),
),
},
resource.TestStep{
Config: testAccDBInstance_database_update,
Check: resource.ComposeTestCheckFunc(
testAccCheckDBInstanceExists(
"alicloud_db_instance.foo", &instance),
resource.TestCheckResourceAttr("alicloud_db_instance.foo", "db_mappings.#", "3"),
),
},
},
})
}
func TestAccAlicloudDBInstance_account(t *testing.T) {
var instance rds.DBInstanceAttribute
resource.Test(t, resource.TestCase{
PreCheck: func() {
testAccPreCheck(t)
},
// module name
IDRefreshName: "alicloud_db_instance.foo",
Providers: testAccProviders,
CheckDestroy: testAccCheckDBInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDBInstance_grantDatabasePrivilege2Account,
Check: resource.ComposeTestCheckFunc(
testAccCheckDBInstanceExists(
"alicloud_db_instance.foo", &instance),
resource.TestCheckResourceAttr("alicloud_db_instance.foo", "db_mappings.#", "2"),
testAccCheckAccountHasPrivilege2Database("alicloud_db_instance.foo", "tester", "foo", "ReadWrite"),
),
},
},
})
}
func TestAccAlicloudDBInstance_allocatePublicConnection(t *testing.T) {
var instance rds.DBInstanceAttribute
resource.Test(t, resource.TestCase{
PreCheck: func() {
testAccPreCheck(t)
},
// module name
IDRefreshName: "alicloud_db_instance.foo",
Providers: testAccProviders,
CheckDestroy: testAccCheckDBInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDBInstance_allocatePublicConnection,
Check: resource.ComposeTestCheckFunc(
testAccCheckDBInstanceExists(
"alicloud_db_instance.foo", &instance),
resource.TestCheckResourceAttr("alicloud_db_instance.foo", "connections.#", "2"),
testAccCheckHasPublicConnection("alicloud_db_instance.foo"),
),
},
},
})
}
func TestAccAlicloudDBInstance_backupPolicy(t *testing.T) {
var policies []map[string]interface{}
resource.Test(t, resource.TestCase{
PreCheck: func() {
testAccPreCheck(t)
},
// module name
IDRefreshName: "alicloud_db_instance.foo",
Providers: testAccProviders,
CheckDestroy: testAccCheckDBInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDBInstance_backup,
Check: resource.ComposeTestCheckFunc(
					testAccCheckBackupPolicyExists(
						"alicloud_db_instance.foo", &policies),
					testAccCheckKeyValueInMaps(&policies, "backup policy", "preferred_backup_period", "Wednesday,Thursday"),
					testAccCheckKeyValueInMaps(&policies, "backup policy", "preferred_backup_time", "00:00Z-01:00Z"),
),
},
},
})
}
func TestAccAlicloudDBInstance_securityIps(t *testing.T) {
var ips []map[string]interface{}
resource.Test(t, resource.TestCase{
PreCheck: func() {
testAccPreCheck(t)
},
// module name
IDRefreshName: "alicloud_db_instance.foo",
Providers: testAccProviders,
CheckDestroy: testAccCheckDBInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDBInstance_securityIps,
Check: resource.ComposeTestCheckFunc(
					testAccCheckSecurityIpExists(
						"alicloud_db_instance.foo", &ips),
					testAccCheckKeyValueInMaps(&ips, "security ip", "security_ips", "127.0.0.1"),
),
},
resource.TestStep{
Config: testAccDBInstance_securityIpsConfig,
Check: resource.ComposeTestCheckFunc(
					testAccCheckSecurityIpExists(
						"alicloud_db_instance.foo", &ips),
					testAccCheckKeyValueInMaps(&ips, "security ip", "security_ips", "10.168.1.12,100.69.7.112"),
),
},
},
})
}
func TestAccAlicloudDBInstance_upgradeClass(t *testing.T) {
var instance rds.DBInstanceAttribute
resource.Test(t, resource.TestCase{
PreCheck: func() {
testAccPreCheck(t)
},
// module name
IDRefreshName: "alicloud_db_instance.foo",
Providers: testAccProviders,
CheckDestroy: testAccCheckDBInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDBInstance_class,
Check: resource.ComposeTestCheckFunc(
testAccCheckDBInstanceExists(
"alicloud_db_instance.foo", &instance),
resource.TestCheckResourceAttr("alicloud_db_instance.foo", "db_instance_class", "rds.mysql.t1.small"),
),
},
resource.TestStep{
Config: testAccDBInstance_classUpgrade,
Check: resource.ComposeTestCheckFunc(
testAccCheckDBInstanceExists(
"alicloud_db_instance.foo", &instance),
resource.TestCheckResourceAttr("alicloud_db_instance.foo", "db_instance_class", "rds.mysql.s1.small"),
),
},
},
})
}
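
// testAccCheckSecurityIpExists reads the security IP whitelist of the DB
// instance referenced by the named resource and records the flattened entries
// in ips so that later checks can assert on them.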
func testAccCheckSecurityIpExists(n string, ips *[]map[string]interface{}) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No DB Instance ID is set")
}
conn := testAccProvider.Meta().(*AliyunClient).rdsconn
args := rds.DescribeDBInstanceIPsArgs{
DBInstanceId: rs.Primary.ID,
}
resp, err := conn.DescribeDBInstanceIPs(&args)
log.Printf("[DEBUG] check instance %s security ip %#v", rs.Primary.ID, resp)
if err != nil {
return err
}
p := resp.Items.DBInstanceIPArray
if len(p) < 1 {
return fmt.Errorf("DB security ip not found")
}
		*ips = flattenDBSecurityIPs(p)
return nil
}
}
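
// testAccCheckDBInstanceMultiIZ verifies that the DB instance was created as a
// multi-zone deployment by checking its ZoneId for the multi-zone symbol.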
func testAccCheckDBInstanceMultiIZ(i *rds.DBInstanceAttribute) resource.TestCheckFunc {
return func(s *terraform.State) error {
if !strings.Contains(i.ZoneId, MULTI_IZ_SYMBOL) {
return fmt.Errorf("Current region does not support multiIZ.")
}
return nil
}
}
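
// testAccCheckAccountHasPrivilege2Database waits until the given account holds
// the expected privilege on the given database of the DB instance, failing the
// check if the grant does not appear before the wait times out.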
func testAccCheckAccountHasPrivilege2Database(n, accountName, dbName, privilege string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No DB instance ID is set")
}
conn := testAccProvider.Meta().(*AliyunClient).rdsconn
if err := conn.WaitForAccountPrivilege(rs.Primary.ID, accountName, dbName, rds.AccountPrivilege(privilege), 50); err != nil {
return fmt.Errorf("Failed to grant database %s privilege to account %s: %v", dbName, accountName, err)
}
return nil
}
}
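
// testAccCheckHasPublicConnection waits until a public connection has been
// allocated for the DB instance, failing the check if it never becomes available.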
func testAccCheckHasPublicConnection(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No DB instance ID is set")
}
conn := testAccProvider.Meta().(*AliyunClient).rdsconn
if err := conn.WaitForPublicConnection(rs.Primary.ID, 50); err != nil {
return fmt.Errorf("Failed to allocate public connection: %v", err)
}
return nil
}
}
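
// testAccCheckDBInstanceExists looks up the DB instance referenced by the named
// resource and copies its attributes into d for use by subsequent checks.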
func testAccCheckDBInstanceExists(n string, d *rds.DBInstanceAttribute) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No DB Instance ID is set")
}
client := testAccProvider.Meta().(*AliyunClient)
attr, err := client.DescribeDBInstanceById(rs.Primary.ID)
log.Printf("[DEBUG] check instance %s attribute %#v", rs.Primary.ID, attr)
if err != nil {
return err
}
if attr == nil {
return fmt.Errorf("DB Instance not found")
}
*d = *attr
return nil
}
}
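
// testAccCheckBackupPolicyExists reads the backup policy of the DB instance
// referenced by the named resource and records the flattened policy in ps.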
func testAccCheckBackupPolicyExists(n string, ps *[]map[string]interface{}) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Backup policy not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No DB Instance ID is set")
}
conn := testAccProvider.Meta().(*AliyunClient).rdsconn
args := rds.DescribeBackupPolicyArgs{
DBInstanceId: rs.Primary.ID,
}
resp, err := conn.DescribeBackupPolicy(&args)
log.Printf("[DEBUG] check instance %s backup policy %#v", rs.Primary.ID, resp)
if err != nil {
return err
}
var bs []rds.BackupPolicy
bs = append(bs, resp.BackupPolicy)
		*ps = flattenDBBackup(bs)
return nil
}
}
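
// testAccCheckKeyValueInMaps asserts that every map collected by a preceding
// check contains the expected value under the given key.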
func testAccCheckKeyValueInMaps(ps *[]map[string]interface{}, propName, key, value string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if len(*ps) == 0 {
			return fmt.Errorf("DB %s not found", propName)
		}
		for _, policy := range *ps {
if policy[key].(string) != value {
return fmt.Errorf("DB %s attribute '%s' expected %#v, got %#v", propName, key, value, policy[key])
}
}
return nil
}
}
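
// testAccCheckDBInstanceDestroy verifies that every DB instance created by the
// test configuration has been removed; an InstanceNotfound error from the API
// is treated as successful destruction.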
func testAccCheckDBInstanceDestroy(s *terraform.State) error {
client := testAccProvider.Meta().(*AliyunClient)
for _, rs := range s.RootModule().Resources {
if rs.Type != "alicloud_db_instance" {
continue
}
		ins, err := client.DescribeDBInstanceById(rs.Primary.ID)
		if ins != nil {
			return fmt.Errorf("DB Instance %s still exists", rs.Primary.ID)
		}

		// Verify the error is what we want: the instance should no longer be found
		if err != nil {
			e, ok := err.(*common.Error)
			if ok && e.ErrorResponse.Code == InstanceNotfound {
				continue
			}
			return err
		}
}
return nil
}
const testAccDBInstanceConfig = `
resource "alicloud_db_instance" "foo" {
engine = "MySQL"
engine_version = "5.6"
db_instance_class = "rds.mysql.t1.small"
db_instance_storage = "10"
instance_charge_type = "Postpaid"
db_instance_net_type = "Intranet"
}
`
const testAccDBInstance_vpc = `
data "alicloud_zones" "default" {
"available_resource_creation"= "VSwitch"
}
resource "alicloud_vpc" "foo" {
name = "tf_test_foo"
cidr_block = "172.16.0.0/12"
}
resource "alicloud_vswitch" "foo" {
vpc_id = "${alicloud_vpc.foo.id}"
cidr_block = "172.16.0.0/21"
availability_zone = "${data.alicloud_zones.default.zones.0.id}"
}
resource "alicloud_db_instance" "foo" {
engine = "MySQL"
engine_version = "5.6"
db_instance_class = "rds.mysql.t1.small"
db_instance_storage = "10"
instance_charge_type = "Postpaid"
db_instance_net_type = "Intranet"
vswitch_id = "${alicloud_vswitch.foo.id}"
}
`
const testAccDBInstance_multiIZ = `
resource "alicloud_db_instance" "foo" {
engine = "MySQL"
engine_version = "5.6"
db_instance_class = "rds.mysql.t1.small"
db_instance_storage = "10"
db_instance_net_type = "Intranet"
multi_az = true
}
`
const testAccDBInstance_prepaid_order = `
resource "alicloud_db_instance" "foo" {
engine = "MySQL"
engine_version = "5.6"
db_instance_class = "rds.mysql.t1.small"
db_instance_storage = "10"
instance_charge_type = "Prepaid"
db_instance_net_type = "Intranet"
}
`
const testAccDBInstance_database = `
resource "alicloud_db_instance" "foo" {
engine = "MySQL"
engine_version = "5.6"
db_instance_class = "rds.mysql.t1.small"
db_instance_storage = "10"
instance_charge_type = "Postpaid"
db_instance_net_type = "Intranet"
db_mappings = [
{
"db_name" = "foo"
"character_set_name" = "utf8"
"db_description" = "tf"
},{
"db_name" = "bar"
"character_set_name" = "utf8"
"db_description" = "tf"
}]
}
`
const testAccDBInstance_database_update = `
resource "alicloud_db_instance" "foo" {
engine = "MySQL"
engine_version = "5.6"
db_instance_class = "rds.mysql.t1.small"
db_instance_storage = "10"
instance_charge_type = "Postpaid"
db_instance_net_type = "Intranet"
db_mappings = [
{
"db_name" = "foo"
"character_set_name" = "utf8"
"db_description" = "tf"
},{
"db_name" = "bar"
"character_set_name" = "utf8"
"db_description" = "tf"
},{
"db_name" = "zzz"
"character_set_name" = "utf8"
"db_description" = "tf"
}]
}
`
const testAccDBInstance_grantDatabasePrivilege2Account = `
resource "alicloud_db_instance" "foo" {
engine = "MySQL"
engine_version = "5.6"
db_instance_class = "rds.mysql.t1.small"
db_instance_storage = "10"
instance_charge_type = "Postpaid"
db_instance_net_type = "Intranet"
master_user_name = "tester"
master_user_password = "Test12345"
db_mappings = [
{
"db_name" = "foo"
"character_set_name" = "utf8"
"db_description" = "tf"
},{
"db_name" = "bar"
"character_set_name" = "utf8"
"db_description" = "tf"
}]
}
`
const testAccDBInstance_allocatePublicConnection = `
resource "alicloud_db_instance" "foo" {
engine = "MySQL"
engine_version = "5.6"
db_instance_class = "rds.mysql.t1.small"
db_instance_storage = "10"
instance_charge_type = "Postpaid"
db_instance_net_type = "Intranet"
master_user_name = "tester"
master_user_password = "Test12345"
allocate_public_connection = true
}
`
const testAccDBInstance_backup = `
resource "alicloud_db_instance" "foo" {
engine = "MySQL"
engine_version = "5.6"
db_instance_class = "rds.mysql.t1.small"
db_instance_storage = "10"
instance_charge_type = "Postpaid"
db_instance_net_type = "Intranet"
preferred_backup_period = ["Wednesday","Thursday"]
preferred_backup_time = "00:00Z-01:00Z"
backup_retention_period = 9
}
`
const testAccDBInstance_securityIps = `
resource "alicloud_db_instance" "foo" {
engine = "MySQL"
engine_version = "5.6"
db_instance_class = "rds.mysql.t1.small"
db_instance_storage = "10"
instance_charge_type = "Postpaid"
db_instance_net_type = "Intranet"
}
`
const testAccDBInstance_securityIpsConfig = `
resource "alicloud_db_instance" "foo" {
engine = "MySQL"
engine_version = "5.6"
db_instance_class = "rds.mysql.t1.small"
db_instance_storage = "10"
instance_charge_type = "Postpaid"
db_instance_net_type = "Intranet"
security_ips = ["10.168.1.12", "100.69.7.112"]
}
`
const testAccDBInstance_class = `
resource "alicloud_db_instance" "foo" {
engine = "MySQL"
engine_version = "5.6"
db_instance_class = "rds.mysql.t1.small"
db_instance_storage = "10"
db_instance_net_type = "Intranet"
}
`
const testAccDBInstance_classUpgrade = `
resource "alicloud_db_instance" "foo" {
engine = "MySQL"
engine_version = "5.6"
db_instance_class = "rds.mysql.s1.small"
db_instance_storage = "10"
db_instance_net_type = "Intranet"
}
`

View File

@@ -0,0 +1,247 @@
package alicloud
import (
	"fmt"
	"log"
	"time"

	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)
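
// resourceAliyunDisk defines the schema and CRUD operations for a standalone
// ECS cloud disk. A minimal configuration sketch (the resource type name and
// the values below are illustrative and not taken from this file):
//
//	resource "alicloud_disk" "example" {
//	  availability_zone = "cn-beijing-b"
//	  category          = "cloud_efficiency"
//	  size              = 20
//	}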
func resourceAliyunDisk() *schema.Resource {
return &schema.Resource{
Create: resourceAliyunDiskCreate,
Read: resourceAliyunDiskRead,
Update: resourceAliyunDiskUpdate,
Delete: resourceAliyunDiskDelete,
Schema: map[string]*schema.Schema{
"availability_zone": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ValidateFunc: validateDiskName,
},
"description": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ValidateFunc: validateDiskDescription,
},
"category": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validateDiskCategory,
Default: "cloud",
},
"size": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
},
"snapshot_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"status": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"tags": tagsSchema(),
},
}
}
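
// resourceAliyunDiskCreate validates that the requested category is available
// in the chosen zone and that the size is valid for that category, creates the
// disk through the ECS API, and then delegates to resourceAliyunDiskUpdate so
// that tags are applied on first create.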
func resourceAliyunDiskCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*AliyunClient)
conn := client.ecsconn
availabilityZone, err := client.DescribeZone(d.Get("availability_zone").(string))
if err != nil {
return err
}
args := &ecs.CreateDiskArgs{
RegionId: getRegion(d, meta),
ZoneId: availabilityZone.ZoneId,
}
if v, ok := d.GetOk("category"); ok && v.(string) != "" {
category := ecs.DiskCategory(v.(string))
if err := client.DiskAvailable(availabilityZone, category); err != nil {
return err
}
args.DiskCategory = category
}
if v, ok := d.GetOk("size"); ok {
size := v.(int)
if args.DiskCategory == ecs.DiskCategoryCloud && (size < 5 || size > 2000) {
return fmt.Errorf("the size of cloud disk must between 5 to 2000")
}
if (args.DiskCategory == ecs.DiskCategoryCloudEfficiency ||
args.DiskCategory == ecs.DiskCategoryCloudSSD) && (size < 20 || size > 32768) {
return fmt.Errorf("the size of %s disk must between 20 to 32768", args.DiskCategory)
}
args.Size = size
d.Set("size", args.Size)
}
if v, ok := d.GetOk("snapshot_id"); ok && v.(string) != "" {
args.SnapshotId = v.(string)
}
if args.Size <= 0 && args.SnapshotId == "" {
return fmt.Errorf("One of size or snapshot_id is required when specifying an ECS disk.")
}
if v, ok := d.GetOk("name"); ok && v.(string) != "" {
args.DiskName = v.(string)
}
if v, ok := d.GetOk("description"); ok && v.(string) != "" {
args.Description = v.(string)
}
diskID, err := conn.CreateDisk(args)
if err != nil {
return fmt.Errorf("CreateDisk got a error: %#v", err)
}
d.SetId(diskID)
return resourceAliyunDiskUpdate(d, meta)
}
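
// resourceAliyunDiskRead refreshes the disk attributes and tags from the ECS
// API, removing the resource from state when the disk can no longer be found.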
func resourceAliyunDiskRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AliyunClient).ecsconn
disks, _, err := conn.DescribeDisks(&ecs.DescribeDisksArgs{
RegionId: getRegion(d, meta),
DiskIds: []string{d.Id()},
})
if err != nil {
if notFoundError(err) {
d.SetId("")
return nil
}
return fmt.Errorf("Error DescribeDiskAttribute: %#v", err)
}
log.Printf("[DEBUG] DescribeDiskAttribute for instance: %#v", disks)
if disks == nil || len(disks) <= 0 {
return fmt.Errorf("No disks found.")
}
disk := disks[0]
d.Set("availability_zone", disk.ZoneId)
d.Set("category", disk.Category)
d.Set("size", disk.Size)
d.Set("status", disk.Status)
d.Set("name", disk.DiskName)
d.Set("description", disk.Description)
d.Set("snapshot_id", disk.SourceSnapshotId)
tags, _, err := conn.DescribeTags(&ecs.DescribeTagsArgs{
RegionId: getRegion(d, meta),
ResourceType: ecs.TagResourceDisk,
ResourceId: d.Id(),
})
if err != nil {
log.Printf("[DEBUG] DescribeTags for disk got error: %#v", err)
}
d.Set("tags", tagsToMap(tags))
return nil
}
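
// resourceAliyunDiskUpdate applies tag changes and pushes updated name and
// description values to the ECS API when they have changed.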
func resourceAliyunDiskUpdate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*AliyunClient)
conn := client.ecsconn
d.Partial(true)
	if err := setTags(client, ecs.TagResourceDisk, d); err != nil {
		return fmt.Errorf("Set tags for disk got an error: %#v", err)
	}
	d.SetPartial("tags")
attributeUpdate := false
args := &ecs.ModifyDiskAttributeArgs{
DiskId: d.Id(),
}
if d.HasChange("name") {
d.SetPartial("name")
val := d.Get("name").(string)
args.DiskName = val
attributeUpdate = true
}
if d.HasChange("description") {
d.SetPartial("description")
val := d.Get("description").(string)
args.Description = val
attributeUpdate = true
}
if attributeUpdate {
if err := conn.ModifyDiskAttribute(args); err != nil {
return err
}
}
d.Partial(false)
return resourceAliyunDiskRead(d, meta)
}
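
// resourceAliyunDiskDelete deletes the disk, retrying for up to five minutes
// while the disk is busy or a snapshot is being created, and returns once the
// disk can no longer be described.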
func resourceAliyunDiskDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AliyunClient).ecsconn
return resource.Retry(5*time.Minute, func() *resource.RetryError {
		err := conn.DeleteDisk(d.Id())
		if err != nil {
			e, ok := err.(*common.Error)
			if ok && (e.ErrorResponse.Code == DiskIncorrectStatus || e.ErrorResponse.Code == DiskCreatingSnapshot) {
				return resource.RetryableError(fmt.Errorf("Disk in use - trying again while it is deleted."))
			}
		}
disks, _, descErr := conn.DescribeDisks(&ecs.DescribeDisksArgs{
RegionId: getRegion(d, meta),
DiskIds: []string{d.Id()},
})
if descErr != nil {
log.Printf("[ERROR] Delete disk is failed.")
return resource.NonRetryableError(descErr)
}
if disks == nil || len(disks) < 1 {
return nil
}
return resource.RetryableError(fmt.Errorf("Disk in use - trying again while it is deleted."))
})
}

View File

@@ -0,0 +1,176 @@
package alicloud
import (
	"fmt"
	"log"
	"strings"
	"time"

	"github.com/denverdino/aliyungo/common"
	"github.com/denverdino/aliyungo/ecs"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)
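
// resourceAliyunDiskAttachment manages attaching an existing disk to an ECS
// instance; the resource ID has the form "<disk_id>:<instance_id>". A minimal
// configuration sketch (the resource type names below are assumed, not taken
// from this file):
//
//	resource "alicloud_disk_attachment" "example" {
//	  disk_id     = "${alicloud_disk.example.id}"
//	  instance_id = "${alicloud_instance.example.id}"
//	}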
func resourceAliyunDiskAttachment() *schema.Resource {
return &schema.Resource{
Create: resourceAliyunDiskAttachmentCreate,
Read: resourceAliyunDiskAttachmentRead,
Delete: resourceAliyunDiskAttachmentDelete,
Schema: map[string]*schema.Schema{
"instance_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"disk_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"device_name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
},
},
}
}
func resourceAliyunDiskAttachmentCreate(d *schema.ResourceData, meta interface{}) error {
err := diskAttachment(d, meta)
if err != nil {
return err
}
d.SetId(d.Get("disk_id").(string) + ":" + d.Get("instance_id").(string))
return resourceAliyunDiskAttachmentRead(d, meta)
}
func resourceAliyunDiskAttachmentRead(d *schema.ResourceData, meta interface{}) error {
diskId, instanceId, err := getDiskIDAndInstanceID(d, meta)
if err != nil {
return err
}
conn := meta.(*AliyunClient).ecsconn
disks, _, err := conn.DescribeDisks(&ecs.DescribeDisksArgs{
RegionId: getRegion(d, meta),
InstanceId: instanceId,
DiskIds: []string{diskId},
})
if err != nil {
if notFoundError(err) {
d.SetId("")
return nil
}
return fmt.Errorf("Error DescribeDiskAttribute: %#v", err)
}
log.Printf("[DEBUG] DescribeDiskAttribute for instance: %#v", disks)
if disks == nil || len(disks) <= 0 {
return fmt.Errorf("No Disks Found.")
}
disk := disks[0]
d.Set("instance_id", disk.InstanceId)
d.Set("disk_id", disk.DiskId)
d.Set("device_name", disk.Device)
return nil
}
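
// resourceAliyunDiskAttachmentDelete detaches the disk from the instance,
// retrying until the disk reports an available status.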
func resourceAliyunDiskAttachmentDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AliyunClient).ecsconn
diskID, instanceID, err := getDiskIDAndInstanceID(d, meta)
if err != nil {
return err
}
return resource.Retry(5*time.Minute, func() *resource.RetryError {
		err := conn.DetachDisk(instanceID, diskID)
		if err != nil {
			e, ok := err.(*common.Error)
			if ok && (e.ErrorResponse.Code == DiskIncorrectStatus || e.ErrorResponse.Code == InstanceLockedForSecurity) {
				return resource.RetryableError(fmt.Errorf("Disk in use - trying again while it detaches"))
			}
		}
disks, _, descErr := conn.DescribeDisks(&ecs.DescribeDisksArgs{
RegionId: getRegion(d, meta),
DiskIds: []string{diskID},
})
		if descErr != nil {
			log.Printf("[ERROR] DescribeDisks for disk %s got an error: %#v", diskID, descErr)
			return resource.NonRetryableError(descErr)
}
for _, disk := range disks {
if disk.Status != ecs.DiskStatusAvailable {
return resource.RetryableError(fmt.Errorf("Disk in use - trying again while it is deleted."))
}
}
return nil
})
}
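
// getDiskIDAndInstanceID splits an attachment resource ID of the form
// "<disk_id>:<instance_id>" into its disk and instance components.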
func getDiskIDAndInstanceID(d *schema.ResourceData, meta interface{}) (string, string, error) {
parts := strings.Split(d.Id(), ":")
if len(parts) != 2 {
return "", "", fmt.Errorf("invalid resource id")
}
return parts[0], parts[1], nil
}
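
// diskAttachment attaches the configured disk to the configured instance,
// retrying while either resource is in a transient state and until the disk
// shows up in DescribeDisks for the instance.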
func diskAttachment(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AliyunClient).ecsconn
diskID := d.Get("disk_id").(string)
instanceID := d.Get("instance_id").(string)
deviceName := d.Get("device_name").(string)
args := &ecs.AttachDiskArgs{
InstanceId: instanceID,
DiskId: diskID,
Device: deviceName,
}
return resource.Retry(5*time.Minute, func() *resource.RetryError {
		err := conn.AttachDisk(args)
		if err != nil {
			log.Printf("[DEBUG] AttachDisk got an error: %#v", err)
			e, ok := err.(*common.Error)
			if ok && (e.ErrorResponse.Code == DiskIncorrectStatus || e.ErrorResponse.Code == InstanceIncorrectStatus) {
				return resource.RetryableError(fmt.Errorf("Disk or Instance status is incorrect - trying again while it attaches"))
			}
			return resource.NonRetryableError(err)
		}
disks, _, descErr := conn.DescribeDisks(&ecs.DescribeDisksArgs{
RegionId: getRegion(d, meta),
InstanceId: instanceID,
DiskIds: []string{diskID},
})
		if descErr != nil {
			log.Printf("[ERROR] DescribeDisks for disk %s got an error: %#v", diskID, descErr)
			return resource.NonRetryableError(descErr)
}
if disks == nil || len(disks) <= 0 {
return resource.RetryableError(fmt.Errorf("Disk in attaching - trying again while it is attached."))
}
return nil
})
}

Some files were not shown because too many files have changed in this diff.