Merge branch 'master' into patch-1
commit d32ea219ee
.gitignore
@@ -27,5 +27,5 @@ website/node_modules
 website/vendor
 
 # Test exclusions
-!command/test-fixtures/**/*.tfstate
-!command/test-fixtures/**/.terraform/
+!command/testdata/**/*.tfstate
+!command/testdata/**/.terraform/
.go-version
@@ -1 +1 @@
-1.12.4
+1.12.9
.hashibot.hcl (new file)
@@ -0,0 +1,75 @@
behavior "pull_request_path_labeler" "sdk_cherrypicker" {
  label_map = {
    "sdkv1" = [
      # 1-1 package mapping between core and sdk
      "helper/**",
      "httpclient/**",
      "plugin/**",
      "terraform/**",
      "internal/earlyconfig/**",
      "internal/initwd/**",
      "internal/modsdir/**",
      "internal/tfplugin5/**",
      # these packages have been moved under internal/ in the sdk
      "addrs/**",
      "command/format/**",
      "configs/**",
      "dag/**",
      "flatmap/**",
      "lang/**",
      "moduledeps/**",
      "plans/**",
      "providers/**",
      "provisioners/**",
      "registry/**",
      "states/**",
      "svchost/**",
      "tfdiags/**",
      "version/**"
    ]
  }
}

behavior "regexp_issue_labeler" "panic_label" {
  regexp = "panic:"
  labels = ["crash", "bug"]
}

behavior "remove_labels_on_reply" "remove_stale" {
  labels               = ["waiting-response", "stale"]
  only_non_maintainers = true
}

poll "label_issue_migrater" "provider_migrater" {
  schedule                = "0 20 * * * *"
  new_owner               = env.PROVIDERS_OWNER
  repo_prefix             = "terraform-provider-"
  label_prefix            = "provider/"
  excluded_label_prefixes = ["backend/", "provisioner/"]
  excluded_labels         = ["build", "cli", "config", "core", "new-provider", "new-provisioner", "new-remote-state", "provider/terraform"]
  aliases = {
    "provider/google-cloud" = "provider/google"
    "provider/influx"       = "provider/influxdb"
    "provider/vcloud"       = "provider/vcd"
  }
  issue_header = <<-EOF
    _This issue was originally opened by @${var.user} as ${var.repository}#${var.issue_number}. It was migrated here as a result of the [provider split](https://www.hashicorp.com/blog/upcoming-provider-changes-in-terraform-0-10/). The original body of the issue is below._

    <hr>

  EOF
  migrated_comment = "This issue has been automatically migrated to ${var.repository}#${var.issue_number} because it looks like an issue with that provider. If you believe this is _not_ an issue with the provider, please reply to ${var.repository}#${var.issue_number}."
}

poll "closed_issue_locker" "locker" {
  schedule             = "0 50 1 * * *"
  closed_for           = "720h" # 30 days
  max_issues           = 500
  sleep_between_issues = "5s"

  message = <<-EOF
    I'm going to lock this issue because it has been closed for _30 days_ ⏳. This helps our maintainers find and focus on the active issues.

    If you have found a problem that seems similar to this, please open a new issue and complete the issue template so we can capture all the details necessary to investigate further.
  EOF
}
.travis.yml
@@ -4,14 +4,14 @@ services:
   - docker
 language: go
 go:
-  - "1.12.4"
+  - "1.12.9"
 
 # add TF_CONSUL_TEST=1 to run consul tests
 # they were causing timouts in travis
 # add TF_ETCDV3_TEST=1 to run etcdv3 tests
 # if added, TF_ETCDV3_ENDPOINTS must be set to a comma-separated list of (insecure) etcd endpoints against which to test
 env:
-  - CONSUL_VERSION=0.7.5 GOMAXPROCS=4 GO111MODULE=on
+  - CONSUL_VERSION=0.7.5 GOMAXPROCS=4 GO111MODULE=on GOPROXY=https://proxy.golang.org/
 
 # Fetch consul for the backend and provider tests
 before_install:
BUILDING.md | 13
@@ -22,25 +22,12 @@ The guide below outlines the steps HashiCorp takes to build the official release
 binaries for Terraform. This process will generate a set of binaries for each supported
 platform, using the [gox](https://github.com/mitchellh/gox) tool.
 
-A Vagrant virtual machine is used to provide a consistent environment with
-the pre-requisite tools in place. The specifics of this VM are defined in the
-[Vagrantfile](Vagrantfile).
-
 
 ```sh
 # clone the repository if needed
 git clone https://github.com/hashicorp/terraform.git
 cd terraform
 
-# Spin up a fresh build VM
-vagrant destroy -f
-vagrant up
-vagrant ssh
-
-# The Vagrantfile installs Go and configures the $GOPATH at /opt/gopath
-# The current "terraform" directory is then sync'd into the gopath
-cd /opt/gopath/src/github.com/hashicorp/terraform/
-
 # Verify unit tests pass
 make test
 
CHANGELOG.md | 1902
File diff suppressed because it is too large
Makefile | 3
@@ -1,6 +1,6 @@
 VERSION?="0.3.32"
 TEST?=./...
-GOFMT_FILES?=$$(find . -name '*.go' | grep -v vendor)
+GOFMT_FILES?=$$(find . -not -path "./vendor/*" -type f -name '*.go')
 WEBSITE_REPO=github.com/hashicorp/terraform-website
 
 default: test
@@ -88,6 +88,7 @@ generate: tools
 # If you are working on changes to protobuf interfaces you may either use
 # this target or run the individual scripts below directly.
 protobuf:
 	bash scripts/protobuf-check.sh
 	bash internal/tfplugin5/generate.sh
+	bash plans/internal/planproto/generate.sh
README.md | 10
@@ -34,7 +34,7 @@ All documentation is available on the [Terraform website](http://www.terraform.i
 Developing Terraform
 --------------------
 
-If you wish to work on Terraform itself or any of its built-in providers, you'll first need [Go](http://www.golang.org) installed on your machine (version 1.11+ is *required*). Alternatively, you can use the Vagrantfile in the root of this repo to stand up a virtual machine with the appropriate dev tooling already set up for you.
+If you wish to work on Terraform itself or any of its built-in providers, you'll first need [Go](http://www.golang.org) installed on your machine (version 1.11+ is *required*).
 
 This repository contains only Terraform core, which includes the command line interface and the main graph engine. Providers are implemented as plugins that each have their own repository in [the `terraform-providers` organization](https://github.com/terraform-providers) on GitHub. Instructions for developing each provider are in the associated README file. For more information, see [the provider development overview](https://www.terraform.io/docs/plugins/provider.html).
 
@@ -61,7 +61,7 @@ $ bin/terraform
 ...
 ```
 
-If you're developing a specific package, you can run tests for just that package by specifying the `TEST` variable. For example below, only`terraform` package tests will be run.
+If you're developing a specific package, you can run tests for just that package by specifying the `TEST` variable. For example below, only `terraform` package tests will be run.
 
 ```sh
 $ make test TEST=./terraform
@@ -157,12 +157,12 @@ _Note: Cross-compilation uses [gox](https://github.com/mitchellh/gox), which req
 
 When using docker you don't need to have any of the Go development tools installed and you can clone terraform to any location on disk (doesn't have to be in your $GOPATH). This is useful for users who want to build `master` or a specific branch for testing without setting up a proper Go environment.
 
-For example, run the following command to build terraform in a linux-based container for macOS.
+For example, run the following command to install the required tools and build terraform in a linux-based container for macOS.
 
 ```sh
-docker run --rm -v $(pwd):/go/src/github.com/hashicorp/terraform -w /go/src/github.com/hashicorp/terraform -e XC_OS=darwin -e XC_ARCH=amd64 golang:latest bash -c "apt-get update && apt-get install -y zip && make bin"
+docker run --rm -v $(pwd):/go/src/github.com/hashicorp/terraform -w /go/src/github.com/hashicorp/terraform -e XC_OS=darwin -e XC_ARCH=amd64 golang:latest bash -c "apt-get update && apt-get install -y zip && make tools bin"
 ```
 
 
 ## License
-[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fhashicorp%2Fterraform.svg?type=large)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fhashicorp%2Fterraform?ref=badge_large)
+[Mozilla Public License v2.0](https://github.com/hashicorp/terraform/blob/master/LICENSE)
Vagrantfile (deleted)
@@ -1,92 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

# Software version variables
GOVERSION = "1.11.5"
UBUNTUVERSION = "16.04"

# CPU and RAM can be adjusted depending on your system
CPUCOUNT = "2"
RAM = "4096"

$script = <<SCRIPT
GOVERSION="#{GOVERSION}"
SRCROOT="/opt/go"
SRCPATH="/opt/gopath"

# Get the ARCH
ARCH="$(uname -m | sed 's|i686|386|' | sed 's|x86_64|amd64|')"

# Install Prereq Packages
export DEBIAN_PRIORITY=critical
export DEBIAN_FRONTEND=noninteractive
export DEBCONF_NONINTERACTIVE_SEEN=true
APT_OPTS="--assume-yes --no-install-suggests --no-install-recommends -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\""
echo "Upgrading packages ..."
apt-get update ${APT_OPTS} >/dev/null
apt-get upgrade ${APT_OPTS} >/dev/null
echo "Installing prerequisites ..."
apt-get install ${APT_OPTS} build-essential curl git-core libpcre3-dev mercurial pkg-config zip >/dev/null

# Install Go
echo "Downloading go (${GOVERSION}) ..."
wget -P /tmp --quiet "https://storage.googleapis.com/golang/go${GOVERSION}.linux-${ARCH}.tar.gz"
echo "Setting up go (${GOVERSION}) ..."
tar -C /opt -xf "/tmp/go${GOVERSION}.linux-${ARCH}.tar.gz"
chmod 775 "$SRCROOT"
chown vagrant:vagrant "$SRCROOT"

# Setup the GOPATH; even though the shared folder spec gives the working
# directory the right user/group, we need to set it properly on the
# parent path to allow subsequent "go get" commands to work.
mkdir -p "$SRCPATH"
chown -R vagrant:vagrant "$SRCPATH" 2>/dev/null || true
# ^^ silencing errors here because we expect this to fail for the shared folder

cat >/etc/profile.d/gopath.sh <<EOF
export GOPATH="$SRCPATH"
export GOROOT="$SRCROOT"
export PATH="$SRCROOT/bin:$SRCPATH/bin:\$PATH"
EOF
chmod 755 /etc/profile.d/gopath.sh

grep -q -F 'cd /opt/gopath/src/github.com/hashicorp/terraform' /home/vagrant/.bashrc || cat >>/home/vagrant/.bashrc <<EOF

## After login, change to terraform directory
cd /opt/gopath/src/github.com/hashicorp/terraform
EOF

SCRIPT

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.vm.box = "bento/ubuntu-#{UBUNTUVERSION}"
  config.vm.hostname = "terraform"

  config.vm.provision "prepare-shell", type: "shell", inline: "sudo sed -i '/tty/!s/mesg n/tty -s \\&\\& mesg n/' /root/.profile", privileged: false
  config.vm.provision "initial-setup", type: "shell", inline: $script
  config.vm.synced_folder '.', '/opt/gopath/src/github.com/hashicorp/terraform'

  config.vm.provider "docker" do |v, override|
    override.vm.box = "tknerr/baseimage-ubuntu-#{UBUNTUVERSION}"
  end

  ["vmware_fusion", "vmware_workstation"].each do |p|
    config.vm.provider p do |v|
      v.vmx["memsize"] = "#{RAM}"
      v.vmx["numvcpus"] = "#{CPUCOUNT}"
    end
  end

  config.vm.provider "virtualbox" do |v|
    v.memory = "#{RAM}"
    v.cpus = "#{CPUCOUNT}"
  end

  config.vm.provider "parallels" do |prl|
    prl.memory = "#{RAM}"
    prl.cpus = "#{CPUCOUNT}"
  end
end
(new file, package addrs)
@@ -0,0 +1,12 @@
package addrs

// ForEachAttr is the address of an attribute referencing the current "for_each" object in
// the interpolation scope, addressed using the "each" keyword, ex. "each.key" and "each.value"
type ForEachAttr struct {
	referenceable
	Name string
}

func (f ForEachAttr) String() string {
	return "each." + f.Name
}
@@ -85,6 +85,14 @@ func parseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) {
 			Remaining:   remain,
 		}, diags
 
+	case "each":
+		name, rng, remain, diags := parseSingleAttrRef(traversal)
+		return &Reference{
+			Subject:     ForEachAttr{Name: name},
+			SourceRange: tfdiags.SourceRangeFromHCL(rng),
+			Remaining:   remain,
+		}, diags
+
 	case "data":
 		if len(traversal) < 3 {
 			diags = diags.Append(&hcl.Diagnostic{
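As an illustration of the new `each` case (a minimal sketch, not part of the commit, assuming the HCL2 import paths Terraform used at the time): feed an `each.key` expression through the exported `addrs.ParseRef` entry point, which dispatches to the `parseRef` helper above.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"

	"github.com/hashicorp/terraform/addrs"
)

func main() {
	// Turn the source expression into the hcl.Traversal that parseRef receives.
	traversal, diags := hclsyntax.ParseTraversalAbs([]byte("each.key"), "example.tf", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}
	ref, refDiags := addrs.ParseRef(traversal)
	if refDiags.HasErrors() {
		panic(refDiags.Err())
	}
	fmt.Println(ref.Subject) // ForEachAttr{Name: "key"} prints as "each.key"
}
```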
@@ -64,6 +64,52 @@ func TestParseRef(t *testing.T) {
 			`The "count" object does not support this operation.`,
 		},
 
+		// each
+		{
+			`each.key`,
+			&Reference{
+				Subject: ForEachAttr{
+					Name: "key",
+				},
+				SourceRange: tfdiags.SourceRange{
+					Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0},
+					End:   tfdiags.SourcePos{Line: 1, Column: 9, Byte: 8},
+				},
+			},
+			``,
+		},
+		{
+			`each.value.blah`,
+			&Reference{
+				Subject: ForEachAttr{
+					Name: "value",
+				},
+				SourceRange: tfdiags.SourceRange{
+					Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0},
+					End:   tfdiags.SourcePos{Line: 1, Column: 11, Byte: 10},
+				},
+				Remaining: hcl.Traversal{
+					hcl.TraverseAttr{
+						Name: "blah",
+						SrcRange: hcl.Range{
+							Start: hcl.Pos{Line: 1, Column: 11, Byte: 10},
+							End:   hcl.Pos{Line: 1, Column: 16, Byte: 15},
+						},
+					},
+				},
+			},
+			``,
+		},
+		{
+			`each`,
+			nil,
+			`The "each" object cannot be accessed directly. Instead, access one of its attributes.`,
+		},
+		{
+			`each["hello"]`,
+			nil,
+			`The "each" object does not support this operation.`,
+		},
 		// data
 		{
 			`data.external.foo`,
|
@ -0,0 +1,7 @@
|
|||
package addrs
|
||||
|
||||
// ProviderType encapsulates a single provider type. In the future this will be
|
||||
// extended to include additional fields including Namespace and SourceHost
|
||||
type ProviderType struct {
|
||||
Name string
|
||||
}
|
|
@@ -22,6 +22,7 @@ import (
 	backendHTTP "github.com/hashicorp/terraform/backend/remote-state/http"
 	backendInmem "github.com/hashicorp/terraform/backend/remote-state/inmem"
 	backendManta "github.com/hashicorp/terraform/backend/remote-state/manta"
+	backendOSS "github.com/hashicorp/terraform/backend/remote-state/oss"
 	backendPg "github.com/hashicorp/terraform/backend/remote-state/pg"
 	backendS3 "github.com/hashicorp/terraform/backend/remote-state/s3"
 	backendSwift "github.com/hashicorp/terraform/backend/remote-state/swift"
@@ -62,6 +63,7 @@ func Init(services *disco.Disco) {
 		"http":   func() backend.Backend { return backendHTTP.New() },
 		"inmem":  func() backend.Backend { return backendInmem.New() },
 		"manta":  func() backend.Backend { return backendManta.New() },
+		"oss":    func() backend.Backend { return backendOSS.New() },
 		"pg":     func() backend.Backend { return backendPg.New() },
 		"s3":     func() backend.Backend { return backendS3.New() },
 		"swift":  func() backend.Backend { return backendSwift.New() },
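A hypothetical consumption sketch (the map name and lookup are assumptions, not shown in this diff): callers resolve a configured backend type through the registry that Init populates.

```go
// backends is assumed to be the package-level map that Init fills,
// with values of type func() backend.Backend.
f, ok := backends["oss"]
if !ok {
	return fmt.Errorf("unknown backend type %q", "oss")
}
b := f() // a fresh backend.Backend, ready to be configured
```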
@@ -443,7 +443,7 @@ func (b *Local) ReportResult(op *backend.RunningOperation, diags tfdiags.Diagnos
 }
 
 // Colorize returns the Colorize structure that can be used for colorizing
-// output. This is gauranteed to always return a non-nil value and so is useful
+// output. This is guaranteed to always return a non-nil value and so is useful
 // as a helper to wrap any potentially colored strings.
 func (b *Local) Colorize() *colorstring.Colorize {
 	if b.CLIColor != nil {
@@ -32,7 +32,7 @@ func TestLocal_applyBasic(t *testing.T) {
 		"ami": cty.StringVal("bar"),
 	})}
 
-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply")
+	op, configCleanup := testOperationApply(t, "./testdata/apply")
 	defer configCleanup()
 
 	run, err := b.Operation(context.Background(), op)
@@ -71,7 +71,7 @@ func TestLocal_applyEmptyDir(t *testing.T) {
 	p := TestLocalProvider(t, b, "test", &terraform.ProviderSchema{})
 	p.ApplyResourceChangeResponse = providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("yes")})}
 
-	op, configCleanup := testOperationApply(t, "./test-fixtures/empty")
+	op, configCleanup := testOperationApply(t, "./testdata/empty")
 	defer configCleanup()
 
 	run, err := b.Operation(context.Background(), op)
@@ -99,7 +99,7 @@ func TestLocal_applyEmptyDirDestroy(t *testing.T) {
 	p := TestLocalProvider(t, b, "test", &terraform.ProviderSchema{})
 	p.ApplyResourceChangeResponse = providers.ApplyResourceChangeResponse{}
 
-	op, configCleanup := testOperationApply(t, "./test-fixtures/empty")
+	op, configCleanup := testOperationApply(t, "./testdata/empty")
 	defer configCleanup()
 	op.Destroy = true
 
@@ -161,7 +161,7 @@ func TestLocal_applyError(t *testing.T) {
 		}
 	}
 
-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply-error")
+	op, configCleanup := testOperationApply(t, "./testdata/apply-error")
 	defer configCleanup()
 
 	run, err := b.Operation(context.Background(), op)
@@ -201,7 +201,7 @@ func TestLocal_applyBackendFail(t *testing.T) {
 	}
 	defer os.Chdir(wd)
 
-	op, configCleanup := testOperationApply(t, wd+"/test-fixtures/apply")
+	op, configCleanup := testOperationApply(t, wd+"/testdata/apply")
 	defer configCleanup()
 
 	b.Backend = &backendWithFailingState{}
@@ -282,7 +282,7 @@ func testApplyState() *terraform.State {
 }
 
 // applyFixtureSchema returns a schema suitable for processing the
-// configuration in test-fixtures/apply . This schema should be
+// configuration in testdata/apply . This schema should be
 // assigned to a mock provider named "test".
 func applyFixtureSchema() *terraform.ProviderSchema {
 	return &terraform.ProviderSchema{
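A note on the test-fixtures to testdata renames in this and the surrounding test files: the Go toolchain ignores directories named testdata, which is presumably why the fixture directories moved there; the tests continue to reference them by relative path.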
@@ -260,9 +260,10 @@ func (b *Local) renderPlan(plan *plans.Plan, state *states.State, schemas *terra
 		// check if the change is due to a tainted resource
 		tainted := false
 		if !state.Empty() {
-			rs := state.ResourceInstance(rcs.Addr)
-			if rs != nil {
-				tainted = rs.Current.Status == states.ObjectTainted
+			if is := state.ResourceInstance(rcs.Addr); is != nil {
+				if obj := is.GetGeneration(rcs.DeposedKey.Generation()); obj != nil {
+					tainted = obj.Status == states.ObjectTainted
+				}
 			}
 		}
 
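A short sketch of the generation lookup the replacement code relies on (variable types assumed from context): a resource instance in state can hold a current object plus zero or more deposed objects, and `DeposedKey.Generation()` selects which one a planned change refers to.

```go
// is is a *states.ResourceInstance; rcs is the planned resource change.
// DeposedKey.Generation() yields states.CurrentGen when the change targets
// the current object, or the deposed key itself for a deposed object.
gen := rcs.DeposedKey.Generation()
if obj := is.GetGeneration(gen); obj != nil && obj.Status == states.ObjectTainted {
	// only the targeted generation's status decides the "tainted" annotation
}
```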
@@ -25,7 +25,7 @@ func TestLocal_planBasic(t *testing.T) {
 	defer cleanup()
 	p := TestLocalProvider(t, b, "test", planFixtureSchema())
 
-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan")
 	defer configCleanup()
 	op.PlanRefresh = true
 
@@ -59,7 +59,7 @@ func TestLocal_planInAutomation(t *testing.T) {
 	b.RunningInAutomation = false
 	b.CLI = cli.NewMockUi()
 	{
-		op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
+		op, configCleanup := testOperationPlan(t, "./testdata/plan")
 		defer configCleanup()
 		op.PlanRefresh = true
 
@@ -83,7 +83,7 @@ func TestLocal_planInAutomation(t *testing.T) {
 	b.RunningInAutomation = true
 	b.CLI = cli.NewMockUi()
 	{
-		op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
+		op, configCleanup := testOperationPlan(t, "./testdata/plan")
 		defer configCleanup()
 		op.PlanRefresh = true
 
@@ -111,7 +111,7 @@ func TestLocal_planNoConfig(t *testing.T) {
 
 	b.CLI = cli.NewMockUi()
 
-	op, configCleanup := testOperationPlan(t, "./test-fixtures/empty")
+	op, configCleanup := testOperationPlan(t, "./testdata/empty")
 	defer configCleanup()
 	op.PlanRefresh = true
 
@@ -139,7 +139,7 @@ func TestLocal_planTainted(t *testing.T) {
 	outDir := testTempDir(t)
 	defer os.RemoveAll(outDir)
 	planPath := filepath.Join(outDir, "plan.tfplan")
-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan")
 	defer configCleanup()
 	op.PlanRefresh = true
 	op.PlanOutPath = planPath
@@ -193,6 +193,121 @@ Plan: 1 to add, 0 to change, 1 to destroy.`
 	}
 }
 
+func TestLocal_planDeposedOnly(t *testing.T) {
+	b, cleanup := TestLocal(t)
+	defer cleanup()
+	p := TestLocalProvider(t, b, "test", planFixtureSchema())
+	testStateFile(t, b.StatePath, states.BuildState(func(ss *states.SyncState) {
+		ss.SetResourceInstanceDeposed(
+			addrs.Resource{
+				Mode: addrs.ManagedResourceMode,
+				Type: "test_instance",
+				Name: "foo",
+			}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),
+			states.DeposedKey("00000000"),
+			&states.ResourceInstanceObjectSrc{
+				Status: states.ObjectReady,
+				AttrsJSON: []byte(`{
+					"ami": "bar",
+					"network_interface": [{
+						"device_index": 0,
+						"description": "Main network interface"
+					}]
+				}`),
+			},
+			addrs.ProviderConfig{
+				Type: "test",
+			}.Absolute(addrs.RootModuleInstance),
+		)
+	}))
+	b.CLI = cli.NewMockUi()
+	outDir := testTempDir(t)
+	defer os.RemoveAll(outDir)
+	planPath := filepath.Join(outDir, "plan.tfplan")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan")
+	defer configCleanup()
+	op.PlanRefresh = true
+	op.PlanOutPath = planPath
+	cfg := cty.ObjectVal(map[string]cty.Value{
+		"path": cty.StringVal(b.StatePath),
+	})
+	cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type())
+	if err != nil {
+		t.Fatal(err)
+	}
+	op.PlanOutBackend = &plans.Backend{
+		// Just a placeholder so that we can generate a valid plan file.
+		Type:   "local",
+		Config: cfgRaw,
+	}
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("bad: %s", err)
+	}
+	<-run.Done()
+	if run.Result != backend.OperationSuccess {
+		t.Fatalf("plan operation failed")
+	}
+	if !p.ReadResourceCalled {
+		t.Fatal("ReadResource should be called")
+	}
+	if run.PlanEmpty {
+		t.Fatal("plan should not be empty")
+	}
+
+	// The deposed object and the current object are distinct, so our
+	// plan includes separate actions for each of them. This strange situation
+	// is not common: it should arise only if Terraform fails during
+	// a create-before-destroy when the create hasn't completed yet but
+	// in a severe way that prevents the previous object from being restored
+	// as "current".
+	//
+	// However, that situation was more common in some earlier Terraform
+	// versions where deposed objects were not managed properly, so this
+	// can arise when upgrading from an older version with deposed objects
+	// already in the state.
+	//
+	// This is one of the few cases where we expose the idea of "deposed" in
+	// the UI, including the user-unfriendly "deposed key" (00000000 in this
+	// case) just so that users can correlate this with what they might
+	// see in `terraform show` and in the subsequent apply output, because
+	// it's also possible for there to be _multiple_ deposed objects, in the
+	// unlikely event that create_before_destroy _keeps_ crashing across
+	// subsequent runs.
+	expectedOutput := `An execution plan has been generated and is shown below.
+Resource actions are indicated with the following symbols:
+  + create
+  - destroy
+
+Terraform will perform the following actions:
+
+  # test_instance.foo will be created
+  + resource "test_instance" "foo" {
+      + ami = "bar"
+
+      + network_interface {
+          + description  = "Main network interface"
+          + device_index = 0
+        }
+    }
+
+  # test_instance.foo (deposed object 00000000) will be destroyed
+  - resource "test_instance" "foo" {
+      - ami = "bar" -> null
+
+      - network_interface {
+          - description  = "Main network interface" -> null
+          - device_index = 0 -> null
+        }
+    }
+
+Plan: 1 to add, 0 to change, 1 to destroy.`
+	output := b.CLI.(*cli.MockUi).OutputWriter.String()
+	if !strings.Contains(output, expectedOutput) {
+		t.Fatalf("Unexpected output:\n%s\n\nwant output containing:\n%s", output, expectedOutput)
+	}
+}
+
 func TestLocal_planTainted_createBeforeDestroy(t *testing.T) {
 	b, cleanup := TestLocal(t)
 	defer cleanup()
@@ -202,7 +317,7 @@ func TestLocal_planTainted_createBeforeDestroy(t *testing.T) {
 	outDir := testTempDir(t)
 	defer os.RemoveAll(outDir)
 	planPath := filepath.Join(outDir, "plan.tfplan")
-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan-cbd")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan-cbd")
 	defer configCleanup()
 	op.PlanRefresh = true
 	op.PlanOutPath = planPath
@@ -263,7 +378,7 @@ func TestLocal_planRefreshFalse(t *testing.T) {
 	p := TestLocalProvider(t, b, "test", planFixtureSchema())
 	testStateFile(t, b.StatePath, testPlanState())
 
-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan")
 	defer configCleanup()
 
 	run, err := b.Operation(context.Background(), op)
@@ -295,7 +410,7 @@ func TestLocal_planDestroy(t *testing.T) {
 	defer os.RemoveAll(outDir)
 	planPath := filepath.Join(outDir, "plan.tfplan")
 
-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan")
 	defer configCleanup()
 	op.Destroy = true
 	op.PlanRefresh = true
@@ -351,7 +466,7 @@ func TestLocal_planDestroy_withDataSources(t *testing.T) {
 	defer os.RemoveAll(outDir)
 	planPath := filepath.Join(outDir, "plan.tfplan")
 
-	op, configCleanup := testOperationPlan(t, "./test-fixtures/destroy-with-ds")
+	op, configCleanup := testOperationPlan(t, "./testdata/destroy-with-ds")
 	defer configCleanup()
 	op.Destroy = true
 	op.PlanRefresh = true
@@ -436,7 +551,7 @@ func TestLocal_planOutPathNoChange(t *testing.T) {
 	defer os.RemoveAll(outDir)
 	planPath := filepath.Join(outDir, "plan.tfplan")
 
-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan")
 	defer configCleanup()
 	op.PlanOutPath = planPath
 	cfg := cty.ObjectVal(map[string]cty.Value{
@@ -486,7 +601,7 @@ func TestLocal_planScaleOutNoDupeCount(t *testing.T) {
 	outDir := testTempDir(t)
 	defer os.RemoveAll(outDir)
 
-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan-scaleout")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan-scaleout")
 	defer configCleanup()
 	op.PlanRefresh = true
 
@@ -636,7 +751,7 @@ func testReadPlan(t *testing.T, path string) *plans.Plan {
 }
 
 // planFixtureSchema returns a schema suitable for processing the
-// configuration in test-fixtures/plan . This schema should be
+// configuration in testdata/plan . This schema should be
 // assigned to a mock provider named "test".
 func planFixtureSchema() *terraform.ProviderSchema {
 	return &terraform.ProviderSchema{
|
@ -26,7 +26,7 @@ func TestLocal_refresh(t *testing.T) {
|
|||
"id": cty.StringVal("yes"),
|
||||
})}
|
||||
|
||||
op, configCleanup := testOperationRefresh(t, "./test-fixtures/refresh")
|
||||
op, configCleanup := testOperationRefresh(t, "./testdata/refresh")
|
||||
defer configCleanup()
|
||||
|
||||
run, err := b.Operation(context.Background(), op)
|
||||
|
@ -56,7 +56,7 @@ func TestLocal_refreshNoConfig(t *testing.T) {
|
|||
"id": cty.StringVal("yes"),
|
||||
})}
|
||||
|
||||
op, configCleanup := testOperationRefresh(t, "./test-fixtures/empty")
|
||||
op, configCleanup := testOperationRefresh(t, "./testdata/empty")
|
||||
defer configCleanup()
|
||||
|
||||
run, err := b.Operation(context.Background(), op)
|
||||
|
@ -89,7 +89,7 @@ func TestLocal_refreshNilModuleWithInput(t *testing.T) {
|
|||
|
||||
b.OpInput = true
|
||||
|
||||
op, configCleanup := testOperationRefresh(t, "./test-fixtures/empty")
|
||||
op, configCleanup := testOperationRefresh(t, "./testdata/empty")
|
||||
defer configCleanup()
|
||||
|
||||
run, err := b.Operation(context.Background(), op)
|
||||
|
@ -146,7 +146,7 @@ func TestLocal_refreshInput(t *testing.T) {
|
|||
b.OpInput = true
|
||||
b.ContextOpts.UIInput = &terraform.MockUIInput{InputReturnString: "bar"}
|
||||
|
||||
op, configCleanup := testOperationRefresh(t, "./test-fixtures/refresh-var-unset")
|
||||
op, configCleanup := testOperationRefresh(t, "./testdata/refresh-var-unset")
|
||||
defer configCleanup()
|
||||
op.UIIn = b.ContextOpts.UIInput
|
||||
|
||||
|
@ -180,7 +180,7 @@ func TestLocal_refreshValidate(t *testing.T) {
|
|||
// Enable validation
|
||||
b.OpValidation = true
|
||||
|
||||
op, configCleanup := testOperationRefresh(t, "./test-fixtures/refresh")
|
||||
op, configCleanup := testOperationRefresh(t, "./testdata/refresh")
|
||||
defer configCleanup()
|
||||
|
||||
run, err := b.Operation(context.Background(), op)
|
||||
|
@ -234,7 +234,7 @@ func testRefreshState() *terraform.State {
|
|||
}
|
||||
|
||||
// refreshFixtureSchema returns a schema suitable for processing the
|
||||
// configuration in test-fixtures/refresh . This schema should be
|
||||
// configuration in testdata/refresh . This schema should be
|
||||
// assigned to a mock provider named "test".
|
||||
func refreshFixtureSchema() *terraform.ProviderSchema {
|
||||
return &terraform.ProviderSchema{
|
||||
|
|
|
(deleted file, package remotestate)
@@ -1,72 +0,0 @@
// Package remotestate implements a Backend for remote state implementations
// from the state/remote package that also implement a backend schema for
// configuration.
package remotestate

import (
	"context"

	"github.com/zclconf/go-cty/cty"

	"github.com/hashicorp/terraform/backend"
	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/state/remote"
	"github.com/hashicorp/terraform/states/statemgr"
	"github.com/hashicorp/terraform/tfdiags"
)

// Backend implements backend.Backend for remote state backends.
//
// All exported fields should be set. This struct should only be used
// by implementers of backends, not by consumers. If you're consuming, please
// use a higher level package such as Consul backends.
type Backend struct {
	// Backend should be set to the configuration schema. ConfigureFunc
	// should not be set on the schema.
	*schema.Backend

	// ConfigureFunc takes the ctx from a schema.Backend and returns a
	// fully configured remote client to use for state operations.
	ConfigureFunc func(ctx context.Context) (remote.Client, error)

	client remote.Client
}

func (b *Backend) Configure(obj cty.Value) tfdiags.Diagnostics {

	// Set our configureFunc manually
	b.Backend.ConfigureFunc = func(ctx context.Context) error {
		c, err := b.ConfigureFunc(ctx)
		if err != nil {
			return err
		}

		// Set the client for later
		b.client = c
		return nil
	}

	return b.Backend.Configure(obj)
}

func (b *Backend) Workspaces() ([]string, error) {
	return nil, backend.ErrWorkspacesNotSupported
}

func (b *Backend) DeleteWorkspace(name string) error {
	return backend.ErrWorkspacesNotSupported
}

func (b *Backend) StateMgr(name string) (statemgr.Full, error) {
	// This shouldn't happen
	if b.client == nil {
		panic("nil remote client")
	}

	if name != backend.DefaultStateName {
		return nil, backend.ErrWorkspacesNotSupported
	}

	s := &remote.State{Client: b.client}
	return s, nil
}
(deleted file, package remotestate)
@@ -1,11 +0,0 @@
package remotestate

import (
	"testing"

	"github.com/hashicorp/terraform/backend"
)

func TestBackend_impl(t *testing.T) {
	var _ backend.Backend = new(Backend)
}
@@ -138,7 +138,7 @@ func (b *Backend) configure(ctx context.Context) error {
 			Email:      account.ClientEmail,
 			PrivateKey: []byte(account.PrivateKey),
 			Scopes:     []string{storage.ScopeReadWrite},
-			TokenURL:   "https://accounts.google.com/o/oauth2/token",
+			TokenURL:   "https://oauth2.googleapis.com/token",
 		}
 
 		opts = append(opts, option.WithHTTPClient(conf.Client(ctx)))
@@ -110,7 +110,7 @@ func (c *remoteClient) Lock(info *state.LockInfo) (string, error) {
 func (c *remoteClient) Unlock(id string) error {
 	gen, err := strconv.ParseInt(id, 10, 64)
 	if err != nil {
-		return err
+		return fmt.Errorf("Lock ID should be numerical value, got '%s'", id)
 	}
 
 	if err := c.lockFile().If(storage.Conditions{GenerationMatch: gen}).Delete(c.storageContext); err != nil {
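For context, a sketch of the optimistic locking scheme this error message protects (the handle name is an assumption): the ID returned by Lock is the lock object's GCS generation number, and Unlock deletes the lock file only while that generation still matches.

```go
// id round-trips the lock object's generation, so it must parse as int64.
gen, err := strconv.ParseInt(id, 10, 64)
if err != nil {
	return fmt.Errorf("Lock ID should be numerical value, got '%s'", id)
}
// lockFile is assumed to be a *storage.ObjectHandle for the .tflock object.
// GenerationMatch makes the delete atomic: it fails if the lock file was
// re-created (and thus received a new generation) since our Lock call.
err = lockFile.If(storage.Conditions{GenerationMatch: gen}).Delete(ctx)
```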
@@ -6,8 +6,10 @@ import (
 	"fmt"
 	"net/http"
 	"net/url"
+	"time"
 
-	cleanhttp "github.com/hashicorp/go-cleanhttp"
+	"github.com/hashicorp/go-cleanhttp"
+	"github.com/hashicorp/go-retryablehttp"
 	"github.com/hashicorp/terraform/backend"
 	"github.com/hashicorp/terraform/helper/schema"
 	"github.com/hashicorp/terraform/state"
@@ -66,6 +68,24 @@ func New() backend.Backend {
 				Default:     false,
 				Description: "Whether to skip TLS verification.",
 			},
+			"retry_max": &schema.Schema{
+				Type:        schema.TypeInt,
+				Optional:    true,
+				Default:     2,
+				Description: "The number of HTTP request retries.",
+			},
+			"retry_wait_min": &schema.Schema{
+				Type:        schema.TypeInt,
+				Optional:    true,
+				Default:     1,
+				Description: "The minimum time in seconds to wait between HTTP request attempts.",
+			},
+			"retry_wait_max": &schema.Schema{
+				Type:        schema.TypeInt,
+				Optional:    true,
+				Default:     30,
+				Description: "The maximum time in seconds to wait between HTTP request attempts.",
+			},
 		},
 	}
 
@@ -131,6 +151,12 @@ func (b *Backend) configure(ctx context.Context) error {
 		}
 	}
 
+	rClient := retryablehttp.NewClient()
+	rClient.HTTPClient = client
+	rClient.RetryMax = data.Get("retry_max").(int)
+	rClient.RetryWaitMin = time.Duration(data.Get("retry_wait_min").(int)) * time.Second
+	rClient.RetryWaitMax = time.Duration(data.Get("retry_wait_max").(int)) * time.Second
+
 	b.client = &httpClient{
 		URL:          updateURL,
 		UpdateMethod: updateMethod,
@@ -144,7 +170,7 @@ func (b *Backend) configure(ctx context.Context) error {
 		Password:     data.Get("password").(string),
 
 		// accessible only for testing use
-		Client: client,
+		Client: rClient,
 	}
 	return nil
 }
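A self-contained sketch of the retry behavior the backend now delegates to go-retryablehttp (the URL is a placeholder): transient failures such as 5xx responses and connection resets are retried with backoff bounded by RetryWaitMin and RetryWaitMax.

```go
package main

import (
	"log"
	"time"

	"github.com/hashicorp/go-retryablehttp"
)

func main() {
	c := retryablehttp.NewClient()
	c.RetryMax = 2                    // backend default for retry_max
	c.RetryWaitMin = 1 * time.Second  // backend default for retry_wait_min
	c.RetryWaitMax = 30 * time.Second // backend default for retry_wait_max

	resp, err := c.Get("https://example.com/terraform.tfstate") // placeholder URL
	if err != nil {
		log.Fatal(err) // returned only after all retries are exhausted
	}
	defer resp.Body.Close()
}
```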
@@ -2,6 +2,7 @@ package http
 
 import (
 	"testing"
+	"time"
 
 	"github.com/hashicorp/terraform/configs"
 	"github.com/zclconf/go-cty/cty"
@@ -51,6 +52,9 @@ func TestHTTPClientFactory(t *testing.T) {
 		"unlock_method":  cty.StringVal("BLOOP"),
 		"username":       cty.StringVal("user"),
 		"password":       cty.StringVal("pass"),
+		"retry_max":      cty.StringVal("999"),
+		"retry_wait_min": cty.StringVal("15"),
+		"retry_wait_max": cty.StringVal("150"),
 	}
 
 	b = backend.TestBackendConfig(t, New(), configs.SynthBody("synth", conf)).(*Backend)
@@ -74,4 +78,13 @@ func TestHTTPClientFactory(t *testing.T) {
 		t.Fatalf("Unexpected username \"%s\" vs \"%s\" or password \"%s\" vs \"%s\"", client.Username, conf["username"],
 			client.Password, conf["password"])
 	}
+	if client.Client.RetryMax != 999 {
+		t.Fatalf("Expected retry_max \"%d\", got \"%d\"", 999, client.Client.RetryMax)
+	}
+	if client.Client.RetryWaitMin != 15*time.Second {
+		t.Fatalf("Expected retry_wait_min \"%s\", got \"%s\"", 15*time.Second, client.Client.RetryWaitMin)
+	}
+	if client.Client.RetryWaitMax != 150*time.Second {
+		t.Fatalf("Expected retry_wait_max \"%s\", got \"%s\"", 150*time.Second, client.Client.RetryWaitMax)
+	}
 }
@@ -11,6 +11,7 @@ import (
 	"net/http"
 	"net/url"
 
+	"github.com/hashicorp/go-retryablehttp"
 	"github.com/hashicorp/terraform/state"
 	"github.com/hashicorp/terraform/state/remote"
 )
@@ -28,7 +29,7 @@ type httpClient struct {
 	UnlockMethod string
 
 	// HTTP
-	Client   *http.Client
+	Client   *retryablehttp.Client
 	Username string
 	Password string
 
@@ -44,7 +45,7 @@ func (c *httpClient) httpRequest(method string, url *url.URL, data *[]byte, what
 	}
 
 	// Create the request
-	req, err := http.NewRequest(method, url.String(), reader)
+	req, err := retryablehttp.NewRequest(method, url.String(), reader)
 	if err != nil {
 		return nil, fmt.Errorf("Failed to make %s HTTP request: %s", what, err)
 	}
@@ -10,7 +10,7 @@ import (
 	"reflect"
 	"testing"
 
-	cleanhttp "github.com/hashicorp/go-cleanhttp"
+	"github.com/hashicorp/go-retryablehttp"
 	"github.com/hashicorp/terraform/state/remote"
 )
 
@@ -30,14 +30,14 @@ func TestHTTPClient(t *testing.T) {
 	}
 
 	// Test basic get/update
-	client := &httpClient{URL: url, Client: cleanhttp.DefaultClient()}
+	client := &httpClient{URL: url, Client: retryablehttp.NewClient()}
 	remote.TestClient(t, client)
 
 	// test just a single PUT
 	p := &httpClient{
 		URL:          url,
 		UpdateMethod: "PUT",
-		Client:       cleanhttp.DefaultClient(),
+		Client:       retryablehttp.NewClient(),
 	}
 	remote.TestClient(t, p)
 
@@ -49,7 +49,7 @@ func TestHTTPClient(t *testing.T) {
 		LockMethod:   "LOCK",
 		UnlockURL:    url,
 		UnlockMethod: "UNLOCK",
-		Client:       cleanhttp.DefaultClient(),
+		Client:       retryablehttp.NewClient(),
 	}
 	b := &httpClient{
 		URL: url,
@@ -58,7 +58,7 @@ func TestHTTPClient(t *testing.T) {
 		LockMethod:   "LOCK",
 		UnlockURL:    url,
 		UnlockMethod: "UNLOCK",
-		Client:       cleanhttp.DefaultClient(),
+		Client:       retryablehttp.NewClient(),
 	}
 	remote.TestRemoteLocks(t, a, b)
 
@@ -68,13 +68,23 @@ func TestHTTPClient(t *testing.T) {
 	defer ts.Close()
 
 	url, err = url.Parse(ts.URL)
-	c := &httpClient{
+	client = &httpClient{
 		URL:          url,
 		UpdateMethod: "PUT",
-		Client:       cleanhttp.DefaultClient(),
+		Client:       retryablehttp.NewClient(),
 	}
-	remote.TestClient(t, c) // first time through: 201
-	remote.TestClient(t, c) // second time, with identical data: 204
+	remote.TestClient(t, client) // first time through: 201
+	remote.TestClient(t, client) // second time, with identical data: 204
+
+	// test a broken backend
+	brokenHandler := new(testBrokenHTTPHandler)
+	brokenHandler.handler = new(testHTTPHandler)
+	ts = httptest.NewServer(http.HandlerFunc(brokenHandler.Handle))
+	defer ts.Close()
+
+	url, err = url.Parse(ts.URL)
+	client = &httpClient{URL: url, Client: retryablehttp.NewClient()}
+	remote.TestClient(t, client)
 }
 
 func assertError(t *testing.T, err error, expected string) {
@@ -149,3 +159,18 @@ func (h *testHTTPHandler) HandleWebDAV(w http.ResponseWriter, r *http.Request) {
 		w.Write([]byte(fmt.Sprintf("Unknown method: %s", r.Method)))
 	}
 }
+
+type testBrokenHTTPHandler struct {
+	lastRequestWasBroken bool
+	handler              *testHTTPHandler
+}
+
+func (h *testBrokenHTTPHandler) Handle(w http.ResponseWriter, r *http.Request) {
+	if h.lastRequestWasBroken {
+		h.lastRequestWasBroken = false
+		h.handler.Handle(w, r)
+	} else {
+		h.lastRequestWasBroken = true
+		w.WriteHeader(500)
+	}
+}
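Worth spelling out: testBrokenHTTPHandler fails every other request with a 500, so the final remote.TestClient pass succeeds only because the retryablehttp client retries the failed attempt; the previous plain http.Client would have surfaced the 500 directly.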
@ -0,0 +1,505 @@
|
|||
package oss
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
|
||||
"github.com/aliyun/alibaba-cloud-sdk-go/services/sts"
|
||||
"github.com/aliyun/aliyun-oss-go-sdk/oss"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/helper/validation"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/aliyun/alibaba-cloud-sdk-go/sdk"
|
||||
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
|
||||
"github.com/aliyun/alibaba-cloud-sdk-go/services/location"
|
||||
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore"
|
||||
"github.com/hashicorp/go-cleanhttp"
|
||||
"github.com/hashicorp/terraform/version"
|
||||
"log"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// New creates a new backend for OSS remote state.
|
||||
func New() backend.Backend {
|
||||
s := &schema.Backend{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"access_key": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "Alibaba Cloud Access Key ID",
|
||||
DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ACCESS_KEY", os.Getenv("ALICLOUD_ACCESS_KEY_ID")),
|
||||
},
|
||||
|
||||
"secret_key": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "Alibaba Cloud Access Secret Key",
|
||||
DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_SECRET_KEY", os.Getenv("ALICLOUD_ACCESS_KEY_SECRET")),
|
||||
},
|
||||
|
||||
"security_token": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "Alibaba Cloud Security Token",
|
||||
DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_SECURITY_TOKEN", ""),
|
||||
},
|
||||
|
||||
"region": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "The region of the OSS bucket.",
|
||||
DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_REGION", os.Getenv("ALICLOUD_DEFAULT_REGION")),
|
||||
},
|
||||
"tablestore_endpoint": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "A custom endpoint for the TableStore API",
|
||||
DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_TABLESTORE_ENDPOINT", ""),
|
||||
},
|
||||
"endpoint": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "A custom endpoint for the OSS API",
|
||||
DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_OSS_ENDPOINT", os.Getenv("OSS_ENDPOINT")),
|
||||
},
|
||||
|
||||
"bucket": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
Description: "The name of the OSS bucket",
|
||||
},
|
||||
|
||||
"prefix": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "The directory where state files will be saved inside the bucket",
|
||||
Default: "env:",
|
||||
ValidateFunc: func(v interface{}, s string) ([]string, []error) {
|
||||
prefix := v.(string)
|
||||
if strings.HasPrefix(prefix, "/") || strings.HasPrefix(prefix, "./") {
|
||||
return nil, []error{fmt.Errorf("workspace_key_prefix must not start with '/' or './'")}
|
||||
}
|
||||
return nil, nil
|
||||
},
|
||||
},
|
||||
|
||||
"key": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "The path of the state file inside the bucket",
|
||||
ValidateFunc: func(v interface{}, s string) ([]string, []error) {
|
||||
if strings.HasPrefix(v.(string), "/") || strings.HasSuffix(v.(string), "/") {
|
||||
return nil, []error{fmt.Errorf("key can not start and end with '/'")}
|
||||
}
|
||||
return nil, nil
|
||||
},
|
||||
Default: "terraform.tfstate",
|
||||
},
|
||||
|
||||
"tablestore_table": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "TableStore table for state locking and consistency",
|
||||
Default: "",
|
||||
},
|
||||
|
||||
"encrypt": &schema.Schema{
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Description: "Whether to enable server side encryption of the state file",
|
||||
Default: false,
|
||||
},
|
||||
|
||||
"acl": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "Object ACL to be applied to the state file",
|
||||
Default: "",
|
||||
ValidateFunc: func(v interface{}, k string) ([]string, []error) {
|
||||
if value := v.(string); value != "" {
|
||||
acls := oss.ACLType(value)
|
||||
if acls != oss.ACLPrivate && acls != oss.ACLPublicRead && acls != oss.ACLPublicReadWrite {
|
||||
return nil, []error{fmt.Errorf(
|
||||
"%q must be a valid ACL value , expected %s, %s or %s, got %q",
|
||||
k, oss.ACLPrivate, oss.ACLPublicRead, oss.ACLPublicReadWrite, acls)}
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
},
|
||||
},
|
||||
|
||||
"assume_role": assumeRoleSchema(),
|
||||
"shared_credentials_file": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "This is the path to the shared credentials file. If this is not set and a profile is specified, `~/.aliyun/config.json` will be used.",
|
||||
},
|
||||
"profile": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "This is the Alibaba Cloud profile name as set in the shared credentials file. It can also be sourced from the `ALICLOUD_PROFILE` environment variable.",
|
||||
DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_PROFILE", ""),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result := &Backend{Backend: s}
|
||||
result.Backend.ConfigureFunc = result.configure
|
||||
return result
|
||||
}
|
||||
|
||||
func assumeRoleSchema() *schema.Schema {
|
||||
return &schema.Schema{
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
MaxItems: 1,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"role_arn": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
Description: "The ARN of a RAM role to assume prior to making API calls.",
|
||||
DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ASSUME_ROLE_ARN", ""),
|
||||
},
|
||||
"session_name": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "The session name to use when assuming the role.",
|
||||
DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ASSUME_ROLE_SESSION_NAME", ""),
|
||||
},
|
||||
"policy": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "The permissions applied when assuming a role. You cannot use this policy to grant permissions which exceed those of the role that is being assumed.",
|
||||
},
|
||||
"session_expiration": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Description: "The time after which the established session for assuming role expires.",
|
||||
ValidateFunc: validation.IntBetween(900, 3600),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type Backend struct {
|
||||
*schema.Backend
|
||||
|
||||
// The fields below are set from configure
|
||||
ossClient *oss.Client
|
||||
otsClient *tablestore.TableStoreClient
|
||||
|
||||
bucketName string
|
||||
statePrefix string
|
||||
stateKey string
|
||||
serverSideEncryption bool
|
||||
acl string
|
||||
endpoint string
|
||||
otsEndpoint string
|
||||
otsTable string
|
||||
}
|
||||
|
||||
func (b *Backend) configure(ctx context.Context) error {
|
||||
if b.ossClient != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Grab the resource data
|
||||
d := schema.FromContextBackendConfig(ctx)
|
||||
|
||||
b.bucketName = d.Get("bucket").(string)
|
||||
b.statePrefix = strings.TrimPrefix(strings.Trim(d.Get("prefix").(string), "/"), "./")
|
||||
b.stateKey = d.Get("key").(string)
|
||||
b.serverSideEncryption = d.Get("encrypt").(bool)
|
||||
b.acl = d.Get("acl").(string)
|
||||
|
||||
var getBackendConfig = func(str string, key string) string {
|
||||
if str == "" {
|
||||
value, err := getConfigFromProfile(d, key)
|
||||
if err == nil && value != nil {
|
||||
str = value.(string)
|
||||
}
|
||||
}
|
||||
return str
|
||||
}
|
||||
|
||||
accessKey := getBackendConfig(d.Get("access_key").(string), "access_key_id")
|
||||
secretKey := getBackendConfig(d.Get("secret_key").(string), "access_key_secret")
|
||||
securityToken := getBackendConfig(d.Get("security_token").(string), "sts_token")
|
||||
region := getBackendConfig(d.Get("region").(string), "region_id")
|
||||
|
||||
endpoint := d.Get("endpoint").(string)
|
||||
schma := "https"
|
||||
|
||||
roleArn := getBackendConfig("", "ram_role_arn")
|
||||
sessionName := getBackendConfig("", "ram_session_name")
|
||||
var policy string
|
||||
var sessionExpiration int
|
||||
expiredSeconds, err := getConfigFromProfile(d, "expired_seconds")
|
||||
if err == nil && expiredSeconds != nil {
|
||||
sessionExpiration = (int)(expiredSeconds.(float64))
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("assume_role"); ok {
|
||||
for _, v := range v.(*schema.Set).List() {
|
||||
assumeRole := v.(map[string]interface{})
|
||||
if assumeRole["role_arn"].(string) != "" {
|
||||
roleArn = assumeRole["role_arn"].(string)
|
||||
}
|
||||
if assumeRole["session_name"].(string) != "" {
|
||||
sessionName = assumeRole["session_name"].(string)
|
||||
}
|
||||
if sessionName == "" {
|
||||
sessionName = "terraform"
|
||||
}
|
||||
policy = assumeRole["policy"].(string)
|
||||
sessionExpiration = assumeRole["session_expiration"].(int)
|
||||
if sessionExpiration == 0 {
|
||||
if v := os.Getenv("ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION"); v != "" {
|
||||
if expiredSeconds, err := strconv.Atoi(v); err == nil {
|
||||
sessionExpiration = expiredSeconds
|
||||
}
|
||||
}
|
||||
if sessionExpiration == 0 {
|
||||
sessionExpiration = 3600
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if roleArn != "" {
|
||||
subAccessKeyId, subAccessKeySecret, subSecurityToken, err := getAssumeRoleAK(accessKey, secretKey, region, roleArn, sessionName, policy, sessionExpiration)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
accessKey, secretKey, securityToken = subAccessKeyId, subAccessKeySecret, subSecurityToken
|
||||
}
|
||||
|
||||
if endpoint == "" {
|
||||
endpointItem, _ := b.getOSSEndpointByRegion(accessKey, secretKey, securityToken, region)
|
||||
if endpointItem != nil && len(endpointItem.Endpoint) > 0 {
|
||||
if len(endpointItem.Protocols.Protocols) > 0 {
|
||||
// HTTP or HTTPS
|
||||
schma = strings.ToLower(endpointItem.Protocols.Protocols[0])
|
||||
for _, p := range endpointItem.Protocols.Protocols {
|
||||
if strings.ToLower(p) == "https" {
|
||||
schma = strings.ToLower(p)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
endpoint = endpointItem.Endpoint
|
||||
} else {
|
||||
endpoint = fmt.Sprintf("oss-%s.aliyuncs.com", region)
|
||||
}
|
||||
}
|
||||
if !strings.HasPrefix(endpoint, "http") {
|
||||
endpoint = fmt.Sprintf("%s://%s", schma, endpoint)
|
||||
}
|
||||
log.Printf("[DEBUG] Instantiate OSS client using endpoint: %#v", endpoint)
|
||||
var options []oss.ClientOption
|
||||
if securityToken != "" {
|
||||
options = append(options, oss.SecurityToken(securityToken))
|
||||
}
|
||||
options = append(options, oss.UserAgent(fmt.Sprintf("%s/%s", TerraformUA, TerraformVersion)))
|
||||
|
||||
client, err := oss.New(endpoint, accessKey, secretKey, options...)
|
||||
b.ossClient = client
|
||||
otsEndpoint := d.Get("tablestore_endpoint").(string)
|
||||
if otsEndpoint != "" {
|
||||
if !strings.HasPrefix(otsEndpoint, "http") {
|
||||
otsEndpoint = fmt.Sprintf("%s://%s", schma, otsEndpoint)
|
||||
}
|
||||
b.otsEndpoint = otsEndpoint
|
||||
parts := strings.Split(strings.TrimPrefix(strings.TrimPrefix(otsEndpoint, "https://"), "http://"), ".")
|
||||
b.otsClient = tablestore.NewClientWithConfig(otsEndpoint, parts[0], accessKey, secretKey, securityToken, tablestore.NewDefaultTableStoreConfig())
|
||||
}
|
||||
b.otsTable = d.Get("tablestore_table").(string)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *Backend) getOSSEndpointByRegion(access_key, secret_key, security_token, region string) (*location.DescribeEndpointResponse, error) {
|
||||
args := location.CreateDescribeEndpointRequest()
|
||||
args.ServiceCode = "oss"
|
||||
args.Id = region
|
||||
args.Domain = "location-readonly.aliyuncs.com"
|
||||
|
||||
locationClient, err := location.NewClientWithOptions(region, getSdkConfig(), credentials.NewStsTokenCredential(access_key, secret_key, security_token))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Unable to initialize the location client: %#v", err)
|
||||
|
||||
}
|
||||
locationClient.AppendUserAgent(TerraformUA, TerraformVersion)
|
||||
endpointsResponse, err := locationClient.DescribeEndpoint(args)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Describe oss endpoint using region: %#v got an error: %#v.", region, err)
|
||||
}
|
||||
return endpointsResponse, nil
|
||||
}
|
||||
|
||||
func getAssumeRoleAK(accessKey, secretKey, region, roleArn, sessionName, policy string, sessionExpiration int) (string, string, string, error) {
|
||||
request := sts.CreateAssumeRoleRequest()
|
||||
request.RoleArn = roleArn
|
||||
request.RoleSessionName = sessionName
|
||||
request.DurationSeconds = requests.NewInteger(sessionExpiration)
|
||||
request.Policy = policy
|
||||
request.Scheme = "https"
|
||||
|
||||
client, err := sts.NewClientWithAccessKey(region, accessKey, secretKey)
|
||||
if err != nil {
|
||||
return "", "", "", err
|
||||
}
|
||||
response, err := client.AssumeRole(request)
|
||||
if err != nil {
|
||||
return "", "", "", err
|
||||
}
|
||||
return response.Credentials.AccessKeyId, response.Credentials.AccessKeySecret, response.Credentials.SecurityToken, nil
|
||||
}
|
||||
|
||||
func getSdkConfig() *sdk.Config {
|
||||
return sdk.NewConfig().
|
||||
WithMaxRetryTime(5).
|
||||
WithTimeout(time.Duration(30) * time.Second).
|
||||
WithGoRoutinePoolSize(10).
|
||||
WithDebug(false).
|
||||
WithHttpTransport(getTransport()).
|
||||
WithScheme("HTTPS")
|
||||
}
|
||||
|
||||
func getTransport() *http.Transport {
|
||||
handshakeTimeout, err := strconv.Atoi(os.Getenv("TLSHandshakeTimeout"))
|
||||
if err != nil {
|
||||
handshakeTimeout = 120
|
||||
}
|
||||
transport := cleanhttp.DefaultTransport()
|
||||
transport.TLSHandshakeTimeout = time.Duration(handshakeTimeout) * time.Second
|
||||
transport.Proxy = http.ProxyFromEnvironment
|
||||
return transport
|
||||
}

type Invoker struct {
    catchers []*Catcher
}

type Catcher struct {
    Reason           string
    RetryCount       int
    RetryWaitSeconds int
}

const TerraformUA = "HashiCorp-Terraform"

var TerraformVersion = strings.TrimSuffix(version.String(), "-dev")
var ClientErrorCatcher = Catcher{"AliyunGoClientFailure", 10, 3}
var ServiceBusyCatcher = Catcher{"ServiceUnavailable", 10, 3}

func NewInvoker() Invoker {
    i := Invoker{}
    i.AddCatcher(ClientErrorCatcher)
    i.AddCatcher(ServiceBusyCatcher)
    return i
}

func (a *Invoker) AddCatcher(catcher Catcher) {
    a.catchers = append(a.catchers, &catcher)
}

func (a *Invoker) Run(f func() error) error {
    err := f()

    if err == nil {
        return nil
    }

    for _, catcher := range a.catchers {
        if strings.Contains(err.Error(), catcher.Reason) {
            catcher.RetryCount--

            if catcher.RetryCount <= 0 {
                return fmt.Errorf("Retry timeout and got an error: %#v.", err)
            } else {
                time.Sleep(time.Duration(catcher.RetryWaitSeconds) * time.Second)
                return a.Run(f)
            }
        }
    }
    return err
}
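
// Usage sketch (illustrative, not part of this change): wrap an Aliyun SDK
// call so that transient failures whose messages match a registered Catcher
// are retried with the configured wait between attempts:
//
//     inv := NewInvoker()
//     err := inv.Run(func() error {
//         _, err := locationClient.DescribeEndpoint(args)
//         return err
//     })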

var providerConfig map[string]interface{}

func getConfigFromProfile(d *schema.ResourceData, ProfileKey string) (interface{}, error) {

    if providerConfig == nil {
        if v, ok := d.GetOk("profile"); !ok || v.(string) == "" {
            return nil, nil
        }
        current := d.Get("profile").(string)
        profilePath := d.Get("shared_credentials_file").(string)
        if profilePath == "" {
            profilePath = fmt.Sprintf("%s/.aliyun/config.json", os.Getenv("HOME"))
            if runtime.GOOS == "windows" {
                profilePath = fmt.Sprintf("%s/.aliyun/config.json", os.Getenv("USERPROFILE"))
            }
        }
        providerConfig = make(map[string]interface{})
        _, err := os.Stat(profilePath)
        if !os.IsNotExist(err) {
            data, err := ioutil.ReadFile(profilePath)
            if err != nil {
                return nil, err
            }
            config := map[string]interface{}{}
            err = json.Unmarshal(data, &config)
            if err != nil {
                return nil, err
            }
            for _, v := range config["profiles"].([]interface{}) {
                if current == v.(map[string]interface{})["name"] {
                    providerConfig = v.(map[string]interface{})
                }
            }
        }
    }

    mode := ""
    if v, ok := providerConfig["mode"]; ok {
        mode = v.(string)
    } else {
        return v, nil
    }
    switch ProfileKey {
    case "access_key_id", "access_key_secret":
        if mode == "EcsRamRole" {
            return "", nil
        }
    case "ram_role_name":
        if mode != "EcsRamRole" {
            return "", nil
        }
    case "sts_token":
        if mode != "StsToken" {
            return "", nil
        }
    case "ram_role_arn", "ram_session_name":
        if mode != "RamRoleArn" {
            return "", nil
        }
    case "expired_seconds":
        if mode != "RamRoleArn" {
            return float64(0), nil
        }
    }

    return providerConfig[ProfileKey], nil
}
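
// For reference, the shared credentials file parsed above is assumed to look
// roughly like the following (abridged, illustrative values only; the real
// file is written by the Aliyun CLI):
//
//     {
//       "profiles": [
//         {
//           "name": "default",
//           "mode": "AK",
//           "access_key_id": "...",
//           "access_key_secret": "..."
//         }
//       ]
//     }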

@ -0,0 +1,199 @@
package oss

import (
    "errors"
    "fmt"
    "sort"
    "strings"

    "github.com/aliyun/aliyun-oss-go-sdk/oss"
    "github.com/hashicorp/terraform/backend"
    "github.com/hashicorp/terraform/state"
    "github.com/hashicorp/terraform/state/remote"
    "github.com/hashicorp/terraform/states"

    "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore"
    "log"
    "path"
)

const (
    lockFileSuffix = ".tflock"
)

// get a remote client configured for this state
func (b *Backend) remoteClient(name string) (*RemoteClient, error) {
    if name == "" {
        return nil, errors.New("missing state name")
    }

    client := &RemoteClient{
        ossClient:            b.ossClient,
        bucketName:           b.bucketName,
        stateFile:            b.stateFile(name),
        lockFile:             b.lockFile(name),
        serverSideEncryption: b.serverSideEncryption,
        acl:                  b.acl,
        otsTable:             b.otsTable,
        otsClient:            b.otsClient,
    }
    if b.otsEndpoint != "" && b.otsTable != "" {
        table, err := b.otsClient.DescribeTable(&tablestore.DescribeTableRequest{
            TableName: b.otsTable,
        })
        if err != nil {
            return client, fmt.Errorf("Error describing table store %s: %#v", b.otsTable, err)
        }
        for _, t := range table.TableMeta.SchemaEntry {
            pkMeta := TableStorePrimaryKeyMeta{
                PKName: *t.Name,
            }
            if *t.Type == tablestore.PrimaryKeyType_INTEGER {
                pkMeta.PKType = "Integer"
            } else if *t.Type == tablestore.PrimaryKeyType_STRING {
                pkMeta.PKType = "String"
            } else if *t.Type == tablestore.PrimaryKeyType_BINARY {
                pkMeta.PKType = "Binary"
            } else {
                return client, fmt.Errorf("Unsupported PrimaryKey type: %d.", *t.Type)
            }
            // only the first primary key column is used
            client.otsTablePK = pkMeta
            break
        }
    }

    return client, nil
}

func (b *Backend) Workspaces() ([]string, error) {
    bucket, err := b.ossClient.Bucket(b.bucketName)
    if err != nil {
        return []string{""}, fmt.Errorf("Error getting bucket: %#v", err)
    }

    var options []oss.Option
    options = append(options, oss.Prefix(b.statePrefix+"/"))
    resp, err := bucket.ListObjects(options...)

    if err != nil {
        return nil, err
    }

    result := []string{backend.DefaultStateName}
    prefix := b.statePrefix
    for _, obj := range resp.Objects {
        // we have 3 parts: the state prefix, the workspace name, and the state file: <prefix>/<workspace-name>/<key>
        if path.Join(b.statePrefix, b.stateKey) == obj.Key {
            // filter the default workspace
            continue
        }

        parts := strings.Split(strings.TrimPrefix(obj.Key, prefix+"/"), "/")
        if len(parts) > 0 && parts[0] != "" {
            result = append(result, parts[0])
        }
    }

    sort.Strings(result[1:])
    return result, nil
}

func (b *Backend) DeleteWorkspace(name string) error {
    if name == backend.DefaultStateName || name == "" {
        return fmt.Errorf("can't delete default state")
    }

    client, err := b.remoteClient(name)
    if err != nil {
        return err
    }
    return client.Delete()
}

func (b *Backend) StateMgr(name string) (state.State, error) {
    client, err := b.remoteClient(name)
    if err != nil {
        return nil, err
    }
    stateMgr := &remote.State{Client: client}

    // Check to see if this state already exists.
    existing, err := b.Workspaces()
    if err != nil {
        return nil, err
    }

    log.Printf("[DEBUG] Current workspace name: %s. All workspaces: %#v", name, existing)

    exists := false
    for _, s := range existing {
        if s == name {
            exists = true
            break
        }
    }
    // We need to create the object so it's listed by States.
    if !exists {
        // take a lock on this state while we write it
        lockInfo := state.NewLockInfo()
        lockInfo.Operation = "init"
        lockId, err := client.Lock(lockInfo)
        if err != nil {
            return nil, fmt.Errorf("Failed to lock OSS state: %s", err)
        }

        // Local helper function so we can call it multiple places
        lockUnlock := func(e error) error {
            if err := stateMgr.Unlock(lockId); err != nil {
                return fmt.Errorf(strings.TrimSpace(stateUnlockError), lockId, err)
            }
            return e
        }

        // Grab the value
        if err := stateMgr.RefreshState(); err != nil {
            err = lockUnlock(err)
            return nil, err
        }

        // If we have no state, we have to create an empty state
        if v := stateMgr.State(); v == nil {
            if err := stateMgr.WriteState(states.NewState()); err != nil {
                err = lockUnlock(err)
                return nil, err
            }
            if err := stateMgr.PersistState(); err != nil {
                err = lockUnlock(err)
                return nil, err
            }
        }

        // Unlock, the state should now be initialized
        if err := lockUnlock(nil); err != nil {
            return nil, err
        }
    }
    return stateMgr, nil
}

func (b *Backend) stateFile(name string) string {
    if name == backend.DefaultStateName {
        return path.Join(b.statePrefix, b.stateKey)
    }
    return path.Join(b.statePrefix, name, b.stateKey)
}

func (b *Backend) lockFile(name string) string {
    return b.stateFile(name) + lockFileSuffix
}
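
// For example, with prefix "mystate" and key "first.tfstate" (illustrative
// values), the default workspace lives at "mystate/first.tfstate" and a
// workspace named "dev" at "mystate/dev/first.tfstate"; the corresponding
// lock objects carry the ".tflock" suffix.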

const stateUnlockError = `
Error unlocking Alibaba Cloud OSS state file:

Lock ID: %s
Error message: %#v

You may have to force-unlock this state in order to use it again.
The Alibaba Cloud backend acquires a lock during initialization to ensure the initial state file is created.
`

@ -0,0 +1,206 @@
package oss

import (
    "fmt"
    "os"
    "testing"
    "time"

    "github.com/aliyun/aliyun-oss-go-sdk/oss"
    "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore"
    "github.com/hashicorp/terraform/backend"
    "github.com/hashicorp/terraform/configs/hcl2shim"
    "strings"
)

// verify that we are doing ACC tests or the OSS tests specifically
func testACC(t *testing.T) {
    skip := os.Getenv("TF_ACC") == "" && os.Getenv("TF_OSS_TEST") == ""
    if skip {
        t.Log("oss backend tests require setting TF_ACC or TF_OSS_TEST")
        t.Skip()
    }
    if os.Getenv("ALICLOUD_REGION") == "" {
        os.Setenv("ALICLOUD_REGION", "cn-beijing")
    }
}

func TestBackend_impl(t *testing.T) {
    var _ backend.Backend = new(Backend)
}

func TestBackendConfig(t *testing.T) {
    testACC(t)
    config := map[string]interface{}{
        "region":              "cn-beijing",
        "bucket":              "terraform-backend-oss-test",
        "prefix":              "mystate",
        "key":                 "first.tfstate",
        "tablestore_endpoint": "https://terraformstate.cn-beijing.ots.aliyuncs.com",
        "tablestore_table":    "TableStore",
    }

    b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(config)).(*Backend)

    if !strings.HasPrefix(b.ossClient.Config.Endpoint, "https://oss-cn-beijing") {
        t.Fatalf("Incorrect region was provided")
    }
    if b.bucketName != "terraform-backend-oss-test" {
        t.Fatalf("Incorrect bucketName was provided")
    }
    if b.statePrefix != "mystate" {
        t.Fatalf("Incorrect state file path was provided")
    }
    if b.stateKey != "first.tfstate" {
        t.Fatalf("Incorrect keyName was provided")
    }

    if b.ossClient.Config.AccessKeyID == "" {
        t.Fatalf("No Access Key Id was provided")
    }
    if b.ossClient.Config.AccessKeySecret == "" {
        t.Fatalf("No Secret Access Key was provided")
    }
}

func TestBackendConfigProfile(t *testing.T) {
    testACC(t)
    config := map[string]interface{}{
        "region":              "cn-beijing",
        "bucket":              "terraform-backend-oss-test",
        "prefix":              "mystate",
        "key":                 "first.tfstate",
        "tablestore_endpoint": "https://terraformstate.cn-beijing.ots.aliyuncs.com",
        "tablestore_table":    "TableStore",
        "profile":             "default",
    }

    b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(config)).(*Backend)

    if !strings.HasPrefix(b.ossClient.Config.Endpoint, "https://oss-cn-beijing") {
        t.Fatalf("Incorrect region was provided")
    }
    if b.bucketName != "terraform-backend-oss-test" {
        t.Fatalf("Incorrect bucketName was provided")
    }
    if b.statePrefix != "mystate" {
        t.Fatalf("Incorrect state file path was provided")
    }
    if b.stateKey != "first.tfstate" {
        t.Fatalf("Incorrect keyName was provided")
    }

    if b.ossClient.Config.AccessKeyID == "" {
        t.Fatalf("No Access Key Id was provided")
    }
    if b.ossClient.Config.AccessKeySecret == "" {
        t.Fatalf("No Secret Access Key was provided")
    }
}

func TestBackendConfig_invalidKey(t *testing.T) {
    testACC(t)
    cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{
        "region":              "cn-beijing",
        "bucket":              "terraform-backend-oss-test",
        "prefix":              "/leading-slash",
        "name":                "/test.tfstate",
        "tablestore_endpoint": "https://terraformstate.cn-beijing.ots.aliyuncs.com",
        "tablestore_table":    "TableStore",
    })

    _, results := New().PrepareConfig(cfg)
    if !results.HasErrors() {
        t.Fatal("expected config validation error")
    }
}

func TestBackend(t *testing.T) {
    testACC(t)

    bucketName := fmt.Sprintf("terraform-remote-oss-test-%x", time.Now().Unix())
    statePrefix := "multi/level/path/"

    b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
        "bucket": bucketName,
        "prefix": statePrefix,
    })).(*Backend)

    b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
        "bucket": bucketName,
        "prefix": statePrefix,
    })).(*Backend)

    createOSSBucket(t, b1.ossClient, bucketName)
    defer deleteOSSBucket(t, b1.ossClient, bucketName)

    backend.TestBackendStates(t, b1)
    backend.TestBackendStateLocks(t, b1, b2)
    backend.TestBackendStateForceUnlock(t, b1, b2)
}

func createOSSBucket(t *testing.T, ossClient *oss.Client, bucketName string) {
    // Be clear about what we're doing in case the user needs to clean this up later.
    if err := ossClient.CreateBucket(bucketName); err != nil {
        t.Fatal("failed to create test OSS bucket:", err)
    }
}

func deleteOSSBucket(t *testing.T, ossClient *oss.Client, bucketName string) {
    warning := "WARNING: Failed to delete the test OSS bucket. It may have been left in your Alibaba Cloud account and may incur storage charges. (error was %s)"

    // first we have to get rid of the env objects, or we can't delete the bucket
    bucket, err := ossClient.Bucket(bucketName)
    if err != nil {
        t.Fatal("Error getting bucket:", err)
        return
    }
    objects, err := bucket.ListObjects()
    if err != nil {
        t.Logf(warning, err)
        return
    }
    for _, obj := range objects.Objects {
        if err := bucket.DeleteObject(obj.Key); err != nil {
            // this will need cleanup no matter what, so just warn and exit
            t.Logf(warning, err)
            return
        }
    }

    if err := ossClient.DeleteBucket(bucketName); err != nil {
        t.Logf(warning, err)
    }
}

// create the TableStore table used by the lock tests
func createTablestoreTable(t *testing.T, otsClient *tablestore.TableStoreClient, tableName string) {
    tableMeta := new(tablestore.TableMeta)
    tableMeta.TableName = tableName
    tableMeta.AddPrimaryKeyColumn("testbackend", tablestore.PrimaryKeyType_STRING)

    tableOption := new(tablestore.TableOption)
    tableOption.TimeToAlive = -1
    tableOption.MaxVersion = 1

    reservedThroughput := new(tablestore.ReservedThroughput)

    _, err := otsClient.CreateTable(&tablestore.CreateTableRequest{
        TableMeta:          tableMeta,
        TableOption:        tableOption,
        ReservedThroughput: reservedThroughput,
    })
    if err != nil {
        t.Fatal(err)
    }
}

func deleteTablestoreTable(t *testing.T, otsClient *tablestore.TableStoreClient, tableName string) {
    params := &tablestore.DeleteTableRequest{
        TableName: tableName,
    }
    _, err := otsClient.DeleteTable(params)
    if err != nil {
        t.Logf("WARNING: Failed to delete the test TableStore table %q. It has been left in your Alibaba Cloud account and may incur charges. (error was %s)", tableName, err)
    }
}

@ -0,0 +1,484 @@
package oss

import (
    "bytes"
    "crypto/md5"
    "encoding/json"
    "fmt"
    "io"

    "encoding/hex"
    "github.com/aliyun/aliyun-oss-go-sdk/oss"
    "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore"
    "github.com/hashicorp/go-multierror"
    uuid "github.com/hashicorp/go-uuid"
    "github.com/hashicorp/terraform/helper/hashcode"
    "github.com/hashicorp/terraform/state"
    "github.com/hashicorp/terraform/state/remote"
    "github.com/pkg/errors"
    "log"
    "sync"
    "time"
)

// Store the last saved serial in tablestore with this suffix for consistency checks.
const (
    stateIDSuffix = "-md5"
    statePKValue  = "terraform-remote-state-lock"
)

var (
    // The amount of time we will retry a state waiting for it to match the
    // expected checksum.
    consistencyRetryTimeout = 10 * time.Second

    // delay when polling the state
    consistencyRetryPollInterval = 2 * time.Second
)

// test hook called when checksums don't match
var testChecksumHook func()

type TableStorePrimaryKeyMeta struct {
    PKName string
    PKType string
}

type RemoteClient struct {
    ossClient            *oss.Client
    otsClient            *tablestore.TableStoreClient
    bucketName           string
    stateFile            string
    lockFile             string
    serverSideEncryption bool
    acl                  string
    info                 *state.LockInfo
    mu                   sync.Mutex
    otsTable             string
    otsTablePK           TableStorePrimaryKeyMeta
}

func (c *RemoteClient) Get() (payload *remote.Payload, err error) {
    deadline := time.Now().Add(consistencyRetryTimeout)

    // If we have a checksum, and the returned payload doesn't match, we retry
    // up until deadline.
    for {
        payload, err = c.getObj()
        if err != nil {
            return nil, err
        }

        // If the remote state was manually removed the payload will be nil,
        // but if there's still a digest entry for that state we will still try
        // to compare the MD5 below.
        var digest []byte
        if payload != nil {
            digest = payload.MD5
        }

        // verify that this state is what we expect
        if expected, err := c.getMD5(); err != nil {
            log.Printf("[WARN] failed to fetch state md5: %s", err)
        } else if len(expected) > 0 && !bytes.Equal(expected, digest) {
            log.Printf("[WARN] state md5 mismatch: expected '%x', got '%x'", expected, digest)

            if testChecksumHook != nil {
                testChecksumHook()
            }

            if time.Now().Before(deadline) {
                time.Sleep(consistencyRetryPollInterval)
                log.Println("[INFO] retrying OSS RemoteClient.Get...")
                continue
            }

            return nil, fmt.Errorf(errBadChecksumFmt, digest)
        }

        break
    }
    return payload, nil
}

func (c *RemoteClient) Put(data []byte) error {
    bucket, err := c.ossClient.Bucket(c.bucketName)
    if err != nil {
        return fmt.Errorf("Error getting bucket: %#v", err)
    }

    body := bytes.NewReader(data)

    var options []oss.Option
    if c.acl != "" {
        options = append(options, oss.ACL(oss.ACLType(c.acl)))
    }
    options = append(options, oss.ContentType("application/json"))
    if c.serverSideEncryption {
        options = append(options, oss.ServerSideEncryption("AES256"))
    }
    options = append(options, oss.ContentLength(int64(len(data))))

    if body != nil {
        if err := bucket.PutObject(c.stateFile, body, options...); err != nil {
            return fmt.Errorf("Failed to upload state %s: %#v", c.stateFile, err)
        }
    }

    sum := md5.Sum(data)
    if err := c.putMD5(sum[:]); err != nil {
        // if this errors out, we unfortunately have to error out altogether,
        // since the next Get will inevitably fail.
        return fmt.Errorf("Failed to store state MD5: %s", err)
    }
    return nil
}

func (c *RemoteClient) Delete() error {
    bucket, err := c.ossClient.Bucket(c.bucketName)
    if err != nil {
        return fmt.Errorf("Error getting bucket %s: %#v", c.bucketName, err)
    }

    log.Printf("[DEBUG] Deleting remote state from OSS: %#v", c.stateFile)

    if err := bucket.DeleteObject(c.stateFile); err != nil {
        return fmt.Errorf("Error deleting state %s: %#v", c.stateFile, err)
    }

    if err := c.deleteMD5(); err != nil {
        log.Printf("[WARN] Error deleting state MD5: %s", err)
    }
    return nil
}

func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) {
    if c.otsTable == "" {
        return "", nil
    }

    if info.ID == "" {
        lockID, err := uuid.GenerateUUID()
        if err != nil {
            return "", err
        }
        info.ID = lockID
    }

    putParams := &tablestore.PutRowChange{
        TableName: c.otsTable,
        PrimaryKey: &tablestore.PrimaryKey{
            PrimaryKeys: []*tablestore.PrimaryKeyColumn{
                {
                    ColumnName: c.otsTablePK.PKName,
                    Value:      c.getPKValue(),
                },
            },
        },
        Columns: []tablestore.AttributeColumn{
            {
                ColumnName: "LockID",
                Value:      c.lockFile,
            },
            {
                ColumnName: "Info",
                Value:      string(info.Marshal()),
            },
        },
        Condition: &tablestore.RowCondition{
            RowExistenceExpectation: tablestore.RowExistenceExpectation_EXPECT_NOT_EXIST,
        },
    }

    log.Printf("[DEBUG] Recording state lock in tablestore: %#v", putParams)

    _, err := c.otsClient.PutRow(&tablestore.PutRowRequest{
        PutRowChange: putParams,
    })
    if err != nil {
        log.Printf("[WARN] Error storing state lock in tablestore: %#v", err)
        lockInfo, infoErr := c.getLockInfo()
        if infoErr != nil {
            log.Printf("[WARN] Error getting lock info: %#v", infoErr)
            err = multierror.Append(err, infoErr)
        }
        lockErr := &state.LockError{
            Err:  err,
            Info: lockInfo,
        }
        log.Printf("[WARN] state lock error: %#v", lockErr)
        return "", lockErr
    }

    return info.ID, nil
}
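
// The conditional PutRow above (RowExistenceExpectation_EXPECT_NOT_EXIST) is
// what makes the TableStore row act as a mutex: the insert succeeds for
// exactly one writer, and every other writer receives an error carrying the
// current holder's lock info.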

func (c *RemoteClient) getMD5() ([]byte, error) {
    if c.otsTable == "" {
        return nil, nil
    }

    getParams := &tablestore.SingleRowQueryCriteria{
        TableName: c.otsTable,
        PrimaryKey: &tablestore.PrimaryKey{
            PrimaryKeys: []*tablestore.PrimaryKeyColumn{
                {
                    ColumnName: c.otsTablePK.PKName,
                    Value:      c.getPKValue(),
                },
            },
        },
        ColumnsToGet: []string{"LockID", "Digest"},
        MaxVersion:   1,
    }

    log.Printf("[DEBUG] Retrieving state serial in tablestore: %#v", getParams)

    object, err := c.otsClient.GetRow(&tablestore.GetRowRequest{
        SingleRowQueryCriteria: getParams,
    })

    if err != nil {
        return nil, err
    }

    var val string
    if v, ok := object.GetColumnMap().Columns["Digest"]; ok && len(v) > 0 {
        val = v[0].Value.(string)
    }

    sum, err := hex.DecodeString(val)
    if err != nil || len(sum) != md5.Size {
        return nil, errors.New("invalid md5")
    }

    return sum, nil
}

// store the hash of the state so that clients can check for stale state files.
func (c *RemoteClient) putMD5(sum []byte) error {
    if c.otsTable == "" {
        return nil
    }

    if len(sum) != md5.Size {
        return errors.New("invalid payload md5")
    }

    putParams := &tablestore.PutRowChange{
        TableName: c.otsTable,
        PrimaryKey: &tablestore.PrimaryKey{
            PrimaryKeys: []*tablestore.PrimaryKeyColumn{
                {
                    ColumnName: c.otsTablePK.PKName,
                    Value:      c.getPKValue(),
                },
            },
        },
        Columns: []tablestore.AttributeColumn{
            {
                ColumnName: "LockID",
                Value:      c.lockPath() + stateIDSuffix,
            },
            {
                ColumnName: "Digest",
                Value:      hex.EncodeToString(sum),
            },
        },
        Condition: &tablestore.RowCondition{
            RowExistenceExpectation: tablestore.RowExistenceExpectation_EXPECT_NOT_EXIST,
        },
    }

    log.Printf("[DEBUG] Recording state serial in tablestore: %#v", putParams)

    _, err := c.otsClient.PutRow(&tablestore.PutRowRequest{
        PutRowChange: putParams,
    })

    if err != nil {
        log.Printf("[WARN] failed to record state serial in tablestore: %s", err)
    }

    return nil
}
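
// Note that putMD5 deliberately swallows the PutRow error above (it only
// logs a warning), so failing to record the digest does not fail the
// enclosing Put; the digest is a consistency hint rather than required state.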

// remove the hash value for a deleted state
func (c *RemoteClient) deleteMD5() error {
    if c.otsTable == "" {
        return nil
    }

    params := &tablestore.DeleteRowRequest{
        DeleteRowChange: &tablestore.DeleteRowChange{
            TableName: c.otsTable,
            PrimaryKey: &tablestore.PrimaryKey{
                PrimaryKeys: []*tablestore.PrimaryKeyColumn{
                    {
                        ColumnName: c.otsTablePK.PKName,
                        Value:      c.getPKValue(),
                    },
                },
            },
            Condition: &tablestore.RowCondition{
                RowExistenceExpectation: tablestore.RowExistenceExpectation_EXPECT_EXIST,
            },
        },
    }

    log.Printf("[DEBUG] Deleting state serial in tablestore: %#v", params)

    if _, err := c.otsClient.DeleteRow(params); err != nil {
        return err
    }

    return nil
}

func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) {
    getParams := &tablestore.SingleRowQueryCriteria{
        TableName: c.otsTable,
        PrimaryKey: &tablestore.PrimaryKey{
            PrimaryKeys: []*tablestore.PrimaryKeyColumn{
                {
                    ColumnName: c.otsTablePK.PKName,
                    Value:      c.getPKValue(),
                },
            },
        },
        ColumnsToGet: []string{"LockID", "Info"},
        MaxVersion:   1,
    }

    log.Printf("[DEBUG] Retrieving state lock info from tablestore: %#v", getParams)

    object, err := c.otsClient.GetRow(&tablestore.GetRowRequest{
        SingleRowQueryCriteria: getParams,
    })
    if err != nil {
        return nil, err
    }

    var infoData string
    if v, ok := object.GetColumnMap().Columns["Info"]; ok && len(v) > 0 {
        infoData = v[0].Value.(string)
    }
    lockInfo := &state.LockInfo{}
    err = json.Unmarshal([]byte(infoData), lockInfo)
    if err != nil {
        return nil, err
    }
    return lockInfo, nil
}

func (c *RemoteClient) Unlock(id string) error {
    if c.otsTable == "" {
        return nil
    }

    lockErr := &state.LockError{}

    lockInfo, err := c.getLockInfo()
    if err != nil {
        lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err)
        return lockErr
    }
    lockErr.Info = lockInfo

    if lockInfo.ID != id {
        lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id)
        return lockErr
    }
    params := &tablestore.DeleteRowRequest{
        DeleteRowChange: &tablestore.DeleteRowChange{
            TableName: c.otsTable,
            PrimaryKey: &tablestore.PrimaryKey{
                PrimaryKeys: []*tablestore.PrimaryKeyColumn{
                    {
                        ColumnName: c.otsTablePK.PKName,
                        Value:      c.getPKValue(),
                    },
                },
            },
            Condition: &tablestore.RowCondition{
                RowExistenceExpectation: tablestore.RowExistenceExpectation_EXPECT_EXIST,
            },
        },
    }

    log.Printf("[DEBUG] Deleting state lock from tablestore: %#v", params)

    _, err = c.otsClient.DeleteRow(params)

    if err != nil {
        lockErr.Err = err
        return lockErr
    }

    return nil
}

func (c *RemoteClient) lockPath() string {
    return fmt.Sprintf("%s/%s", c.bucketName, c.stateFile)
}

func (c *RemoteClient) getObj() (*remote.Payload, error) {
    bucket, err := c.ossClient.Bucket(c.bucketName)
    if err != nil {
        return nil, fmt.Errorf("Error getting bucket %s: %#v", c.bucketName, err)
    }

    if exist, err := bucket.IsObjectExist(c.stateFile); err != nil {
        return nil, fmt.Errorf("Error checking if object %s exists: %#v", c.stateFile, err)
    } else if !exist {
        return nil, nil
    }

    var options []oss.Option
    output, err := bucket.GetObject(c.stateFile, options...)
    if err != nil {
        return nil, fmt.Errorf("Error getting object: %#v", err)
    }

    buf := bytes.NewBuffer(nil)
    if _, err := io.Copy(buf, output); err != nil {
        return nil, fmt.Errorf("Failed to read remote state: %s", err)
    }
    sum := md5.Sum(buf.Bytes())
    payload := &remote.Payload{
        Data: buf.Bytes(),
        MD5:  sum[:],
    }

    // If there was no data, then return nil
    if len(payload.Data) == 0 {
        return nil, nil
    }

    return payload, nil
}

func (c *RemoteClient) getPKValue() (value interface{}) {
    value = statePKValue
    if c.otsTablePK.PKType == "Integer" {
        value = hashcode.String(statePKValue)
    } else if c.otsTablePK.PKType == "Binary" {
        value = stringToBin(statePKValue)
    }
    return
}

func stringToBin(s string) (binString string) {
    for _, c := range s {
        binString = fmt.Sprintf("%s%b", binString, c)
    }
    return
}
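
// stringToBin concatenates the binary digits of each rune with no padding or
// separator; for example, "ab" becomes "11000011100010". It is only used to
// derive a Binary-typed primary key value from statePKValue.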

const errBadChecksumFmt = `state data in OSS does not have the expected content.

This may be caused by unusually long delays in OSS processing a previous state
update. Please wait for a minute or two and try again. If this problem
persists, and neither OSS nor TableStore are experiencing an outage, you may need
to manually verify the remote state and update the Digest value stored in the
TableStore table to the following value: %x
`

@ -0,0 +1,330 @@
package oss

import (
    "fmt"
    "strings"
    "testing"
    "time"

    "bytes"
    "crypto/md5"
    "github.com/hashicorp/terraform/backend"
    "github.com/hashicorp/terraform/state"
    "github.com/hashicorp/terraform/state/remote"
    "github.com/hashicorp/terraform/states/statefile"
)

// NOTE: Before running this testcase, please create an OTS instance called 'tf-oss-remote'
var RemoteTestUsedOTSEndpoint = "https://tf-oss-remote.cn-hangzhou.ots.aliyuncs.com"

func TestRemoteClient_impl(t *testing.T) {
    var _ remote.Client = new(RemoteClient)
    var _ remote.ClientLocker = new(RemoteClient)
}

func TestRemoteClient(t *testing.T) {
    testACC(t)
    bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix())
    path := "testState"

    b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
        "bucket":  bucketName,
        "prefix":  path,
        "encrypt": true,
    })).(*Backend)

    createOSSBucket(t, b.ossClient, bucketName)
    defer deleteOSSBucket(t, b.ossClient, bucketName)

    state, err := b.StateMgr(backend.DefaultStateName)
    if err != nil {
        t.Fatal(err)
    }

    remote.TestClient(t, state.(*remote.State).Client)
}

func TestRemoteClientLocks(t *testing.T) {
    testACC(t)
    bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix())
    tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix())
    path := "testState"

    b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
        "bucket":              bucketName,
        "prefix":              path,
        "encrypt":             true,
        "tablestore_table":    tableName,
        "tablestore_endpoint": RemoteTestUsedOTSEndpoint,
    })).(*Backend)

    b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
        "bucket":              bucketName,
        "prefix":              path,
        "encrypt":             true,
        "tablestore_table":    tableName,
        "tablestore_endpoint": RemoteTestUsedOTSEndpoint,
    })).(*Backend)

    createOSSBucket(t, b1.ossClient, bucketName)
    defer deleteOSSBucket(t, b1.ossClient, bucketName)
    createTablestoreTable(t, b1.otsClient, tableName)
    defer deleteTablestoreTable(t, b1.otsClient, tableName)

    s1, err := b1.StateMgr(backend.DefaultStateName)
    if err != nil {
        t.Fatal(err)
    }

    s2, err := b2.StateMgr(backend.DefaultStateName)
    if err != nil {
        t.Fatal(err)
    }

    remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client)
}

// verify that we can unlock a state with an existing lock
func TestRemoteForceUnlock(t *testing.T) {
    testACC(t)
    bucketName := fmt.Sprintf("tf-remote-oss-test-force-%x", time.Now().Unix())
    tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix())
    path := "testState"

    b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
        "bucket":              bucketName,
        "prefix":              path,
        "encrypt":             true,
        "tablestore_table":    tableName,
        "tablestore_endpoint": RemoteTestUsedOTSEndpoint,
    })).(*Backend)

    b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
        "bucket":              bucketName,
        "prefix":              path,
        "encrypt":             true,
        "tablestore_table":    tableName,
        "tablestore_endpoint": RemoteTestUsedOTSEndpoint,
    })).(*Backend)

    createOSSBucket(t, b1.ossClient, bucketName)
    defer deleteOSSBucket(t, b1.ossClient, bucketName)
    createTablestoreTable(t, b1.otsClient, tableName)
    defer deleteTablestoreTable(t, b1.otsClient, tableName)

    // first test with default
    s1, err := b1.StateMgr(backend.DefaultStateName)
    if err != nil {
        t.Fatal(err)
    }

    info := state.NewLockInfo()
    info.Operation = "test"
    info.Who = "clientA"

    lockID, err := s1.Lock(info)
    if err != nil {
        t.Fatal("unable to get initial lock:", err)
    }

    // s1 is now locked, get the same state through s2 and unlock it
    s2, err := b2.StateMgr(backend.DefaultStateName)
    if err != nil {
        t.Fatal("failed to get default state to force unlock:", err)
    }

    if err := s2.Unlock(lockID); err != nil {
        t.Fatal("failed to force-unlock default state")
    }

    // now try the same thing with a named state
    s1, err = b1.StateMgr("test")
    if err != nil {
        t.Fatal(err)
    }

    info = state.NewLockInfo()
    info.Operation = "test"
    info.Who = "clientA"

    lockID, err = s1.Lock(info)
    if err != nil {
        t.Fatal("unable to get initial lock:", err)
    }

    // s1 is now locked, get the same state through s2 and unlock it
    s2, err = b2.StateMgr("test")
    if err != nil {
        t.Fatal("failed to get named state to force unlock:", err)
    }

    if err = s2.Unlock(lockID); err != nil {
        t.Fatal("failed to force-unlock named state")
    }
}

func TestRemoteClient_clientMD5(t *testing.T) {
    testACC(t)

    bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix())
    tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix())
    path := "testState"

    b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
        "bucket":              bucketName,
        "prefix":              path,
        "tablestore_table":    tableName,
        "tablestore_endpoint": RemoteTestUsedOTSEndpoint,
    })).(*Backend)

    createOSSBucket(t, b.ossClient, bucketName)
    defer deleteOSSBucket(t, b.ossClient, bucketName)
    createTablestoreTable(t, b.otsClient, tableName)
    defer deleteTablestoreTable(t, b.otsClient, tableName)

    s, err := b.StateMgr(backend.DefaultStateName)
    if err != nil {
        t.Fatal(err)
    }
    client := s.(*remote.State).Client.(*RemoteClient)

    sum := md5.Sum([]byte("test"))

    if err := client.putMD5(sum[:]); err != nil {
        t.Fatal(err)
    }

    getSum, err := client.getMD5()
    if err != nil {
        t.Fatal(err)
    }

    if !bytes.Equal(getSum, sum[:]) {
        t.Fatalf("getMD5 returned the wrong checksum: expected %x, got %x", sum[:], getSum)
    }

    if err := client.deleteMD5(); err != nil {
        t.Fatal(err)
    }

    if getSum, err := client.getMD5(); err == nil {
        t.Fatalf("expected getMD5 error, got none. checksum: %x", getSum)
    }
}

// verify that a client won't return a state with an incorrect checksum.
func TestRemoteClient_stateChecksum(t *testing.T) {
    testACC(t)

    bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix())
    tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix())
    path := "testState"

    b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
        "bucket":              bucketName,
        "prefix":              path,
        "tablestore_table":    tableName,
        "tablestore_endpoint": RemoteTestUsedOTSEndpoint,
    })).(*Backend)

    createOSSBucket(t, b1.ossClient, bucketName)
    defer deleteOSSBucket(t, b1.ossClient, bucketName)
    createTablestoreTable(t, b1.otsClient, tableName)
    defer deleteTablestoreTable(t, b1.otsClient, tableName)

    s1, err := b1.StateMgr(backend.DefaultStateName)
    if err != nil {
        t.Fatal(err)
    }
    client1 := s1.(*remote.State).Client

    // create an old and new state version to persist
    s := state.TestStateInitial()
    sf := &statefile.File{State: s}
    var oldState bytes.Buffer
    if err := statefile.Write(sf, &oldState); err != nil {
        t.Fatal(err)
    }
    sf.Serial++
    var newState bytes.Buffer
    if err := statefile.Write(sf, &newState); err != nil {
        t.Fatal(err)
    }

    // Use b2 without a tablestore_table to bypass the lock table to write the state directly.
    // client2 will write the "incorrect" state, simulating OSS eventual consistency delays
    b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
        "bucket": bucketName,
        "prefix": path,
    })).(*Backend)
    s2, err := b2.StateMgr(backend.DefaultStateName)
    if err != nil {
        t.Fatal(err)
    }
    client2 := s2.(*remote.State).Client

    // write the new state through client2 so that there is no checksum yet
    if err := client2.Put(newState.Bytes()); err != nil {
        t.Fatal(err)
    }

    // verify that we can pull a state without a checksum
    if _, err := client1.Get(); err != nil {
        t.Fatal(err)
    }

    // write the new state back with its checksum
    if err := client1.Put(newState.Bytes()); err != nil {
        t.Fatal(err)
    }

    // put an empty state in place to check for panics during get
    if err := client2.Put([]byte{}); err != nil {
        t.Fatal(err)
    }

    // remove the timeouts so we can fail immediately
    origTimeout := consistencyRetryTimeout
    origInterval := consistencyRetryPollInterval
    defer func() {
        consistencyRetryTimeout = origTimeout
        consistencyRetryPollInterval = origInterval
    }()
    consistencyRetryTimeout = 0
    consistencyRetryPollInterval = 0

    // fetching an empty state through client1 should now error out due to a
    // mismatched checksum.
    if _, err := client1.Get(); !strings.HasPrefix(err.Error(), errBadChecksumFmt[:80]) {
        t.Fatalf("expected state checksum error: got %s", err)
    }

    // put the old state in place of the new, without updating the checksum
    if err := client2.Put(oldState.Bytes()); err != nil {
        t.Fatal(err)
    }

    // fetching the wrong state through client1 should now error out due to a
    // mismatched checksum.
    if _, err := client1.Get(); !strings.HasPrefix(err.Error(), errBadChecksumFmt[:80]) {
        t.Fatalf("expected state checksum error: got %s", err)
    }

    // update the state with the correct one after we Get again
    testChecksumHook = func() {
        if err := client2.Put(newState.Bytes()); err != nil {
            t.Fatal(err)
        }
        testChecksumHook = nil
    }

    consistencyRetryTimeout = origTimeout

    // this final Get will initially fail the checksum verification; the above
    // callback will update the state with the correct version, and Get should
    // retry automatically.
    if _, err := client1.Get(); err != nil {
        t.Fatal(err)
    }
}
@ -31,6 +31,13 @@ func New() backend.Backend {
            Description: "Name of the automatically managed Postgres schema to store state",
            Default:     "terraform_remote_state",
        },

        "skip_schema_creation": &schema.Schema{
            Type:        schema.TypeBool,
            Optional:    true,
            Description: "If set to `true`, Terraform won't try to create the Postgres schema",
            Default:     false,
        },
    },
}

@ -64,9 +71,25 @@ func (b *Backend) configure(ctx context.Context) error {

    // Prepare database schema, tables, & indexes.
    var query string
    query = `CREATE SCHEMA IF NOT EXISTS %s`
    if _, err := db.Exec(fmt.Sprintf(query, b.schemaName)); err != nil {
        return err

    if !data.Get("skip_schema_creation").(bool) {
        // list all schemas to see if it exists
        var count int
        query = `select count(1) from information_schema.schemata where lower(schema_name) = lower('%s')`
        if err := db.QueryRow(fmt.Sprintf(query, b.schemaName)).Scan(&count); err != nil {
            return err
        }

        // skip schema creation if schema already exists
        // `CREATE SCHEMA IF NOT EXISTS` is to be avoided if ever
        // a user hasn't been granted the `CREATE SCHEMA` privilege
        if count < 1 {
            // tries to create the schema
            query = `CREATE SCHEMA IF NOT EXISTS %s`
            if _, err := db.Exec(fmt.Sprintf(query, b.schemaName)); err != nil {
                return err
            }
        }
    }
    query = `CREATE TABLE IF NOT EXISTS %s.%s (
        id SERIAL PRIMARY KEY,

@ -73,6 +73,50 @@ func TestBackendConfig(t *testing.T) {
    }
}

func TestBackendConfigSkipSchema(t *testing.T) {
    testACC(t)
    connStr := getDatabaseUrl()
    schemaName := fmt.Sprintf("terraform_%s", t.Name())
    db, err := sql.Open("postgres", connStr)
    if err != nil {
        t.Fatal(err)
    }

    // create the schema as a prerequisite
    db.Query(fmt.Sprintf("CREATE SCHEMA IF NOT EXISTS %s", schemaName))
    defer db.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName))

    config := backend.TestWrapConfig(map[string]interface{}{
        "conn_str":             connStr,
        "schema_name":          schemaName,
        "skip_schema_creation": true,
    })
    b := backend.TestBackendConfig(t, New(), config).(*Backend)

    if b == nil {
        t.Fatal("Backend could not be configured")
    }

    _, err = b.db.Query(fmt.Sprintf("SELECT name, data FROM %s.%s LIMIT 1", schemaName, statesTableName))
    if err != nil {
        t.Fatal(err)
    }

    _, err = b.StateMgr(backend.DefaultStateName)
    if err != nil {
        t.Fatal(err)
    }

    s, err := b.StateMgr(backend.DefaultStateName)
    if err != nil {
        t.Fatal(err)
    }
    c := s.(*remote.State).Client.(*RemoteClient)
    if c.Name != backend.DefaultStateName {
        t.Fatal("RemoteClient name is not configured")
    }
}

func TestBackendStates(t *testing.T) {
    testACC(t)
    connStr := getDatabaseUrl()
@ -2,7 +2,9 @@ package s3

import (
    "context"
    "encoding/base64"
    "errors"
    "fmt"
    "strings"

    "github.com/aws/aws-sdk-go/aws"

@ -185,6 +187,21 @@ func New() backend.Backend {
            Default:     false,
        },

        "sse_customer_key": {
            Type:        schema.TypeString,
            Optional:    true,
            Description: "The base64-encoded encryption key to use for server-side encryption with customer-provided keys (SSE-C).",
            DefaultFunc: schema.EnvDefaultFunc("AWS_SSE_CUSTOMER_KEY", ""),
            Sensitive:   true,
            ValidateFunc: func(v interface{}, s string) ([]string, []error) {
                key := v.(string)
                if key != "" && len(key) != 44 {
                    return nil, []error{errors.New("sse_customer_key must be 44 characters in length (256 bits, base64 encoded)")}
                }
                return nil, nil
            },
        },
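
        // A valid sse_customer_key is 32 random bytes, base64-encoded, which
        // always yields 44 characters; e.g. a key generated with
        // `openssl rand -base64 32` (illustrative command, not part of this
        // change) passes the length check above.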

        "role_arn": {
            Type:     schema.TypeString,
            Optional: true,

@ -255,13 +272,14 @@ type Backend struct {
    s3Client  *s3.S3
    dynClient *dynamodb.DynamoDB

    bucketName           string
    keyName              string
    serverSideEncryption bool
    acl                  string
    kmsKeyID             string
    ddbTable             string
    workspaceKeyPrefix   string
    bucketName            string
    keyName               string
    serverSideEncryption  bool
    customerEncryptionKey []byte
    acl                   string
    kmsKeyID              string
    ddbTable              string
    workspaceKeyPrefix    string
}

func (b *Backend) configure(ctx context.Context) error {

@ -280,10 +298,23 @@ func (b *Backend) configure(ctx context.Context) error {

    b.bucketName = data.Get("bucket").(string)
    b.keyName = data.Get("key").(string)
    b.serverSideEncryption = data.Get("encrypt").(bool)
    b.acl = data.Get("acl").(string)
    b.kmsKeyID = data.Get("kms_key_id").(string)
    b.workspaceKeyPrefix = data.Get("workspace_key_prefix").(string)
    b.serverSideEncryption = data.Get("encrypt").(bool)
    b.kmsKeyID = data.Get("kms_key_id").(string)

    customerKeyString := data.Get("sse_customer_key").(string)
    if customerKeyString != "" {
        if b.kmsKeyID != "" {
            return errors.New(encryptionKeyConflictError)
        }

        var err error
        b.customerEncryptionKey, err = base64.StdEncoding.DecodeString(customerKeyString)
        if err != nil {
            return fmt.Errorf("Failed to decode sse_customer_key: %s", err.Error())
        }
    }

    b.ddbTable = data.Get("dynamodb_table").(string)
    if b.ddbTable == "" {

@ -330,3 +361,9 @@ func (b *Backend) configure(ctx context.Context) error {

    return nil
}

const encryptionKeyConflictError = `Cannot have both kms_key_id and sse_customer_key set.

The kms_key_id is used for encryption with KMS-Managed Keys (SSE-KMS)
while sse_customer_key is used for encryption with customer-managed keys (SSE-C).
Please choose one or the other.`

@ -108,14 +108,15 @@ func (b *Backend) remoteClient(name string) (*RemoteClient, error) {
    }

    client := &RemoteClient{
        s3Client:             b.s3Client,
        dynClient:            b.dynClient,
        bucketName:           b.bucketName,
        path:                 b.path(name),
        serverSideEncryption: b.serverSideEncryption,
        acl:                  b.acl,
        kmsKeyID:             b.kmsKeyID,
        ddbTable:             b.ddbTable,
        s3Client:              b.s3Client,
        dynClient:             b.dynClient,
        bucketName:            b.bucketName,
        path:                  b.path(name),
        serverSideEncryption:  b.serverSideEncryption,
        customerEncryptionKey: b.customerEncryptionKey,
        acl:                   b.acl,
        kmsKeyID:              b.kmsKeyID,
        ddbTable:              b.ddbTable,
    }

    return client, nil

@ -11,7 +11,7 @@ import (
    "github.com/aws/aws-sdk-go/service/dynamodb"
    "github.com/aws/aws-sdk-go/service/s3"
    "github.com/hashicorp/terraform/backend"
    "github.com/hashicorp/terraform/config/hcl2shim"
    "github.com/hashicorp/terraform/configs/hcl2shim"
    "github.com/hashicorp/terraform/state/remote"
    "github.com/hashicorp/terraform/states"
)

@ -82,6 +82,58 @@ func TestBackendConfig_invalidKey(t *testing.T) {
    }
}

func TestBackendConfig_invalidSSECustomerKeyLength(t *testing.T) {
    testACC(t)
    cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{
        "region":           "us-west-1",
        "bucket":           "tf-test",
        "encrypt":          true,
        "key":              "state",
        "dynamodb_table":   "dynamoTable",
        "sse_customer_key": "key",
    })

    _, diags := New().PrepareConfig(cfg)
    if !diags.HasErrors() {
        t.Fatal("expected error for invalid sse_customer_key length")
    }
}

func TestBackendConfig_invalidSSECustomerKeyEncoding(t *testing.T) {
    testACC(t)
    cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{
        "region":           "us-west-1",
        "bucket":           "tf-test",
        "encrypt":          true,
        "key":              "state",
        "dynamodb_table":   "dynamoTable",
        "sse_customer_key": "====CT70aTYB2JGff7AjQtwbiLkwH4npICay1PWtmdka",
    })

    diags := New().Configure(cfg)
    if !diags.HasErrors() {
        t.Fatal("expected error for failing to decode sse_customer_key")
    }
}

func TestBackendConfig_conflictingEncryptionSchema(t *testing.T) {
    testACC(t)
    cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{
        "region":           "us-west-1",
        "bucket":           "tf-test",
        "key":              "state",
        "encrypt":          true,
        "dynamodb_table":   "dynamoTable",
        "sse_customer_key": "1hwbcNPGWL+AwDiyGmRidTWAEVmCWMKbEHA+Es8w75o=",
        "kms_key_id":       "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
    })

    diags := New().Configure(cfg)
    if !diags.HasErrors() {
        t.Fatal("expected error for simultaneous usage of kms_key_id and sse_customer_key")
    }
}

func TestBackend(t *testing.T) {
    testACC(t)

@ -129,6 +181,23 @@ func TestBackendLocked(t *testing.T) {
    backend.TestBackendStateForceUnlock(t, b1, b2)
}

func TestBackendSSECustomerKey(t *testing.T) {
    testACC(t)
    bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix())

    b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
        "bucket":           bucketName,
        "encrypt":          true,
        "key":              "test-SSE-C",
        "sse_customer_key": "4Dm1n4rphuFgawxuzY/bEfvLf6rYK0gIjfaDSLlfXNk=",
    })).(*Backend)

    createS3Bucket(t, b.s3Client, bucketName)
    defer deleteS3Bucket(t, b.s3Client, bucketName)

    backend.TestBackendStates(t, b)
}

// add some extra junk in S3 to try and confuse the env listing.
func TestBackendExtraPaths(t *testing.T) {
    testACC(t)

@ -3,6 +3,7 @@ package s3
import (
    "bytes"
    "crypto/md5"
    "encoding/base64"
    "encoding/hex"
    "encoding/json"
    "errors"

@ -23,19 +24,21 @@ import (

// Store the last saved serial in dynamo with this suffix for consistency checks.
const (
    s3EncryptionAlgorithm  = "AES256"
    stateIDSuffix          = "-md5"
    s3ErrCodeInternalError = "InternalError"
)

type RemoteClient struct {
    s3Client             *s3.S3
    dynClient            *dynamodb.DynamoDB
    bucketName           string
    path                 string
    serverSideEncryption bool
    acl                  string
    kmsKeyID             string
    ddbTable             string
    s3Client              *s3.S3
    dynClient             *dynamodb.DynamoDB
    bucketName            string
    path                  string
    serverSideEncryption  bool
    customerEncryptionKey []byte
    acl                   string
    kmsKeyID              string
    ddbTable              string
}

var (

@ -98,10 +101,18 @@ func (c *RemoteClient) get() (*remote.Payload, error) {
    var output *s3.GetObjectOutput
    var err error

    output, err = c.s3Client.GetObject(&s3.GetObjectInput{
    input := &s3.GetObjectInput{
        Bucket: &c.bucketName,
        Key:    &c.path,
    })
    }

    if c.serverSideEncryption && c.customerEncryptionKey != nil {
        input.SetSSECustomerKey(string(c.customerEncryptionKey))
        input.SetSSECustomerAlgorithm(s3EncryptionAlgorithm)
        input.SetSSECustomerKeyMD5(c.getSSECustomerKeyMD5())
    }

    output, err = c.s3Client.GetObject(input)

    if err != nil {
        if awserr, ok := err.(awserr.Error); ok {

@ -152,8 +163,12 @@ func (c *RemoteClient) Put(data []byte) error {
        if c.kmsKeyID != "" {
            i.SSEKMSKeyId = &c.kmsKeyID
            i.ServerSideEncryption = aws.String("aws:kms")
        } else if c.customerEncryptionKey != nil {
            i.SetSSECustomerKey(string(c.customerEncryptionKey))
            i.SetSSECustomerAlgorithm(s3EncryptionAlgorithm)
            i.SetSSECustomerKeyMD5(c.getSSECustomerKeyMD5())
        } else {
            i.ServerSideEncryption = aws.String("AES256")
            i.ServerSideEncryption = aws.String(s3EncryptionAlgorithm)
        }
    }

@ -270,7 +285,7 @@ func (c *RemoteClient) getMD5() ([]byte, error) {
    return sum, nil
}

// store the hash of the state to that clients can check for stale state files.
// store the hash of the state so that clients can check for stale state files.
func (c *RemoteClient) putMD5(sum []byte) error {
    if c.ddbTable == "" {
        return nil

@ -383,6 +398,11 @@ func (c *RemoteClient) lockPath() string {
    return fmt.Sprintf("%s/%s", c.bucketName, c.path)
}

func (c *RemoteClient) getSSECustomerKeyMD5() string {
    b := md5.Sum(c.customerEncryptionKey)
    return base64.StdEncoding.EncodeToString(b[:])
}
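
// S3 requires SSE-C requests to carry the base64-encoded MD5 digest of the
// customer key (the x-amz-server-side-encryption-customer-key-MD5 header);
// this helper computes it once for both the get and Put paths above.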
|
||||
|
||||
const errBadChecksumFmt = `state data in S3 does not have the expected content.
|
||||
|
||||
This may be caused by unusually long delays in S3 processing a previous state
|
||||
|
|
|
@ -23,7 +23,7 @@ func New() backend.Backend {
|
|||
"auth_url": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc("OS_AUTH_URL", nil),
|
||||
DefaultFunc: schema.EnvDefaultFunc("OS_AUTH_URL", ""),
|
||||
Description: descriptions["auth_url"],
|
||||
},
|
||||
|
||||
|
@ -41,6 +41,27 @@ func New() backend.Backend {
|
|||
Description: descriptions["user_name"],
|
||||
},
|
||||
|
||||
"application_credential_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc("OS_APPLICATION_CREDENTIAL_ID", ""),
|
||||
Description: descriptions["application_credential_id"],
|
||||
},
|
||||
|
||||
"application_credential_name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc("OS_APPLICATION_CREDENTIAL_NAME", ""),
|
||||
Description: descriptions["application_credential_name"],
|
||||
},
|
||||
|
||||
"application_credential_secret": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc("OS_APPLICATION_CREDENTIAL_SECRET", ""),
|
||||
Description: descriptions["application_credential_secret"],
|
||||
},
|
||||
|
||||
"tenant_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
|
@@ -70,35 +91,71 @@ func New() backend.Backend {
 			},

 			"token": &schema.Schema{
 				Type:     schema.TypeString,
 				Optional: true,
-				DefaultFunc: schema.EnvDefaultFunc("OS_AUTH_TOKEN", ""),
+				DefaultFunc: schema.MultiEnvDefaultFunc([]string{
+					"OS_TOKEN",
+					"OS_AUTH_TOKEN",
+				}, ""),
 				Description: descriptions["token"],
 			},

-			"domain_id": &schema.Schema{
-				Type:     schema.TypeString,
-				Optional: true,
-				DefaultFunc: schema.MultiEnvDefaultFunc([]string{
-					"OS_USER_DOMAIN_ID",
-					"OS_PROJECT_DOMAIN_ID",
-					"OS_DOMAIN_ID",
-				}, ""),
-				Description: descriptions["domain_id"],
-			},
+			"user_domain_name": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_USER_DOMAIN_NAME", ""),
+				Description: descriptions["user_domain_name"],
+			},
+
+			"user_domain_id": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_USER_DOMAIN_ID", ""),
+				Description: descriptions["user_domain_id"],
+			},
+
+			"project_domain_name": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_PROJECT_DOMAIN_NAME", ""),
+				Description: descriptions["project_domain_name"],
+			},
+
+			"project_domain_id": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_PROJECT_DOMAIN_ID", ""),
+				Description: descriptions["project_domain_id"],
+			},
+
+			"domain_id": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_DOMAIN_ID", ""),
+				Description: descriptions["domain_id"],
+			},

 			"domain_name": &schema.Schema{
 				Type:     schema.TypeString,
 				Optional: true,
-				DefaultFunc: schema.MultiEnvDefaultFunc([]string{
-					"OS_USER_DOMAIN_NAME",
-					"OS_PROJECT_DOMAIN_NAME",
-					"OS_DOMAIN_NAME",
-					"OS_DEFAULT_DOMAIN",
-				}, ""),
+				DefaultFunc: schema.EnvDefaultFunc("OS_DOMAIN_NAME", ""),
 				Description: descriptions["domain_name"],
 			},

+			"default_domain": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_DEFAULT_DOMAIN", "default"),
+				Description: descriptions["default_domain"],
+			},
+
+			"cloud": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_CLOUD", ""),
+				Description: descriptions["cloud"],
+			},
+
 			"region_name": &schema.Schema{
 				Type:     schema.TypeString,
 				Required: true,
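The MultiEnvDefaultFunc entries above take the first set environment variable from an ordered list, so OS_TOKEN wins over OS_AUTH_TOKEN when both are exported. A standalone sketch of that fallback rule (this mimics the helper's behavior rather than reusing it; the function name is invented):

package main

import (
	"fmt"
	"os"
)

// firstEnv returns the value of the first non-empty environment variable
// in names, or def if none of them is set.
func firstEnv(names []string, def string) string {
	for _, n := range names {
		if v := os.Getenv(n); v != "" {
			return v
		}
	}
	return def
}

func main() {
	os.Setenv("OS_AUTH_TOKEN", "secret-token")
	// OS_TOKEN is unset, so the lookup falls through to OS_AUTH_TOKEN.
	token := firstEnv([]string{"OS_TOKEN", "OS_AUTH_TOKEN"}, "")
	fmt.Println(token) // secret-token
}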
@@ -180,6 +237,13 @@ func New() backend.Backend {
 				Description: "Lock state access",
 				Default:     true,
 			},
+
+			"state_name": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: descriptions["state_name"],
+				Default:     "tfstate.tf",
+			},
 		},
 	}
@@ -198,6 +262,12 @@ func init() {

 		"user_id": "User ID to login with.",

+		"application_credential_id": "Application Credential ID to login with.",
+
+		"application_credential_name": "Application Credential name to login with.",
+
+		"application_credential_secret": "Application Credential secret to login with.",
+
 		"tenant_id": "The ID of the Tenant (Identity v2) or Project (Identity v3)\n" +
 			"to login with.",
@@ -208,10 +278,22 @@ func init() {

 		"token": "Authentication token to use as an alternative to username/password.",

+		"user_domain_name": "The name of the domain where the user resides (Identity v3).",
+
+		"user_domain_id": "The ID of the domain where the user resides (Identity v3).",
+
+		"project_domain_name": "The name of the domain where the project resides (Identity v3).",
+
+		"project_domain_id": "The ID of the domain where the project resides (Identity v3).",
+
 		"domain_id": "The ID of the Domain to scope to (Identity v3).",

 		"domain_name": "The name of the Domain to scope to (Identity v3).",

+		"default_domain": "The name of the Domain ID to scope to if no other domain is specified. Defaults to `default` (Identity v3).",
+
+		"cloud": "An entry in a `clouds.yaml` file to use.",
+
 		"region_name": "The name of the Region to use.",

 		"insecure": "Trust self-signed certificates.",
@@ -233,6 +315,8 @@ func init() {
 		"archive_container": "Swift container to archive state to.",

 		"expire_after": "Archive object expiry duration.",
+
+		"state_name": "Name of the state object in the container.",
 	}
 }
@@ -246,6 +330,7 @@ type Backend struct {
 	expireSecs int
 	container  string
 	lock       bool
+	stateName  string
 }

 func (b *Backend) configure(ctx context.Context) error {
@@ -256,19 +341,28 @@ func (b *Backend) configure(ctx context.Context) error {
 	// Grab the resource data
 	data := schema.FromContextBackendConfig(ctx)
 	config := &tf_openstack.Config{
-		CACertFile:       data.Get("cacert_file").(string),
-		ClientCertFile:   data.Get("cert").(string),
-		ClientKeyFile:    data.Get("key").(string),
-		DomainID:         data.Get("domain_id").(string),
-		DomainName:       data.Get("domain_name").(string),
-		EndpointType:     data.Get("endpoint_type").(string),
-		IdentityEndpoint: data.Get("auth_url").(string),
-		Password:         data.Get("password").(string),
-		Token:            data.Get("token").(string),
-		TenantID:         data.Get("tenant_id").(string),
-		TenantName:       data.Get("tenant_name").(string),
-		Username:         data.Get("user_name").(string),
-		UserID:           data.Get("user_id").(string),
+		CACertFile:                  data.Get("cacert_file").(string),
+		ClientCertFile:              data.Get("cert").(string),
+		ClientKeyFile:               data.Get("key").(string),
+		Cloud:                       data.Get("cloud").(string),
+		DefaultDomain:               data.Get("default_domain").(string),
+		DomainID:                    data.Get("domain_id").(string),
+		DomainName:                  data.Get("domain_name").(string),
+		EndpointType:                data.Get("endpoint_type").(string),
+		IdentityEndpoint:            data.Get("auth_url").(string),
+		Password:                    data.Get("password").(string),
+		ProjectDomainID:             data.Get("project_domain_id").(string),
+		ProjectDomainName:           data.Get("project_domain_name").(string),
+		Token:                       data.Get("token").(string),
+		TenantID:                    data.Get("tenant_id").(string),
+		TenantName:                  data.Get("tenant_name").(string),
+		UserDomainID:                data.Get("user_domain_id").(string),
+		UserDomainName:              data.Get("user_domain_name").(string),
+		Username:                    data.Get("user_name").(string),
+		UserID:                      data.Get("user_id").(string),
+		ApplicationCredentialID:     data.Get("application_credential_id").(string),
+		ApplicationCredentialName:   data.Get("application_credential_name").(string),
+		ApplicationCredentialSecret: data.Get("application_credential_secret").(string),
 	}

 	if v, ok := data.GetOkExists("insecure"); ok {
@@ -280,6 +374,9 @@ func (b *Backend) configure(ctx context.Context) error {
 		return err
 	}

+	// Assign state name
+	b.stateName = data.Get("state_name").(string)
+
 	// Assign Container
 	b.container = data.Get("container").(string)
 	if b.container == "" {
@@ -189,9 +189,9 @@ func (b *Backend) StateMgr(name string) (state.State, error) {

 func (b *Backend) objectName(name string) string {
 	if name != backend.DefaultStateName {
-		name = fmt.Sprintf("%s%s/%s", objectEnvPrefix, name, TFSTATE_NAME)
+		name = fmt.Sprintf("%s%s/%s", objectEnvPrefix, name, b.stateName)
 	} else {
-		name = TFSTATE_NAME
+		name = b.stateName
 	}

 	return name
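With state_name now configurable, the object layout is easiest to see with concrete values. A runnable sketch of the same naming rule (the prefix constant and workspace names below are stand-ins for the real ones):

package main

import "fmt"

const (
	defaultStateName = "default" // stand-in for backend.DefaultStateName
	objectEnvPrefix  = "env-"    // stand-in for the real prefix constant
)

// objectName mirrors the logic above: named workspaces are nested under an
// env-prefixed directory, while the default workspace uses the state name
// directly at the container root.
func objectName(workspace, stateName string) string {
	if workspace != defaultStateName {
		return fmt.Sprintf("%s%s/%s", objectEnvPrefix, workspace, stateName)
	}
	return stateName
}

func main() {
	fmt.Println(objectName("default", "tfstate.tf")) // tfstate.tf
	fmt.Println(objectName("staging", "tfstate.tf")) // env-staging/tfstate.tf
}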
@@ -19,8 +19,6 @@ import (
 )

 const (
-	TFSTATE_NAME = "tfstate.tf"
-
 	consistencyTimeout = 15

 	// Suffix that will be appended to state file paths
@@ -1,17 +0,0 @@
-package remotestate
-
-import (
-	"testing"
-
-	"github.com/hashicorp/terraform/backend"
-	"github.com/hashicorp/terraform/state/remote"
-)
-
-func TestClient(t *testing.T, raw backend.Backend) {
-	b, ok := raw.(*Backend)
-	if !ok {
-		t.Fatalf("not Backend: %T", raw)
-	}
-
-	remote.TestClient(t, b.client)
-}
@@ -657,11 +657,7 @@ func (b *Remote) Operation(ctx context.Context, op *backend.Operation) (*backend
 		)
 	default:
 		return nil, fmt.Errorf(
-			"%s\n\n"+
-				"The configured \"remote\" backend encountered an unexpected error. Sometimes\n"+
-				"this is caused by network connection problems, in which case you could retr\n"+
-				"the command. If the issue persists please open a support ticket to get help\n"+
-				"resolving the problem.",
+			"The configured \"remote\" backend encountered an unexpected error:\n\n%s",
 			err,
 		)
 	}
@@ -242,7 +242,7 @@ func (b *Remote) opApply(stopCtx, cancelCtx context.Context, op *backend.Operati

 const applyDefaultHeader = `
 [reset][yellow]Running apply in the remote backend. Output will stream here. Pressing Ctrl-C
-will cancel the remote apply if its still pending. If the apply started it
+will cancel the remote apply if it's still pending. If the apply started it
 will stop streaming the logs, but will not stop the apply running remotely.[reset]

 Preparing the remote apply...
@@ -36,7 +36,7 @@ func TestRemote_applyBasic(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply")
+	op, configCleanup := testOperationApply(t, "./testdata/apply")
 	defer configCleanup()

 	input := testInput(t, map[string]string{

@@ -80,7 +80,7 @@ func TestRemote_applyCanceled(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply")
+	op, configCleanup := testOperationApply(t, "./testdata/apply")
 	defer configCleanup()

 	op.Workspace = backend.DefaultStateName

@@ -116,7 +116,7 @@ func TestRemote_applyWithoutPermissions(t *testing.T) {
 	}
 	w.Permissions.CanQueueApply = false

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply")
+	op, configCleanup := testOperationApply(t, "./testdata/apply")
 	defer configCleanup()

 	op.UIOut = b.CLI

@@ -155,7 +155,7 @@ func TestRemote_applyWithVCS(t *testing.T) {
 		t.Fatalf("error creating named workspace: %v", err)
 	}

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply")
+	op, configCleanup := testOperationApply(t, "./testdata/apply")
 	defer configCleanup()

 	op.Workspace = "prod"

@@ -183,7 +183,7 @@ func TestRemote_applyWithParallelism(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply")
+	op, configCleanup := testOperationApply(t, "./testdata/apply")
 	defer configCleanup()

 	op.Parallelism = 3

@@ -209,7 +209,7 @@ func TestRemote_applyWithPlan(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply")
+	op, configCleanup := testOperationApply(t, "./testdata/apply")
 	defer configCleanup()

 	op.PlanFile = &planfile.Reader{}

@@ -238,7 +238,7 @@ func TestRemote_applyWithoutRefresh(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply")
+	op, configCleanup := testOperationApply(t, "./testdata/apply")
 	defer configCleanup()

 	op.PlanRefresh = false

@@ -264,7 +264,7 @@ func TestRemote_applyWithTarget(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply")
+	op, configCleanup := testOperationApply(t, "./testdata/apply")
 	defer configCleanup()

 	addr, _ := addrs.ParseAbsResourceStr("null_resource.foo")

@@ -295,7 +295,7 @@ func TestRemote_applyWithVariables(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply-variables")
+	op, configCleanup := testOperationApply(t, "./testdata/apply-variables")
 	defer configCleanup()

 	op.Variables = testVariables(terraform.ValueFromNamedFile, "foo", "bar")

@@ -321,7 +321,7 @@ func TestRemote_applyNoConfig(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationApply(t, "./test-fixtures/empty")
+	op, configCleanup := testOperationApply(t, "./testdata/empty")
 	defer configCleanup()

 	op.Workspace = backend.DefaultStateName

@@ -349,7 +349,7 @@ func TestRemote_applyNoChanges(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply-no-changes")
+	op, configCleanup := testOperationApply(t, "./testdata/apply-no-changes")
 	defer configCleanup()

 	op.Workspace = backend.DefaultStateName

@@ -380,7 +380,7 @@ func TestRemote_applyNoApprove(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply")
+	op, configCleanup := testOperationApply(t, "./testdata/apply")
 	defer configCleanup()

 	input := testInput(t, map[string]string{

@@ -418,7 +418,7 @@ func TestRemote_applyAutoApprove(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply")
+	op, configCleanup := testOperationApply(t, "./testdata/apply")
 	defer configCleanup()

 	input := testInput(t, map[string]string{

@@ -463,7 +463,7 @@ func TestRemote_applyApprovedExternally(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply")
+	op, configCleanup := testOperationApply(t, "./testdata/apply")
 	defer configCleanup()

 	input := testInput(t, map[string]string{

@@ -538,7 +538,7 @@ func TestRemote_applyDiscardedExternally(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply")
+	op, configCleanup := testOperationApply(t, "./testdata/apply")
 	defer configCleanup()

 	input := testInput(t, map[string]string{

@@ -626,7 +626,7 @@ func TestRemote_applyWithAutoApply(t *testing.T) {
 		t.Fatalf("error creating named workspace: %v", err)
 	}

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply")
+	op, configCleanup := testOperationApply(t, "./testdata/apply")
 	defer configCleanup()

 	input := testInput(t, map[string]string{

@@ -677,7 +677,7 @@ func TestRemote_applyForceLocal(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply")
+	op, configCleanup := testOperationApply(t, "./testdata/apply")
 	defer configCleanup()

 	input := testInput(t, map[string]string{

@@ -735,7 +735,7 @@ func TestRemote_applyWorkspaceWithoutOperations(t *testing.T) {
 		t.Fatalf("error creating named workspace: %v", err)
 	}

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply")
+	op, configCleanup := testOperationApply(t, "./testdata/apply")
 	defer configCleanup()

 	input := testInput(t, map[string]string{

@@ -802,7 +802,7 @@ func TestRemote_applyLockTimeout(t *testing.T) {
 		t.Fatalf("error creating pending run: %v", err)
 	}

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply")
+	op, configCleanup := testOperationApply(t, "./testdata/apply")
 	defer configCleanup()

 	input := testInput(t, map[string]string{

@@ -853,7 +853,7 @@ func TestRemote_applyDestroy(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply-destroy")
+	op, configCleanup := testOperationApply(t, "./testdata/apply-destroy")
 	defer configCleanup()

 	input := testInput(t, map[string]string{

@@ -902,7 +902,7 @@ func TestRemote_applyDestroyNoConfig(t *testing.T) {
 		"approve": "yes",
 	})

-	op, configCleanup := testOperationApply(t, "./test-fixtures/empty")
+	op, configCleanup := testOperationApply(t, "./testdata/empty")
 	defer configCleanup()

 	op.Destroy = true

@@ -932,7 +932,7 @@ func TestRemote_applyPolicyPass(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply-policy-passed")
+	op, configCleanup := testOperationApply(t, "./testdata/apply-policy-passed")
 	defer configCleanup()

 	input := testInput(t, map[string]string{

@@ -979,7 +979,7 @@ func TestRemote_applyPolicyHardFail(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply-policy-hard-failed")
+	op, configCleanup := testOperationApply(t, "./testdata/apply-policy-hard-failed")
 	defer configCleanup()

 	input := testInput(t, map[string]string{

@@ -1031,7 +1031,7 @@ func TestRemote_applyPolicySoftFail(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply-policy-soft-failed")
+	op, configCleanup := testOperationApply(t, "./testdata/apply-policy-soft-failed")
 	defer configCleanup()

 	input := testInput(t, map[string]string{

@@ -1079,7 +1079,7 @@ func TestRemote_applyPolicySoftFailAutoApprove(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply-policy-soft-failed")
+	op, configCleanup := testOperationApply(t, "./testdata/apply-policy-soft-failed")
 	defer configCleanup()

 	input := testInput(t, map[string]string{

@@ -1145,7 +1145,7 @@ func TestRemote_applyPolicySoftFailAutoApply(t *testing.T) {
 		t.Fatalf("error creating named workspace: %v", err)
 	}

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply-policy-soft-failed")
+	op, configCleanup := testOperationApply(t, "./testdata/apply-policy-soft-failed")
 	defer configCleanup()

 	input := testInput(t, map[string]string{

@@ -1193,7 +1193,7 @@ func TestRemote_applyWithRemoteError(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationApply(t, "./test-fixtures/apply-with-error")
+	op, configCleanup := testOperationApply(t, "./testdata/apply-with-error")
 	defer configCleanup()

 	op.Workspace = backend.DefaultStateName
@@ -227,57 +227,6 @@ func (b *Remote) parseVariableValues(op *backend.Operation) (terraform.InputValu
 	return result, diags
 }

-func (b *Remote) costEstimation(stopCtx, cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error {
-	if r.CostEstimation == nil {
-		return nil
-	}
-
-	if b.CLI != nil {
-		b.CLI.Output("\n------------------------------------------------------------------------\n")
-	}
-
-	logs, err := b.client.CostEstimations.Logs(stopCtx, r.CostEstimation.ID)
-	if err != nil {
-		return generalError("Failed to retrieve cost estimation logs", err)
-	}
-	scanner := bufio.NewScanner(logs)
-
-	// Retrieve the cost estimation to get its current status.
-	ce, err := b.client.CostEstimations.Read(stopCtx, r.CostEstimation.ID)
-	if err != nil {
-		return generalError("Failed to retrieve cost estimation", err)
-	}
-
-	msgPrefix := "Cost estimation"
-	if b.CLI != nil {
-		b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n"))
-	}
-
-	for scanner.Scan() {
-		if b.CLI != nil {
-			b.CLI.Output(b.Colorize().Color(scanner.Text()))
-		}
-	}
-
-	if err := scanner.Err(); err != nil {
-		return generalError("Failed to read logs", err)
-	}
-
-	switch ce.Status {
-	case tfe.CostEstimationFinished:
-		if len(r.PolicyChecks) == 0 && r.HasChanges && op.Type == backend.OperationTypeApply && b.CLI != nil {
-			b.CLI.Output("\n------------------------------------------------------------------------")
-		}
-		return nil
-	case tfe.CostEstimationErrored:
-		return fmt.Errorf(msgPrefix + " errored.")
-	case tfe.CostEstimationCanceled:
-		return fmt.Errorf(msgPrefix + " canceled.")
-	default:
-		return fmt.Errorf("Unknown or unexpected cost estimation state: %s", ce.Status)
-	}
-}
-
 func (b *Remote) checkPolicy(stopCtx, cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error {
 	if b.CLI != nil {
 		b.CLI.Output("\n------------------------------------------------------------------------\n")
@@ -21,7 +21,6 @@ import (
 type mockClient struct {
 	Applies               *mockApplies
 	ConfigurationVersions *mockConfigurationVersions
-	CostEstimations       *mockCostEstimations
 	Organizations         *mockOrganizations
 	Plans                 *mockPlans
 	PolicyChecks          *mockPolicyChecks

@@ -34,7 +33,6 @@ func newMockClient() *mockClient {
 	c := &mockClient{}
 	c.Applies = newMockApplies(c)
 	c.ConfigurationVersions = newMockConfigurationVersions(c)
-	c.CostEstimations = newMockCostEstimations(c)
 	c.Organizations = newMockOrganizations(c)
 	c.Plans = newMockPlans(c)
 	c.PolicyChecks = newMockPolicyChecks(c)
@@ -214,84 +212,6 @@ func (m *mockConfigurationVersions) Upload(ctx context.Context, url, path string
 	return nil
 }

-type mockCostEstimations struct {
-	client      *mockClient
-	estimations map[string]*tfe.CostEstimation
-	logs        map[string]string
-}
-
-func newMockCostEstimations(client *mockClient) *mockCostEstimations {
-	return &mockCostEstimations{
-		client:      client,
-		estimations: make(map[string]*tfe.CostEstimation),
-		logs:        make(map[string]string),
-	}
-}
-
-// create is a helper function to create a mock cost estimation that uses the
-// configured working directory to find the logfile.
-func (m *mockCostEstimations) create(cvID, workspaceID string) (*tfe.CostEstimation, error) {
-	id := generateID("ce-")
-
-	ce := &tfe.CostEstimation{
-		ID:     id,
-		Status: tfe.CostEstimationQueued,
-	}
-
-	w, ok := m.client.Workspaces.workspaceIDs[workspaceID]
-	if !ok {
-		return nil, tfe.ErrResourceNotFound
-	}
-
-	logfile := filepath.Join(
-		m.client.ConfigurationVersions.uploadPaths[cvID],
-		w.WorkingDirectory,
-		"ce.log",
-	)
-
-	if _, err := os.Stat(logfile); os.IsNotExist(err) {
-		return nil, nil
-	}
-
-	m.logs[ce.ID] = logfile
-	m.estimations[ce.ID] = ce
-
-	return ce, nil
-}
-
-func (m *mockCostEstimations) Read(ctx context.Context, costEstimationID string) (*tfe.CostEstimation, error) {
-	ce, ok := m.estimations[costEstimationID]
-	if !ok {
-		return nil, tfe.ErrResourceNotFound
-	}
-	return ce, nil
-}
-
-func (m *mockCostEstimations) Logs(ctx context.Context, costEstimationID string) (io.Reader, error) {
-	ce, ok := m.estimations[costEstimationID]
-	if !ok {
-		return nil, tfe.ErrResourceNotFound
-	}
-
-	logfile, ok := m.logs[ce.ID]
-	if !ok {
-		return nil, tfe.ErrResourceNotFound
-	}
-
-	if _, err := os.Stat(logfile); os.IsNotExist(err) {
-		return bytes.NewBufferString("logfile does not exist"), nil
-	}
-
-	logs, err := ioutil.ReadFile(logfile)
-	if err != nil {
-		return nil, err
-	}
-
-	ce.Status = tfe.CostEstimationFinished
-
-	return bytes.NewBuffer(logs), nil
-}
-
 // mockInput is a mock implementation of terraform.UIInput.
 type mockInput struct {
 	answers map[string]string
@@ -732,25 +652,19 @@ func (m *mockRuns) Create(ctx context.Context, options tfe.RunCreateOptions) (*t
 		return nil, err
 	}

-	ce, err := m.client.CostEstimations.create(options.ConfigurationVersion.ID, options.Workspace.ID)
-	if err != nil {
-		return nil, err
-	}
-
 	pc, err := m.client.PolicyChecks.create(options.ConfigurationVersion.ID, options.Workspace.ID)
 	if err != nil {
 		return nil, err
 	}

 	r := &tfe.Run{
-		ID:             generateID("run-"),
-		Actions:        &tfe.RunActions{IsCancelable: true},
-		Apply:          a,
-		CostEstimation: ce,
-		HasChanges:     false,
-		Permissions:    &tfe.RunPermissions{},
-		Plan:           p,
-		Status:         tfe.RunPending,
+		ID:          generateID("run-"),
+		Actions:     &tfe.RunActions{IsCancelable: true},
+		Apply:       a,
+		HasChanges:  false,
+		Permissions: &tfe.RunPermissions{},
+		Plan:        p,
+		Status:      tfe.RunPending,
 	}

 	if pc != nil {
@@ -1034,6 +948,11 @@ func (m *mockWorkspaces) Create(ctx context.Context, organization string, option
 }

 func (m *mockWorkspaces) Read(ctx context.Context, organization, workspace string) (*tfe.Workspace, error) {
+	// custom error for TestRemote_plan500 in backend_plan_test.go
+	if workspace == "network-error" {
+		return nil, errors.New("I'm a little teacup")
+	}
+
 	w, ok := m.workspaceNames[workspace]
 	if !ok {
 		return nil, tfe.ErrResourceNotFound
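The sentinel workspace name above is a common test-double trick: the mock returns a canned error for one magic input so a test can drive the caller's unexpected-error path without any real network failure. A self-contained sketch of the pattern (the types and names here are stand-ins, not the real tfe client):

package main

import (
	"errors"
	"fmt"
)

// Workspace is a stand-in for tfe.Workspace.
type Workspace struct{ Name string }

var errResourceNotFound = errors.New("resource not found")

type mockWorkspaces struct {
	byName map[string]*Workspace
}

// Read returns a canned error for one sentinel name; every other lookup
// behaves like a normal store.
func (m *mockWorkspaces) Read(name string) (*Workspace, error) {
	if name == "network-error" {
		return nil, errors.New("I'm a little teacup")
	}
	w, ok := m.byName[name]
	if !ok {
		return nil, errResourceNotFound
	}
	return w, nil
}

func main() {
	m := &mockWorkspaces{byName: map[string]*Workspace{"prod": {Name: "prod"}}}
	_, err := m.Read("network-error")
	fmt.Println(err) // I'm a little teacup
}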
@@ -138,13 +138,39 @@ func (b *Remote) plan(stopCtx, cancelCtx context.Context, op *backend.Operation,

 	var configDir string
 	if op.ConfigDir != "" {
+		// De-normalize the configuration directory path.
+		configDir, err = filepath.Abs(op.ConfigDir)
+		if err != nil {
+			return nil, generalError(
+				"Failed to get absolute path of the configuration directory: %v", err)
+		}
+
 		// Make sure to take the working directory into account by removing
 		// the working directory from the current path. This will result in
 		// a path that points to the expected root of the workspace.
 		configDir = filepath.Clean(strings.TrimSuffix(
-			filepath.Clean(op.ConfigDir),
+			filepath.Clean(configDir),
 			filepath.Clean(w.WorkingDirectory),
 		))
+
+		// If the workspace has a subdirectory as its working directory then
+		// our configDir will be some parent directory of the current working
+		// directory. Users are likely to find that surprising, so we'll
+		// produce an explicit message about it to be transparent about what
+		// we are doing and why.
+		if w.WorkingDirectory != "" && filepath.Base(configDir) != w.WorkingDirectory {
+			if b.CLI != nil {
+				b.CLI.Output(fmt.Sprintf(strings.TrimSpace(`
+The remote workspace is configured to work with configuration at
+%s relative to the target repository.
+
+Therefore Terraform will upload the full contents of the following directory
+to capture the filesystem context the remote workspace expects:
+    %s
+`), w.WorkingDirectory, configDir) + "\n")
+			}
+		}
+
 	} else {
 		// We did a check earlier to make sure we either have a config dir,
 		// or the plan is run with -destroy. So this else clause will only
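The TrimSuffix arithmetic above recovers the upload root by peeling the workspace's working directory off the end of the absolute configuration path. A runnable sketch of the same path math, with made-up paths:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// workspaceRoot strips a workspace's working directory off the end of the
// absolute configuration path, yielding the directory that must be uploaded
// so the remote side can find the configuration where it expects it.
func workspaceRoot(configDir, workingDirectory string) string {
	return filepath.Clean(strings.TrimSuffix(
		filepath.Clean(configDir),
		filepath.Clean(workingDirectory),
	))
}

func main() {
	// The workspace expects its config under "terraform" inside the repo,
	// so the upload root is the parent of the configuration directory.
	root := workspaceRoot("/repo/plan-with-working-directory/terraform", "terraform")
	fmt.Println(root) // /repo/plan-with-working-directory
}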
@@ -290,14 +316,6 @@ func (b *Remote) plan(stopCtx, cancelCtx context.Context, op *backend.Operation,
 		return r, nil
 	}

-	// Show any cost estimation output.
-	if r.CostEstimation != nil {
-		err = b.costEstimation(stopCtx, cancelCtx, op, r)
-		if err != nil {
-			return r, err
-		}
-	}
-
 	// Check any configured sentinel policies.
 	if len(r.PolicyChecks) > 0 {
 		err = b.checkPolicy(stopCtx, cancelCtx, op, r)
@@ -36,7 +36,7 @@ func TestRemote_planBasic(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan")
 	defer configCleanup()

 	op.Workspace = backend.DefaultStateName

@@ -67,7 +67,7 @@ func TestRemote_planCanceled(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan")
 	defer configCleanup()

 	op.Workspace = backend.DefaultStateName

@@ -90,7 +90,7 @@ func TestRemote_planLongLine(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan-long-line")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan-long-line")
 	defer configCleanup()

 	op.Workspace = backend.DefaultStateName

@@ -134,7 +134,7 @@ func TestRemote_planWithoutPermissions(t *testing.T) {
 	}
 	w.Permissions.CanQueueRun = false

-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan")
 	defer configCleanup()

 	op.Workspace = "prod"

@@ -159,7 +159,7 @@ func TestRemote_planWithParallelism(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan")
 	defer configCleanup()

 	op.Parallelism = 3

@@ -185,7 +185,7 @@ func TestRemote_planWithPlan(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan")
 	defer configCleanup()

 	op.PlanFile = &planfile.Reader{}

@@ -214,10 +214,10 @@ func TestRemote_planWithPath(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan")
 	defer configCleanup()

-	op.PlanOutPath = "./test-fixtures/plan"
+	op.PlanOutPath = "./testdata/plan"
 	op.Workspace = backend.DefaultStateName

 	run, err := b.Operation(context.Background(), op)

@@ -243,7 +243,7 @@ func TestRemote_planWithoutRefresh(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan")
 	defer configCleanup()

 	op.PlanRefresh = false

@@ -269,7 +269,7 @@ func TestRemote_planWithTarget(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan")
 	defer configCleanup()

 	addr, _ := addrs.ParseAbsResourceStr("null_resource.foo")

@@ -300,7 +300,7 @@ func TestRemote_planWithVariables(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan-variables")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan-variables")
 	defer configCleanup()

 	op.Variables = testVariables(terraform.ValueFromCLIArg, "foo", "bar")

@@ -326,7 +326,7 @@ func TestRemote_planNoConfig(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationPlan(t, "./test-fixtures/empty")
+	op, configCleanup := testOperationPlan(t, "./testdata/empty")
 	defer configCleanup()

 	op.Workspace = backend.DefaultStateName

@@ -354,7 +354,7 @@ func TestRemote_planNoChanges(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan-no-changes")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan-no-changes")
 	defer configCleanup()

 	op.Workspace = backend.DefaultStateName

@@ -392,7 +392,7 @@ func TestRemote_planForceLocal(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan")
 	defer configCleanup()

 	op.Workspace = backend.DefaultStateName

@@ -423,7 +423,7 @@ func TestRemote_planWithoutOperationsEntitlement(t *testing.T) {
 	b, bCleanup := testBackendNoOperations(t)
 	defer bCleanup()

-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan")
 	defer configCleanup()

 	op.Workspace = backend.DefaultStateName

@@ -468,7 +468,7 @@ func TestRemote_planWorkspaceWithoutOperations(t *testing.T) {
 		t.Fatalf("error creating named workspace: %v", err)
 	}

-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan")
 	defer configCleanup()

 	op.Workspace = "no-operations"

@@ -522,7 +522,7 @@ func TestRemote_planLockTimeout(t *testing.T) {
 		t.Fatalf("error creating pending run: %v", err)
 	}

-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan")
 	defer configCleanup()

 	input := testInput(t, map[string]string{

@@ -570,7 +570,7 @@ func TestRemote_planDestroy(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan")
 	defer configCleanup()

 	op.Destroy = true

@@ -594,7 +594,7 @@ func TestRemote_planDestroyNoConfig(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationPlan(t, "./test-fixtures/empty")
+	op, configCleanup := testOperationPlan(t, "./testdata/empty")
 	defer configCleanup()

 	op.Destroy = true
@@ -622,13 +622,71 @@ func TestRemote_planWithWorkingDirectory(t *testing.T) {
 		WorkingDirectory: tfe.String("terraform"),
 	}

-	// Configure the workspace to use a custom working direcrtory.
+	// Configure the workspace to use a custom working directory.
 	_, err := b.client.Workspaces.Update(context.Background(), b.organization, b.workspace, options)
 	if err != nil {
 		t.Fatalf("error configuring working directory: %v", err)
 	}

-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan-with-working-directory/terraform")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan-with-working-directory/terraform")
 	defer configCleanup()

 	op.Workspace = backend.DefaultStateName

+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	if run.Result != backend.OperationSuccess {
+		t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String())
+	}
+	if run.PlanEmpty {
+		t.Fatalf("expected a non-empty plan")
+	}
+
+	output := b.CLI.(*cli.MockUi).OutputWriter.String()
+	if !strings.Contains(output, "The remote workspace is configured to work with configuration") {
+		t.Fatalf("expected working directory warning: %s", output)
+	}
+	if !strings.Contains(output, "Running plan in the remote backend") {
+		t.Fatalf("expected remote backend header in output: %s", output)
+	}
+	if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
+		t.Fatalf("expected plan summary in output: %s", output)
+	}
+}
+
+func TestRemote_planWithWorkingDirectoryFromCurrentPath(t *testing.T) {
+	b, bCleanup := testBackendDefault(t)
+	defer bCleanup()
+
+	options := tfe.WorkspaceUpdateOptions{
+		WorkingDirectory: tfe.String("terraform"),
+	}
+
+	// Configure the workspace to use a custom working directory.
+	_, err := b.client.Workspaces.Update(context.Background(), b.organization, b.workspace, options)
+	if err != nil {
+		t.Fatalf("error configuring working directory: %v", err)
+	}
+
+	wd, err := os.Getwd()
+	if err != nil {
+		t.Fatalf("error getting current working directory: %v", err)
+	}
+
+	// We need to change into the configuration directory to make sure
+	// the logic to upload the correct slug is working as expected.
+	if err := os.Chdir("./testdata/plan-with-working-directory/terraform"); err != nil {
+		t.Fatalf("error changing directory: %v", err)
+	}
+	defer os.Chdir(wd) // Make sure we change back again when we're done.
+
+	// For this test we need to give our current directory instead of the
+	// full path to the configuration as we already changed directories.
+	op, configCleanup := testOperationPlan(t, ".")
+	defer configCleanup()
+
+	op.Workspace = backend.DefaultStateName
@@ -655,45 +713,11 @@ func TestRemote_planWithWorkingDirectory(t *testing.T) {
 	}
 }

-func TestRemote_costEstimation(t *testing.T) {
-	b, bCleanup := testBackendDefault(t)
-	defer bCleanup()
-
-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan-cost-estimation")
-	defer configCleanup()
-
-	op.Workspace = backend.DefaultStateName
-
-	run, err := b.Operation(context.Background(), op)
-	if err != nil {
-		t.Fatalf("error starting operation: %v", err)
-	}
-
-	<-run.Done()
-	if run.Result != backend.OperationSuccess {
-		t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String())
-	}
-	if run.PlanEmpty {
-		t.Fatalf("expected a non-empty plan")
-	}
-
-	output := b.CLI.(*cli.MockUi).OutputWriter.String()
-	if !strings.Contains(output, "Running plan in the remote backend") {
-		t.Fatalf("expected remote backend header in output: %s", output)
-	}
-	if !strings.Contains(output, "SKU") {
-		t.Fatalf("expected cost estimation result in output: %s", output)
-	}
-	if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
-		t.Fatalf("expected plan summary in output: %s", output)
-	}
-}
-
 func TestRemote_planPolicyPass(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan-policy-passed")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan-policy-passed")
 	defer configCleanup()

 	op.Workspace = backend.DefaultStateName
@@ -727,7 +751,7 @@ func TestRemote_planPolicyHardFail(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan-policy-hard-failed")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan-policy-hard-failed")
 	defer configCleanup()

 	op.Workspace = backend.DefaultStateName

@@ -766,7 +790,7 @@ func TestRemote_planPolicySoftFail(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan-policy-soft-failed")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan-policy-soft-failed")
 	defer configCleanup()

 	op.Workspace = backend.DefaultStateName

@@ -805,7 +829,7 @@ func TestRemote_planWithRemoteError(t *testing.T) {
 	b, bCleanup := testBackendDefault(t)
 	defer bCleanup()

-	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan-with-error")
+	op, configCleanup := testOperationPlan(t, "./testdata/plan-with-error")
 	defer configCleanup()

 	op.Workspace = backend.DefaultStateName
@@ -831,3 +855,23 @@ func TestRemote_planWithRemoteError(t *testing.T) {
 		t.Fatalf("expected plan error in output: %s", output)
 	}
 }
+
+func TestRemote_planOtherError(t *testing.T) {
+	b, bCleanup := testBackendDefault(t)
+	defer bCleanup()
+
+	op, configCleanup := testOperationPlan(t, "./testdata/plan")
+	defer configCleanup()
+
+	op.Workspace = "network-error" // custom error response in backend_mock.go
+
+	_, err := b.Operation(context.Background(), op)
+	if err == nil {
+		t.Errorf("expected error, got success")
+	}
+
+	if !strings.Contains(err.Error(),
+		"The configured \"remote\" backend encountered an unexpected error:\n\nI'm a little teacup") {
+		t.Fatalf("expected error message, got: %s", err.Error())
+	}
+}
Some files were not shown because too many files have changed in this diff.