Merge with master
Commit cb041053e9
@@ -3,10 +3,14 @@ version: 2.1
orbs:
  slack: circleci/slack@3.4.2

references:
  images:
    middleman: &MIDDLEMAN_IMAGE docker.mirror.hashicorp.services/hashicorp/middleman-hashicorp:0.3.44

executors:
  go:
    docker:
      - image: circleci/golang:1.15
      - image: docker.mirror.hashicorp.services/circleci/golang:1.15
    environment:
      CONSUL_VERSION: 1.7.2
      GOMAXPROCS: 4

@@ -176,6 +180,97 @@ jobs:
          name: test docker build for 'full' image
          command: docker build -t test-docker-full .

  # Based on a similar job in terraform-website repo.
  website-link-check:
    docker:
      - image: *MIDDLEMAN_IMAGE
    steps:
      - checkout:
          path: terraform

      - run:
          name: Determine changed website files, if any
          working_directory: terraform
          command: |
            # Figure out what the current branch forked from. Compare against
            # master and the set of "vX.Y" branches, and choose whichever branch
            # we're the *fewest* commits ahead of.
            # The point here isn't to perfectly predict where this will be
            # merged; all we really care about is determining which commits are
            # *unique to this PR,* so we don't accidentally complain about
            # problems you had nothing to do with.
            PARENT_BRANCH=$(
              for br in $(git branch -rl --format='%(refname:short)' | grep -E '^origin/(master|v\d+\.\d+)$'); do
                new_commits=$(git rev-list --first-parent ^${br} HEAD | wc -l);
                echo "${br} ${new_commits}";
              done \
                | sort -n -k2 \
                | head -n1 \
                | awk '{print $1}';
            )
            echo "Checking current branch against: ${PARENT_BRANCH}"
            MERGE_BASE=$(git merge-base HEAD ${PARENT_BRANCH})
            git diff --name-only -z --diff-filter=AMRCT ${MERGE_BASE}..HEAD -- ./website/ > /tmp/changed-website-files.txt
            # --name-only: Return a list of affected files but don't show the changes.
            # -z: Make that a null-separated list (instead of newline-separated), and
            #    DON'T mangle non-ASCII characters.
            # --diff-filter=AMRCT: Only list files that were added, modified, renamed,
            #    copied, or had their type changed (file, symlink, etc.). In
            #    particular, we don't want to check deleted files.
            # ${MERGE_BASE}..HEAD: Only consider files that have
            #    changed since this branch diverged from its parent branch.
            # -- ./website/: Only consider files in the website directory.
            echo "Changed website files:"
            cat /tmp/changed-website-files.txt | tr '\0' '\n'
            # Need to use "tr" for display because it's a null-separated list.

      - run:
          name: Exit early if there's nothing to check
          command: |
            if [ ! -s /tmp/changed-website-files.txt ]; then
              circleci-agent step halt
            fi

      - run:
          name: Check out terraform-website repo
          command: git clone git@github.com:hashicorp/terraform-website.git

      - run:
          name: Use local checkout for terraform submodule, instead of cloning again
          working_directory: terraform-website
          command: |
            # Set submodule's URL to our existing checkout.
            # (Using `pwd` because git's behavior with strictly relative paths is unreliable.)
            git config --file=.gitmodules submodule.ext/terraform.url $(pwd)/../terraform/.git
            # Make it so `make sync` will grab our current branch instead of stable-website.
            git config --file=.gitmodules submodule.ext/terraform.branch HEAD

      - run:
          name: Init/update terraform-website submodules
          working_directory: terraform-website
          command: make sync

      - run:
          name: Set up terraform-website dependencies
          working_directory: terraform-website/content
          # If this does anything interesting, then the container needs an update.
          command: bundle check || bundle install --path vendor/bundle --retry=3

      - run:
          name: Run middleman in background
          working_directory: terraform-website/content
          background: true
          command: bundle exec middleman server

      - run:
          name: Wait for server to start
          command: until curl -sS http://localhost:4567/ > /dev/null; do sleep 1; done

      - run:
          name: Check links in changed pages
          working_directory: terraform-website/content
          command: cat /tmp/changed-website-files.txt | bundle exec ./scripts/check-pr-links.rb

workflows:
  version: 2
  test:

@@ -203,3 +298,7 @@ workflows:
      - build-386
      - build-amd64
      - build-arm

  website-test:
    jobs:
      - website-link-check

@@ -64,7 +64,7 @@ The Terraform team is not merging PRs for new state storage backends at the curr

Please see the [CODEOWNERS](https://github.com/hashicorp/terraform/blob/master/CODEOWNERS) file for the status of a given backend. Community members with an interest in a particular standard backend are welcome to help maintain it.

Currently, merging state storage backends places a significant burden on the Terraform team. The team must setup an environment and cloud service provider account, or a new database/storage/key-value service, in order to build and test remote state storage backends. The time and complexity of doing so prevents us from moving Terraform forward in other ways.
Currently, merging state storage backends places a significant burden on the Terraform team. The team must set up an environment and cloud service provider account, or a new database/storage/key-value service, in order to build and test remote state storage backends. The time and complexity of doing so prevents us from moving Terraform forward in other ways.

We are working to remove ourselves from the critical path of state storage backends by moving them towards a plugin model. In the meantime, we won't be accepting new remote state backends into Terraform.

@@ -0,0 +1,21 @@
---
name: Backport Assistant Runner

on:
  pull_request_target:
    types:
      - closed

jobs:
  backport:
    if: github.event.pull_request.merged
    runs-on: ubuntu-latest
    container: hashicorpdev/backport-assistant:0.2.1
    steps:
      - name: Run Backport Assistant
        run: |
          backport-assistant backport
        env:
          BACKPORT_LABEL_REGEXP: "(?P<target>\\d+\\.\\d+)-backport"
          BACKPORT_TARGET_TEMPLATE: "v{{.target}}"
          GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
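As a hedged sketch of how the two BACKPORT_* settings above fit together (the real mapping happens inside the backport-assistant container; the label names below are purely illustrative):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Same pattern as BACKPORT_LABEL_REGEXP above: a PR label such as
	// "0.14-backport" captures "0.14" into the named group "target".
	labelRe := regexp.MustCompile(`(?P<target>\d+\.\d+)-backport`)

	for _, label := range []string{"0.14-backport", "0.15-backport", "docs"} {
		m := labelRe.FindStringSubmatch(label)
		if m == nil {
			continue // not a backport label; ignored
		}
		target := m[labelRe.SubexpIndex("target")]
		// BACKPORT_TARGET_TEMPLATE "v{{.target}}" then names the branch that
		// the backport PR is opened against, e.g. v0.14.
		branch := strings.ReplaceAll("v{{.target}}", "{{.target}}", target)
		fmt.Printf("label %q -> backport branch %s\n", label, branch)
	}
}
```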
.tfdev
@@ -1,5 +1,4 @@
version_info {
  commit_var = "main.GitCommit"
  version_var = "github.com/hashicorp/terraform/version.Version"
  prerelease_var = "github.com/hashicorp/terraform/version.Prerelease"
}

@@ -7,7 +7,7 @@ When a bug report is filed, our goal is to either:

## Process

### 1. [Newly created issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Anew+label%3Abug+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3A%22waiting+for+reproduction%22+-label%3A%22waiting-response%22+-label%3Aexplained) require initial filtering.
### 1. [Newly created issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Anew+label%3Abug+-label%3Abackend%2Fk8s+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3A%22waiting+for+reproduction%22+-label%3A%22waiting-response%22+-label%3Aexplained+) require initial filtering.

These are raw reports that need categorization and support clarifying them. They need the following done:

@@ -20,7 +20,7 @@ If an issue requires discussion with the user to get it out of this initial stat

Once this initial filtering has been done, remove the new label. If an issue subjectively looks very high-impact and likely to impact many users, assign it to the [appropriate milestone](https://github.com/hashicorp/terraform/milestones) to mark it as being urgent.

### 2. Clarify [unreproduced issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+created%3A%3E2020-05-01+-label%3Aprovisioner%2Fsalt-masterless+-label%3Adocumentation+-label%3Aprovider%2Fazuredevops+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent+-label%3Abackend%2Fmanta+-label%3Abackend%2Fatlas+-label%3Abackend%2Fetcdv3+-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3Anew+-label%3A%22waiting+for+reproduction%22+-label%3Awaiting-response+-label%3Aexplained+sort%3Acreated-asc)
### 2. Clarify [unreproduced issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+created%3A%3E2020-05-01+-label%3Abackend%2Fk8s+-label%3Aprovisioner%2Fsalt-masterless+-label%3Adocumentation+-label%3Aprovider%2Fazuredevops+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent+-label%3Abackend%2Fmanta+-label%3Abackend%2Fatlas+-label%3Abackend%2Fetcdv3+-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3Anew+-label%3A%22waiting+for+reproduction%22+-label%3Awaiting-response+-label%3Aexplained+sort%3Acreated-asc+)

A core team member initially determines whether the issue is immediately reproducible. If they cannot readily reproduce it, they label it "waiting for reproduction" and correspond with the reporter to describe what is needed. When the issue is reproduced by a core team member, they label it "confirmed".

@@ -29,15 +29,15 @@ A core team member initially determines whether the issue is immediately reprodu
Note that the link above excludes issues reported before May 2020; this is to avoid including issues that were reported prior to this new process being implemented. [Unreproduced issues reported before May 2020](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+created%3A%3C2020-05-01+-label%3Aprovisioner%2Fsalt-masterless+-label%3Adocumentation+-label%3Aprovider%2Fazuredevops+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent+-label%3Abackend%2Fmanta+-label%3Abackend%2Fatlas+-label%3Abackend%2Fetcdv3+-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3Anew+-label%3A%22waiting+for+reproduction%22+-label%3Awaiting-response+-label%3Aexplained+sort%3Areactions-%2B1-desc) will be triaged as capacity permits.
### 3. Explain or fix [confirmed issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+-label%3Aexplained+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+)
### 3. Explain or fix [confirmed issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+-label%3Aexplained+-label%3Abackend%2Foss+-label%3Abackend%2Fk8s+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+)
The next step for confirmed issues is to either:

* explain why the behavior is expected, label the issue as "working as designed", and close it, or
* locate the cause of the defect in the codebase. When the defect is located, and that description is posted on the issue, the issue is labeled "explained". In many cases, this step will get skipped if the fix is obvious, and engineers will jump forward and make a PR.

[Confirmed crashes](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Acrash+label%3Abug+-label%3Aexplained+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+) should generally be considered high impact
[Confirmed crashes](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Acrash+label%3Abug+-label%3Abackend%2Fk8s+-label%3Aexplained+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+) should generally be considered high impact
### 4. The last step for [explained issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+label%3Aexplained+no%3Amilestone+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+) is to make a PR to fix them.
### 4. The last step for [explained issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+label%3Aexplained+no%3Amilestone+-label%3Abackend%2Fk8s+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+) is to make a PR to fix them.

Explained issues that are expected to be fixed in a future release should be assigned to a milestone

@@ -54,23 +54,23 @@ working as designed | confirmed as reported and closed because the behavior
pending project | issue is confirmed but will require a significant project to fix

## Lack of response and unreproducible issues
When bugs that have been [labeled waiting response](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent+-label%3Abackend%2Fmanta+-label%3Abackend%2Fatlas+-label%3Abackend%2Fetcdv3+-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3A%22waiting+for+reproduction%22+label%3Awaiting-response+-label%3Aexplained+sort%3Aupdated-asc) or [labeled "waiting for reproduction"](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent+-label%3Abackend%2Fmanta+-label%3Abackend%2Fatlas+-label%3Abackend%2Fetcdv3+-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+label%3A%22waiting+for+reproduction%22+-label%3Aexplained+sort%3Aupdated-asc+) for more than 30 days, we'll use our best judgement to determine whether it's more helpful to close it or prompt the reporter again. If they again go without a response for 30 days, they can be closed with a polite message explaining why and inviting the person to submit the needed information or reproduction case in the future.
When bugs that have been [labeled waiting response](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+-label%3Abackend%2Foss+-label%3Abackend%2Fk8s+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent+-label%3Abackend%2Fmanta+-label%3Abackend%2Fatlas+-label%3Abackend%2Fetcdv3+-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3A%22waiting+for+reproduction%22+label%3Awaiting-response+-label%3Aexplained+sort%3Aupdated-asc+) or [labeled "waiting for reproduction"](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent+-label%3Abackend%2Fmanta+-label%3Abackend%2Fatlas+-label%3Abackend%2Fetcdv3+-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+label%3A%22waiting+for+reproduction%22+-label%3Aexplained+sort%3Aupdated-asc+) for more than 30 days, we'll use our best judgement to determine whether it's more helpful to close it or prompt the reporter again. If they again go without a response for 30 days, they can be closed with a polite message explaining why and inviting the person to submit the needed information or reproduction case in the future.

The intent of this process is to fix the maximum number of bugs in Terraform as quickly as possible, and having un-actionable bug reports makes it harder for Terraform Core team members and community contributors to find bugs they can actually work on.

## Helpful GitHub Filters

### Triage Process
1. [Newly created issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Anew+label%3Abug+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3A%22waiting+for+reproduction%22+-label%3A%22waiting-response%22+-label%3Aexplained) require initial filtering.
2. Clarify [unreproduced issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+created%3A%3E2020-05-01+-label%3Aprovisioner%2Fsalt-masterless+-label%3Adocumentation+-label%3Aprovider%2Fazuredevops+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent+-label%3Abackend%2Fmanta+-label%3Abackend%2Fatlas+-label%3Abackend%2Fetcdv3+-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3Anew+-label%3A%22waiting+for+reproduction%22+-label%3Awaiting-response+-label%3Aexplained+sort%3Acreated-asc)
3. Explain or fix [confirmed issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+-label%3Aexplained+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+). Prioritize [confirmed crashes](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Acrash+label%3Abug+-label%3Aexplained+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+).
4. Fix [explained issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+label%3Aexplained+no%3Amilestone+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+)
1. [Newly created issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Anew+label%3Abug+-label%3Abackend%2Foss+-label%3Abackend%2Fk8s+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3A%22waiting+for+reproduction%22+-label%3A%22waiting-response%22+-label%3Aexplained+) require initial filtering.
2. Clarify [unreproduced issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+created%3A%3E2020-05-01+-label%3Abackend%2Fk8s+-label%3Aprovisioner%2Fsalt-masterless+-label%3Adocumentation+-label%3Aprovider%2Fazuredevops+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent+-label%3Abackend%2Fmanta+-label%3Abackend%2Fatlas+-label%3Abackend%2Fetcdv3+-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3Anew+-label%3A%22waiting+for+reproduction%22+-label%3Awaiting-response+-label%3Aexplained+sort%3Acreated-asc+)
3. Explain or fix [confirmed issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+-label%3Aexplained+-label%3Abackend%2Fk8s+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+). Prioritize [confirmed crashes](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Acrash+label%3Abug+-label%3Aexplained+-label%3Abackend%2Fk8s+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+).
4. Fix [explained issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+label%3Aexplained+no%3Amilestone+-label%3Abackend%2Fk8s+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+)

### Other Backlog

[Confirmed needs for documentation fixes](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+label%3Adocumentation++label%3Aconfirmed+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+)
[Confirmed needs for documentation fixes](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+label%3Adocumentation++label%3Aconfirmed+-label%3Abackend%2Fk8s+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+)
[Confirmed bugs that will require significant projects to fix](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+label%3Aconfirmed+label%3A%22pending+project%22++-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2)
[Confirmed bugs that will require significant projects to fix](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+label%3Aconfirmed+label%3A%22pending+project%22+-label%3Abackend%2Fk8s+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+)

### Milestone Use

CHANGELOG.md
@@ -1,17 +1,52 @@
## 0.15.0 (Unreleased)

BREAKING CHANGES:

* Empty provider configuration blocks should be removed from modules. If a configuration alias is required within the module, it can be defined using the `configuration_aliases` argument within `required_providers`. Existing module configurations which were accepted but could produce incorrect or undefined behavior may now return errors when loading the configuration. [GH-27739]
* The `list` and `map` functions, both of which were deprecated since Terraform v0.12, are now removed. You can replace uses of these functions with `tolist([...])` and `tomap({...})` respectively. ([#26818](https://github.com/hashicorp/terraform/issues/26818)) See the sketch after this list.
* Terraform now requires UTF-8 character encoding and virtual terminal support when running on Windows. This unifies Terraform's terminal handling on Windows with that of other platforms, as per [Microsoft recommendations](https://docs.microsoft.com/en-us/windows/console/classic-vs-vt). Terraform previously required these terminal features on all other platforms, and now requires them on Windows too.

    UTF-8 and virtual terminal support were introduced across various Windows 10 updates, and so Terraform is no longer officially supported on the original release of Windows 10 or on Windows 8 and earlier. However, there are currently no technical measures to artificially _prevent_ Terraform from running on these obsolete Windows releases, and so you _may_ still be able to use Terraform v0.15 on older Windows versions if you either disable formatting (using the `-no-color` option), or if you use a third-party terminal emulator package such as [ConEmu](https://conemu.github.io/), [Cmder](https://cmder.net/), or [mintty](https://mintty.github.io/).

    We strongly encourage planning to migrate to a newer version of Windows rather than relying on these workarounds for the long term, because the Terraform team will test future releases only on up-to-date Windows 10 and can therefore not guarantee ongoing support for older versions.

* Interrupting execution will now cause terraform to exit with a non-zero exit status. ([#26738](https://github.com/hashicorp/terraform/issues/26738))
* The trailing `[DIR]` argument to specify the working directory for various commands is no longer supported. Use the global `-chdir` option instead. ([#27664](https://github.com/hashicorp/terraform/pull/27664))

    For example, instead of `terraform init infra`, write `terraform -chdir=infra init`.
* The `-lock` and `-lock-timeout` options are no longer available on `terraform init` ([#27464](https://github.com/hashicorp/terraform/issues/27464))
* The `-verify-plugins=false` option is no longer available on `terraform init`. (Terraform now _always_ verifies plugins.) ([#27461](https://github.com/hashicorp/terraform/issues/27461))
* The `-get-plugins=false` option is no longer available on `terraform init`. (Terraform now _always_ installs plugins.) ([#27463](https://github.com/hashicorp/terraform/issues/27463))
* The `-force` option is no longer available on `terraform destroy`. Use `-auto-approve` instead ([#27681](https://github.com/hashicorp/terraform/pull/27681))
* `terraform version -json` output no longer includes the (previously-unpopulated) "revision" property [[#27484](https://github.com/hashicorp/terraform/issues/27484)]
* The `atlas` backend, which was deprecated since Terraform v0.12, is now removed. ([#26651](https://github.com/hashicorp/terraform/issues/26651))
* In the `gcs` backend the `path` config argument, which was deprecated since Terraform v0.11, is now removed. Use the `prefix` argument instead. ([#26841](https://github.com/hashicorp/terraform/issues/26841))
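A small before/after sketch of the `list`/`map` removal called out above (the values are illustrative only):

```hcl
# Accepted up to Terraform v0.14 (deprecated since v0.12), an error in v0.15:
locals {
  ports = list(80, 443)
  tags  = map("Name", "example")
}

# Terraform v0.15 equivalent, using the conversion functions instead:
locals {
  ports = tolist([80, 443])
  tags  = tomap({ Name = "example" })
}
```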

ENHANCEMENTS:

* cli: Improved support for Windows console UI on Windows 10, including bold colors and underline for HCL diagnostics. [GH-26588]
* cli: Small reorganization and tidier formatting for the main help text printed by `terraform` with no subcommands. [GH-26695]
* cli: Removed the `terraform debug` container command, which has not had any subcommands under it for a long time. [GH-26695]
* config: A `required_providers` entry can now contain `configuration_aliases` to declare additional configuration alias names without requiring a configuration block [GH-27739] (see the sketch after this list)
* config: Terraform will now emit a warning if you declare a `backend` block in a non-root module. Terraform has always ignored such declarations, but previously did so silently. This is a warning rather than an error only because it is sometimes convenient to temporarily use a root module as if it were a child module in order to test or debug its behavior separately from its main backend. ([#26954](https://github.com/hashicorp/terraform/issues/26954))
* cli: The family of error messages with the summary "Invalid for_each argument" will now include some additional context about which external values contributed to the result. ([#26747](https://github.com/hashicorp/terraform/issues/26747))
* cli: Terraform now uses UTF-8 and full VT mode even when running on Windows. Previously Terraform was using the "classic" Windows console API, which was far more limited in what formatting sequences it supported and which characters it could render. ([#27487](https://github.com/hashicorp/terraform/issues/27487))
* cli: Improved support for Windows console UI on Windows 10, including bold colors and underline for HCL diagnostics. ([#26588](https://github.com/hashicorp/terraform/issues/26588))
* cli: Diagnostic messages now have a vertical line along their left margin, which we hope will achieve a better visual hierarchy for sighted users and thus make it easier to see where the errors and warnings start and end in relation to other content that might be printed alongside. ([#27343](https://github.com/hashicorp/terraform/issues/27343))
* cli: Typing an invalid top-level command, like `terraform destory` instead of `destroy`, will now print out a specific error message about the command being invalid, rather than just printing out the usual help directory. ([#26967](https://github.com/hashicorp/terraform/issues/26967))
* cli: Plugin crashes will now be reported with more detail, pointing out the plugin name and the method call along with the stack trace ([#26694](https://github.com/hashicorp/terraform/issues/26694))
* provisioner/remote-exec: Can now run in a mode that expects the remote system to be running Windows and executing commands using the Windows command interpreter, rather than a Unix-style shell. Specify the `target_platform` as `"windows"` in the `connection` block. ([#26865](https://github.com/hashicorp/terraform/issues/26865)) (see the sketch after this list)
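For the `configuration_aliases` entry above, a minimal sketch of what a child module's declaration could look like (the provider and alias names are illustrative):

```hcl
terraform {
  required_providers {
    aws = {
      source                = "hashicorp/aws"
      configuration_aliases = [aws.alternate]
    }
  }
}

# Resources inside the module can then reference the alias without the module
# defining its own provider "aws" block; the calling module passes one in via
# its "providers" map.
resource "aws_s3_bucket" "replica" {
  provider = aws.alternate
  bucket   = "example-replica-bucket" # placeholder name
}
```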
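And a hedged sketch of the `target_platform` connection argument mentioned for `remote-exec` (the host, user, and variable are placeholders, not values from this change):

```hcl
resource "null_resource" "windows_bootstrap" {
  connection {
    type            = "ssh"
    host            = "203.0.113.10"     # placeholder address
    user            = "Administrator"
    password        = var.admin_password # assumed variable
    target_platform = "windows"          # run commands via the Windows command interpreter
  }

  provisioner "remote-exec" {
    inline = [
      "dir C:\\",
    ]
  }
}
```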

BUG FIXES:

* cli: Exit with an error if unable to gather input from the UI. For example, this may happen when running in a non-interactive environment but without `-input=false`. Previously Terraform would interpret these errors as empty strings, which could be confusing. [GH-26509]

BREAKING CHANGES:
* backend/atlas: the `atlas` backend, which was deprecated in v0.12, has been removed. [GH-26651]
* cli: Exit with an error if unable to gather input from the UI. For example, this may happen when running in a non-interactive environment but without `-input=false`. Previously Terraform would interpret these errors as empty strings, which could be confusing. ([#26509](https://github.com/hashicorp/terraform/issues/26509))
* cli: TF_LOG levels other than `trace` will now work correctly ([#26632](https://github.com/hashicorp/terraform/issues/26632))
* cli: Core and Provider logs can now be enabled separately for debugging, using `TF_LOG_CORE` and `TF_LOG_PROVIDER` ([#26685](https://github.com/hashicorp/terraform/issues/26685))
* command/console: expressions using `path` (`path.root`, `path.module`) now return the same result as they would in a configuration ([#27263](https://github.com/hashicorp/terraform/issues/27263))
* command/show: fix issue with child_modules not properly displaying in certain circumstances ([#27352](https://github.com/hashicorp/terraform/issues/27352))
* command/state list: fix bug where nested modules' resources were missing from `state list` output ([#27268](https://github.com/hashicorp/terraform/issues/27268))
* command/state mv: fix display names in errors and improve error when failing to target a whole resource ([#27482](https://github.com/hashicorp/terraform/issues/27482))
* command/taint: show resource name in -allow-missing warning ([#27501](https://github.com/hashicorp/terraform/issues/27501))
* command/untaint: show resource name in -allow-missing warning ([#27502](https://github.com/hashicorp/terraform/issues/27502))
* core: validate will now ignore providers without configuration ([#24896](https://github.com/hashicorp/terraform/issues/24896))
* core: refresh data sources during destroy ([#27408](https://github.com/hashicorp/terraform/issues/27408))

## Previous Releases

@@ -7,7 +7,7 @@
# the officially-released binary from releases.hashicorp.com and are
# built by the (closed-source) official release process.

FROM golang:alpine
FROM docker.mirror.hashicorp.services/golang:alpine
LABEL maintainer="HashiCorp Terraform Team <terraform@hashicorp.com>"

RUN apk add --no-cache git bash openssh

Makefile
@@ -6,12 +6,6 @@ VERSION?="0.3.44"
# "make protobuf".
generate:
	go generate ./...
	# go fmt doesn't support -mod=vendor but it still wants to populate the
	# module cache with everything in go.mod even though formatting requires
	# no dependencies, and so we're disabling modules mode for this right
	# now until the "go fmt" behavior is rationalized to either support the
	# -mod= argument or _not_ try to install things.
	GO111MODULE=off go fmt command/internal_plugin_list.go > /dev/null

# We separate the protobuf generation because most development tasks on
# Terraform do not involve changing protobuf files and protoc is not a

@@ -48,29 +42,6 @@ endif
		--workdir /terraform-website \
		hashicorp/middleman-hashicorp:${VERSION}

website-test:
ifeq (,$(wildcard $(GOPATH)/src/$(WEBSITE_REPO)))
	echo "$(WEBSITE_REPO) not found in your GOPATH (necessary for layouts and assets), get-ting..."
	git clone https://$(WEBSITE_REPO) $(GOPATH)/src/$(WEBSITE_REPO)
endif
	$(eval WEBSITE_PATH := $(GOPATH)/src/$(WEBSITE_REPO))
	@echo "==> Testing core website in Docker..."
	-@docker stop "tf-website-core-temp"
	@docker run \
		--detach \
		--rm \
		--name "tf-website-core-temp" \
		--publish "4567:4567" \
		--volume "$(shell pwd)/website:/website" \
		--volume "$(shell pwd):/ext/terraform" \
		--volume "$(WEBSITE_PATH)/content:/terraform-website" \
		--volume "$(WEBSITE_PATH)/content/source/assets:/website/docs/assets" \
		--volume "$(WEBSITE_PATH)/content/source/layouts:/website/docs/layouts" \
		--workdir /terraform-website \
		hashicorp/middleman-hashicorp:${VERSION}
	$(WEBSITE_PATH)/content/scripts/check-links.sh "http://127.0.0.1:4567" "/" "/docs/providers/*"
	@docker stop "tf-website-core-temp"

# disallow any parallelism (-j) for Make. This is necessary since some
# commands during the build process create temporary files that collide
# under parallel conditions.

@@ -36,7 +36,7 @@ Show off your Terraform knowledge by passing a certification exam. Visit the [ce
Developing Terraform
--------------------

This repository contains only Terraform core, which includes the command line interface and the main graph engine. Providers are implemented as plugins that each have their own repository in [the `terraform-providers` organization](https://github.com/terraform-providers) on GitHub. Instructions for developing each provider are in the associated README file. For more information, see [the provider development overview](https://www.terraform.io/docs/plugins/provider.html).
This repository contains only Terraform core, which includes the command line interface and the main graph engine. Providers are implemented as plugins, and Terraform can automatically download providers that are published on [the Terraform Registry](https://registry.terraform.io). HashiCorp develops some providers, and others are developed by other organizations. For more information, see [Extending Terraform](https://www.terraform.io/docs/extend/index.html).

To learn more about compiling Terraform and contributing suggested changes, please refer to [the contributing guide](.github/CONTRIBUTING.md).

@@ -82,6 +82,7 @@ func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Tra
	var mi ModuleInstance
	var diags tfdiags.Diagnostics

LOOP:
	for len(remain) > 0 {
		var next string
		switch tt := remain[0].(type) {

@@ -96,7 +97,7 @@ func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Tra
				Detail:  "Module address prefix must be followed by dot and then a name.",
				Subject: remain[0].SourceRange().Ptr(),
			})
			break
			break LOOP
		}

		if next != "module" {

@@ -129,7 +130,7 @@ func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Tra
				Detail:  "Prefix \"module.\" must be followed by a module name.",
				Subject: remain[0].SourceRange().Ptr(),
			})
			break
			break LOOP
		}
		remain = remain[1:]
		step := ModuleInstanceStep{
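The `break` to `break LOOP` change above is the substance of these hunks: inside a `switch` that sits in a `for` loop, a bare `break` only leaves the `switch`, so the loop keeps iterating. A minimal, self-contained sketch of that behavior (unrelated to the real traversal types):

```go
package main

import "fmt"

func main() {
	items := []string{"module", "foo", "", "bar"}

LOOP:
	for _, it := range items {
		switch it {
		case "":
			fmt.Println("invalid item: stopping the whole loop")
			break LOOP // a bare "break" here would only exit the switch and keep looping
		default:
			fmt.Println("processing", it)
		}
	}
	fmt.Println("done")
}
```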
@@ -188,12 +188,11 @@ type Operation struct {

	// The options below are more self-explanatory and affect the runtime
	// behavior of the operation.
	AutoApprove  bool
	Destroy      bool
	DestroyForce bool
	Parallelism  int
	Targets      []addrs.Targetable
	Variables    map[string]UnparsedVariableValue
	AutoApprove bool
	Destroy     bool
	Parallelism int
	Targets     []addrs.Targetable
	Variables   map[string]UnparsedVariableValue

	// Some operations use root module variables only opportunistically or
	// don't need them at all. If this flag is set, the backend must treat
@@ -1,9 +1,11 @@
package backend

import (
	"github.com/hashicorp/terraform/terraform"
	"github.com/mitchellh/cli"
	"github.com/mitchellh/colorstring"

	"github.com/hashicorp/terraform/internal/terminal"
	"github.com/hashicorp/terraform/terraform"
)

// CLI is an optional interface that can be implemented to be initialized

@@ -48,6 +50,12 @@ type CLIOpts struct {
	CLI cli.Ui
	CLIColor *colorstring.Colorize

	// Streams describes the low-level streams for Stdout, Stderr and Stdin,
	// including some metadata about whether they are terminals. Most output
	// should go via the object in field CLI above, but Streams can be useful
	// for tailoring the output to fit the attached terminal, for example.
	Streams *terminal.Streams

	// ShowDiagnostics is a function that will format and print diagnostic
	// messages to the UI.
	ShowDiagnostics func(vals ...interface{})
@@ -14,6 +14,7 @@ import (
	"github.com/hashicorp/terraform/backend"
	"github.com/hashicorp/terraform/command/clistate"
	"github.com/hashicorp/terraform/configs/configschema"
	"github.com/hashicorp/terraform/internal/terminal"
	"github.com/hashicorp/terraform/states/statemgr"
	"github.com/hashicorp/terraform/terraform"
	"github.com/hashicorp/terraform/tfdiags"

@@ -38,6 +39,10 @@ type Local struct {
	CLI cli.Ui
	CLIColor *colorstring.Colorize

	// If CLI is set then Streams might also be set, to describe the physical
	// input/output handles that CLI is connected to.
	Streams *terminal.Streams

	// ShowDiagnostics prints diagnostic messages to the UI.
	ShowDiagnostics func(vals ...interface{})
@@ -39,15 +39,13 @@ func (b *Local) opApply(
		return
	}

	// Setup our count hook that keeps track of resource changes
	countHook := new(CountHook)
	stateHook := new(StateHook)
	if b.ContextOpts == nil {
		b.ContextOpts = new(terraform.ContextOpts)
	}
	old := b.ContextOpts.Hooks
	defer func() { b.ContextOpts.Hooks = old }()
	b.ContextOpts.Hooks = append(b.ContextOpts.Hooks, countHook, stateHook)
	b.ContextOpts.Hooks = append(b.ContextOpts.Hooks, stateHook)

	// Get our context
	tfCtx, _, opState, contextDiags := b.context(op)

@@ -81,7 +79,7 @@ func (b *Local) opApply(

	trivialPlan := plan.Changes.Empty()
	hasUI := op.UIOut != nil && op.UIIn != nil
	mustConfirm := hasUI && ((op.Destroy && (!op.DestroyForce && !op.AutoApprove)) || (!op.Destroy && !op.AutoApprove && !trivialPlan))
	mustConfirm := hasUI && !op.AutoApprove && !trivialPlan
	if mustConfirm {
		var desc, query string
		if op.Destroy {
@@ -137,7 +135,7 @@ func (b *Local) opApply(
		}
	}

	// Setup our hook for continuous state updates
	// Set up our hook for continuous state updates
	stateHook.StateMgr = opState

	// Start the apply in a goroutine so that we can be interrupted.

@@ -183,35 +181,6 @@ func (b *Local) opApply(
	// here just before we show the summary and next steps. If we encountered
	// errors then we would've returned early at some other point above.
	b.ShowDiagnostics(diags)

	// If we have a UI, output the results
	if b.CLI != nil {
		if op.Destroy {
			b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
				"[reset][bold][green]\n"+
					"Destroy complete! Resources: %d destroyed.",
				countHook.Removed)))
		} else {
			b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
				"[reset][bold][green]\n"+
					"Apply complete! Resources: %d added, %d changed, %d destroyed.",
				countHook.Added,
				countHook.Changed,
				countHook.Removed)))
		}

		// only show the state file help message if the state is local.
		if (countHook.Added > 0 || countHook.Changed > 0) && b.StateOutPath != "" {
			b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
				"[reset]\n"+
					"The state of your infrastructure has been saved to the path\n"+
					"below. This state is required to modify and destroy your\n"+
					"infrastructure, so keep it safe. To inspect the complete state\n"+
					"use the `terraform show` command.\n\n"+
					"State path: %s",
				b.StateOutPath)))
		}
	}
}

// backupStateForError is called in a scenario where we're unable to persist the
@@ -27,7 +27,7 @@ func TestLocal_applyBasic(t *testing.T) {
	defer cleanup()

	p := TestLocalProvider(t, b, "test", applyFixtureSchema())
	p.ApplyResourceChangeResponse = providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{
	p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{
		"id":  cty.StringVal("yes"),
		"ami": cty.StringVal("bar"),
	})}

@@ -70,7 +70,7 @@ func TestLocal_applyEmptyDir(t *testing.T) {
	defer cleanup()

	p := TestLocalProvider(t, b, "test", &terraform.ProviderSchema{})
	p.ApplyResourceChangeResponse = providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("yes")})}
	p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("yes")})}

	op, configCleanup := testOperationApply(t, "./testdata/empty")
	defer configCleanup()

@@ -101,7 +101,7 @@ func TestLocal_applyEmptyDirDestroy(t *testing.T) {
	defer cleanup()

	p := TestLocalProvider(t, b, "test", &terraform.ProviderSchema{})
	p.ApplyResourceChangeResponse = providers.ApplyResourceChangeResponse{}
	p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{}

	op, configCleanup := testOperationApply(t, "./testdata/empty")
	defer configCleanup()

@@ -193,7 +193,7 @@ func TestLocal_applyBackendFail(t *testing.T) {
	defer cleanup()

	p := TestLocalProvider(t, b, "test", applyFixtureSchema())
	p.ApplyResourceChangeResponse = providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{
	p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{
		"id":  cty.StringVal("yes"),
		"ami": cty.StringVal("bar"),
	})}
@@ -241,6 +241,30 @@ test_instance.foo:
	assertBackendStateUnlocked(t, b)
}

func TestLocal_applyRefreshFalse(t *testing.T) {
	b, cleanup := TestLocal(t)
	defer cleanup()

	p := TestLocalProvider(t, b, "test", planFixtureSchema())
	testStateFile(t, b.StatePath, testPlanState())

	op, configCleanup := testOperationApply(t, "./testdata/plan")
	defer configCleanup()

	run, err := b.Operation(context.Background(), op)
	if err != nil {
		t.Fatalf("bad: %s", err)
	}
	<-run.Done()
	if run.Result != backend.OperationSuccess {
		t.Fatalf("plan operation failed")
	}

	if p.ReadResourceCalled {
		t.Fatal("ReadResource should not be called")
	}
}

type backendWithFailingState struct {
	Local
}
@@ -78,7 +78,7 @@ func (b *Local) context(op *backend.Operation) (*terraform.Context, *configload.
	opts.Targets = op.Targets
	opts.UIInput = op.UIIn

	opts.SkipRefresh = op.Type == backend.OperationTypePlan && !op.PlanRefresh
	opts.SkipRefresh = op.Type != backend.OperationTypeRefresh && !op.PlanRefresh
	if opts.SkipRefresh {
		log.Printf("[DEBUG] backend/local: skipping refresh of managed resources")
	}
@@ -32,6 +32,8 @@ func (b *Local) opPlan(

	var diags tfdiags.Diagnostics

	outputColumns := b.outputColumns()

	if op.PlanFile != nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,

@@ -57,14 +59,9 @@ func (b *Local) opPlan(
		return
	}

	// Setup our count hook that keeps track of resource changes
	countHook := new(CountHook)
	if b.ContextOpts == nil {
		b.ContextOpts = new(terraform.ContextOpts)
	}
	old := b.ContextOpts.Hooks
	defer func() { b.ContextOpts.Hooks = old }()
	b.ContextOpts.Hooks = append(b.ContextOpts.Hooks, countHook)

	// Get our context
	tfCtx, configSnap, opState, ctxDiags := b.context(op)
@@ -150,6 +147,7 @@ func (b *Local) opPlan(

	if runningOp.PlanEmpty {
		b.CLI.Output("\n" + b.Colorize().Color(strings.TrimSpace(planNoChanges)))
		b.CLI.Output("\n" + strings.TrimSpace(format.WordWrap(planNoChangesDetail, outputColumns)))
		// Even if there are no changes, there still could be some warnings
		b.ShowDiagnostics(diags)
		return

@@ -166,15 +164,15 @@ func (b *Local) opPlan(
	// tool which is presumed to provide its own UI for further actions.
	if !b.RunningInAutomation {

		b.CLI.Output("\n------------------------------------------------------------------------")
		b.outputHorizRule()

		if path := op.PlanOutPath; path == "" {
			b.CLI.Output(fmt.Sprintf(
				"\n" + strings.TrimSpace(planHeaderNoOutput) + "\n",
				"\n" + strings.TrimSpace(format.WordWrap(planHeaderNoOutput, outputColumns)) + "\n",
			))
		} else {
			b.CLI.Output(fmt.Sprintf(
				"\n"+strings.TrimSpace(planHeaderYesOutput)+"\n",
				"\n"+strings.TrimSpace(format.WordWrap(planHeaderYesOutput, outputColumns))+"\n",
				path, path,
			))
		}
@@ -183,7 +181,7 @@ func (b *Local) opPlan(
}

func (b *Local) renderPlan(plan *plans.Plan, baseState *states.State, schemas *terraform.Schemas) {
	RenderPlan(plan, baseState, schemas, b.CLI, b.Colorize())
	RenderPlan(plan, baseState, schemas, b.CLI, b.Colorize(), b.outputColumns())
}

// RenderPlan renders the given plan to the given UI.

@@ -206,7 +204,7 @@ func (b *Local) renderPlan(plan *plans.Plan, baseState *states.State, schemas *t
// output values will not currently be rendered because their prior values
// are currently stored only in the prior state. (see the docstring for
// func planHasSideEffects for why this is and when that might change)
func RenderPlan(plan *plans.Plan, baseState *states.State, schemas *terraform.Schemas, ui cli.Ui, colorize *colorstring.Colorize) {
func RenderPlan(plan *plans.Plan, baseState *states.State, schemas *terraform.Schemas, ui cli.Ui, colorize *colorstring.Colorize, width int) {
	counts := map[plans.Action]int{}
	var rChanges []*plans.ResourceInstanceChangeSrc
	for _, change := range plan.Changes.Resources {

@@ -220,7 +218,7 @@ func RenderPlan(plan *plans.Plan, baseState *states.State, schemas *terraform.Sc
	}

	headerBuf := &bytes.Buffer{}
	fmt.Fprintf(headerBuf, "\n%s\n", strings.TrimSpace(planHeaderIntro))
	fmt.Fprintf(headerBuf, "\n%s\n", strings.TrimSpace(format.WordWrap(planHeaderIntro, width)))
	if counts[plans.Create] > 0 {
		fmt.Fprintf(headerBuf, "%s create\n", format.DiffActionSymbol(plans.Create))
	}
@ -314,24 +312,31 @@ func RenderPlan(plan *plans.Plan, baseState *states.State, schemas *terraform.Sc
|
|||
|
||||
// If there is at least one planned change to the root module outputs
|
||||
// then we'll render a summary of those too.
|
||||
if len(plan.Changes.Outputs) > 0 {
|
||||
ui.Output(colorize.Color("[reset]\n[bold]Changes to Outputs:[reset]" + format.OutputChanges(plan.Changes.Outputs, colorize)))
|
||||
var changedRootModuleOutputs []*plans.OutputChangeSrc
|
||||
for _, output := range plan.Changes.Outputs {
|
||||
if !output.Addr.Module.IsRoot() {
|
||||
continue
|
||||
}
|
||||
if output.ChangeSrc.Action == plans.NoOp {
|
||||
continue
|
||||
}
|
||||
changedRootModuleOutputs = append(changedRootModuleOutputs, output)
|
||||
}
|
||||
if len(changedRootModuleOutputs) > 0 {
|
||||
ui.Output(colorize.Color("[reset]\n[bold]Changes to Outputs:[reset]" + format.OutputChanges(changedRootModuleOutputs, colorize)))
|
||||
}
|
||||
}
|
||||
|
||||
const planHeaderIntro = `
|
||||
An execution plan has been generated and is shown below.
|
||||
Resource actions are indicated with the following symbols:
|
||||
Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
|
||||
`
|
||||
|
||||
const planHeaderNoOutput = `
|
||||
Note: You didn't specify an "-out" parameter to save this plan, so Terraform
|
||||
can't guarantee that exactly these actions will be performed if
|
||||
"terraform apply" is subsequently run.
|
||||
Note: You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run "terraform apply" now.
|
||||
`
|
||||
|
||||
const planHeaderYesOutput = `
|
||||
This plan was saved to: %s
|
||||
Saved the plan to: %s
|
||||
|
||||
To perform exactly these actions, run the following command to apply:
|
||||
terraform apply %q
|
||||
|
@ -339,14 +344,8 @@ To perform exactly these actions, run the following command to apply:
|
|||
|
||||
const planNoChanges = `
|
||||
[reset][bold][green]No changes. Infrastructure is up-to-date.[reset][green]
|
||||
|
||||
This means that Terraform did not detect any differences between your
|
||||
configuration and real physical resources that exist. As a result, no
|
||||
actions need to be performed.
|
||||
`
|
||||
|
||||
const planRefreshing = `
|
||||
[reset][bold]Refreshing Terraform state in-memory prior to plan...[reset]
|
||||
The refreshed state will be used to calculate this plan, but will not be
|
||||
persisted to local or remote state storage.
|
||||
const planNoChangesDetail = `
|
||||
That Terraform did not detect any differences between your configuration and the remote system(s). As a result, there are no actions to take.
|
||||
`
|
||||
|
|
|
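For illustration, the new width parameter means the plan header intro is wrapped to the terminal width before printing. The snippet below is a minimal, self-contained sketch: wordWrap here is a stand-in greedy wrapper, not the actual format.WordWrap implementation.

package main

import (
	"fmt"
	"strings"
)

// wordWrap is a stand-in for format.WordWrap: a simple greedy wrapper that
// breaks on spaces so no output line exceeds the given width.
func wordWrap(s string, width int) string {
	var b strings.Builder
	lineLen := 0
	for i, word := range strings.Fields(s) {
		if i > 0 {
			if lineLen+1+len(word) > width {
				b.WriteString("\n")
				lineLen = 0
			} else {
				b.WriteString(" ")
				lineLen++
			}
		}
		b.WriteString(word)
		lineLen += len(word)
	}
	return b.String()
}

func main() {
	intro := "Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:"
	// 78 columns matches the fallback width used when no terminal is attached.
	fmt.Println(wordWrap(intro, 78))
}
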
@ -4,7 +4,6 @@ import (
|
|||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
|
@ -51,7 +50,7 @@ func TestLocal_planInAutomation(t *testing.T) {
|
|||
defer cleanup()
|
||||
TestLocalProvider(t, b, "test", planFixtureSchema())
|
||||
|
||||
const msg = `You didn't specify an "-out" parameter`
|
||||
const msg = `You didn't use the -out option`
|
||||
|
||||
// When we're "in automation" we omit certain text from the
|
||||
// plan output. However, testing for the absense of text is
|
||||
|
@ -77,7 +76,7 @@ func TestLocal_planInAutomation(t *testing.T) {
|
|||
|
||||
output := b.CLI.(*cli.MockUi).OutputWriter.String()
|
||||
if !strings.Contains(output, msg) {
|
||||
t.Fatalf("missing next-steps message when not in automation")
|
||||
t.Fatalf("missing next-steps message when not in automation\nwant: %s\noutput:\n%s", msg, output)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -241,6 +240,56 @@ Changes to Outputs:
|
|||
}
|
||||
}
|
||||
|
||||
// Module outputs should not cause the plan to be rendered
|
||||
func TestLocal_planModuleOutputsChanged(t *testing.T) {
|
||||
b, cleanup := TestLocal(t)
|
||||
defer cleanup()
|
||||
testStateFile(t, b.StatePath, states.BuildState(func(ss *states.SyncState) {
|
||||
ss.SetOutputValue(addrs.AbsOutputValue{
|
||||
Module: addrs.RootModuleInstance.Child("mod", addrs.NoKey),
|
||||
OutputValue: addrs.OutputValue{Name: "changed"},
|
||||
}, cty.StringVal("before"), false)
|
||||
}))
|
||||
b.CLI = cli.NewMockUi()
|
||||
outDir := testTempDir(t)
|
||||
defer os.RemoveAll(outDir)
|
||||
planPath := filepath.Join(outDir, "plan.tfplan")
|
||||
op, configCleanup := testOperationPlan(t, "./testdata/plan-module-outputs-changed")
|
||||
defer configCleanup()
|
||||
op.PlanRefresh = true
|
||||
op.PlanOutPath = planPath
|
||||
cfg := cty.ObjectVal(map[string]cty.Value{
|
||||
"path": cty.StringVal(b.StatePath),
|
||||
})
|
||||
cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
op.PlanOutBackend = &plans.Backend{
|
||||
Type: "local",
|
||||
Config: cfgRaw,
|
||||
}
|
||||
run, err := b.Operation(context.Background(), op)
|
||||
if err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
<-run.Done()
|
||||
if run.Result != backend.OperationSuccess {
|
||||
t.Fatalf("plan operation failed")
|
||||
}
|
||||
if !run.PlanEmpty {
|
||||
t.Fatal("plan should be empty")
|
||||
}
|
||||
|
||||
expectedOutput := strings.TrimSpace(`
|
||||
No changes. Infrastructure is up-to-date.
|
||||
`)
|
||||
output := b.CLI.(*cli.MockUi).OutputWriter.String()
|
||||
if !strings.Contains(output, expectedOutput) {
|
||||
t.Fatalf("Unexpected output:\n%s\n\nwant output containing:\n%s", output, expectedOutput)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLocal_planTainted(t *testing.T) {
|
||||
b, cleanup := TestLocal(t)
|
||||
defer cleanup()
|
||||
|
@ -281,8 +330,8 @@ func TestLocal_planTainted(t *testing.T) {
|
|||
t.Fatal("plan should not be empty")
|
||||
}
|
||||
|
||||
expectedOutput := `An execution plan has been generated and is shown below.
|
||||
Resource actions are indicated with the following symbols:
|
||||
expectedOutput := `Terraform used the selected providers to generate the following execution
|
||||
plan. Resource actions are indicated with the following symbols:
|
||||
-/+ destroy and then create replacement
|
||||
|
||||
Terraform will perform the following actions:
|
||||
|
@ -383,8 +432,8 @@ func TestLocal_planDeposedOnly(t *testing.T) {
|
|||
// it's also possible for there to be _multiple_ deposed objects, in the
|
||||
// unlikely event that create_before_destroy _keeps_ crashing across
|
||||
// subsequent runs.
|
||||
expectedOutput := `An execution plan has been generated and is shown below.
|
||||
Resource actions are indicated with the following symbols:
|
||||
expectedOutput := `Terraform used the selected providers to generate the following execution
|
||||
plan. Resource actions are indicated with the following symbols:
|
||||
+ create
|
||||
- destroy
|
||||
|
||||
|
@ -457,8 +506,8 @@ func TestLocal_planTainted_createBeforeDestroy(t *testing.T) {
|
|||
t.Fatal("plan should not be empty")
|
||||
}
|
||||
|
||||
expectedOutput := `An execution plan has been generated and is shown below.
|
||||
Resource actions are indicated with the following symbols:
|
||||
expectedOutput := `Terraform used the selected providers to generate the following execution
|
||||
plan. Resource actions are indicated with the following symbols:
|
||||
+/- create replacement and then destroy
|
||||
|
||||
Terraform will perform the following actions:
|
||||
|
@ -509,7 +558,7 @@ func TestLocal_planDestroy(t *testing.T) {
|
|||
b, cleanup := TestLocal(t)
|
||||
defer cleanup()
|
||||
|
||||
p := TestLocalProvider(t, b, "test", planFixtureSchema())
|
||||
TestLocalProvider(t, b, "test", planFixtureSchema())
|
||||
testStateFile(t, b.StatePath, testPlanState())
|
||||
|
||||
outDir := testTempDir(t)
|
||||
|
@ -543,10 +592,6 @@ func TestLocal_planDestroy(t *testing.T) {
|
|||
t.Fatalf("plan operation failed")
|
||||
}
|
||||
|
||||
if p.ReadResourceCalled {
|
||||
t.Fatal("ReadResource should not be called")
|
||||
}
|
||||
|
||||
if run.PlanEmpty {
|
||||
t.Fatal("plan should not be empty")
|
||||
}
|
||||
|
@ -563,7 +608,7 @@ func TestLocal_planDestroy_withDataSources(t *testing.T) {
|
|||
b, cleanup := TestLocal(t)
|
||||
defer cleanup()
|
||||
|
||||
p := TestLocalProvider(t, b, "test", planFixtureSchema())
|
||||
TestLocalProvider(t, b, "test", planFixtureSchema())
|
||||
testStateFile(t, b.StatePath, testPlanState_withDataSource())
|
||||
|
||||
b.CLI = cli.NewMockUi()
|
||||
|
@ -599,14 +644,6 @@ func TestLocal_planDestroy_withDataSources(t *testing.T) {
|
|||
t.Fatalf("plan operation failed")
|
||||
}
|
||||
|
||||
if p.ReadResourceCalled {
|
||||
t.Fatal("ReadResource should not be called")
|
||||
}
|
||||
|
||||
if p.ReadDataSourceCalled {
|
||||
t.Fatal("ReadDataSourceCalled should not be called")
|
||||
}
|
||||
|
||||
if run.PlanEmpty {
|
||||
t.Fatal("plan should not be empty")
|
||||
}
|
||||
|
@ -640,7 +677,7 @@ Plan: 0 to add, 0 to change, 1 to destroy.`
|
|||
}
|
||||
|
||||
func getAddrs(resources []*plans.ResourceInstanceChangeSrc) []string {
|
||||
addrs := make([]string, len(resources), len(resources))
|
||||
addrs := make([]string, len(resources))
|
||||
for i, r := range resources {
|
||||
addrs[i] = r.Addr.String()
|
||||
}
|
||||
|
@ -690,49 +727,6 @@ func TestLocal_planOutPathNoChange(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// TestLocal_planScaleOutNoDupeCount tests a Refresh/Plan sequence when a
|
||||
// resource count is scaled out. The scaled out node needs to exist in the
|
||||
// graph and run through a plan-style sequence during the refresh phase, but
|
||||
// can conflate the count if its post-diff count hooks are not skipped. This
|
||||
// checks to make sure the correct resource count is ultimately given to the
|
||||
// UI.
|
||||
func TestLocal_planScaleOutNoDupeCount(t *testing.T) {
|
||||
b, cleanup := TestLocal(t)
|
||||
defer cleanup()
|
||||
TestLocalProvider(t, b, "test", planFixtureSchema())
|
||||
testStateFile(t, b.StatePath, testPlanState())
|
||||
|
||||
actual := new(CountHook)
|
||||
b.ContextOpts.Hooks = append(b.ContextOpts.Hooks, actual)
|
||||
|
||||
outDir := testTempDir(t)
|
||||
defer os.RemoveAll(outDir)
|
||||
|
||||
op, configCleanup := testOperationPlan(t, "./testdata/plan-scaleout")
|
||||
defer configCleanup()
|
||||
op.PlanRefresh = true
|
||||
|
||||
run, err := b.Operation(context.Background(), op)
|
||||
if err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
<-run.Done()
|
||||
if run.Result != backend.OperationSuccess {
|
||||
t.Fatalf("plan operation failed")
|
||||
}
|
||||
|
||||
expected := new(CountHook)
|
||||
expected.ToAdd = 1
|
||||
expected.ToChange = 0
|
||||
expected.ToRemoveAndAdd = 0
|
||||
expected.ToRemove = 0
|
||||
|
||||
if !reflect.DeepEqual(expected, actual) {
|
||||
t.Fatalf("Expected %#v, got %#v instead.",
|
||||
expected, actual)
|
||||
}
|
||||
}
|
||||
|
||||
func testOperationPlan(t *testing.T, configDir string) (*backend.Operation, func()) {
|
||||
t.Helper()
|
||||
|
||||
|
|
|
@ -24,7 +24,7 @@ func TestLocal_refresh(t *testing.T) {
|
|||
testStateFile(t, b.StatePath, testRefreshState())
|
||||
|
||||
p.ReadResourceFn = nil
|
||||
p.ReadResourceResponse = providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{
|
||||
p.ReadResourceResponse = &providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{
|
||||
"id": cty.StringVal("yes"),
|
||||
})}
|
||||
|
||||
|
@ -76,7 +76,7 @@ func TestLocal_refreshInput(t *testing.T) {
|
|||
testStateFile(t, b.StatePath, testRefreshState())
|
||||
|
||||
p.ReadResourceFn = nil
|
||||
p.ReadResourceResponse = providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{
|
||||
p.ReadResourceResponse = &providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{
|
||||
"id": cty.StringVal("yes"),
|
||||
})}
|
||||
p.ConfigureFn = func(req providers.ConfigureRequest) (resp providers.ConfigureResponse) {
|
||||
|
@ -119,7 +119,7 @@ func TestLocal_refreshValidate(t *testing.T) {
|
|||
p := TestLocalProvider(t, b, "test", refreshFixtureSchema())
|
||||
testStateFile(t, b.StatePath, testRefreshState())
|
||||
p.ReadResourceFn = nil
|
||||
p.ReadResourceResponse = providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{
|
||||
p.ReadResourceResponse = &providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{
|
||||
"id": cty.StringVal("yes"),
|
||||
})}
|
||||
|
||||
|
@ -135,6 +135,52 @@ func TestLocal_refreshValidate(t *testing.T) {
|
|||
}
|
||||
<-run.Done()
|
||||
|
||||
checkState(t, b.StateOutPath, `
|
||||
test_instance.foo:
|
||||
ID = yes
|
||||
provider = provider["registry.terraform.io/hashicorp/test"]
|
||||
`)
|
||||
}
|
||||
|
||||
func TestLocal_refreshValidateProviderConfigured(t *testing.T) {
|
||||
b, cleanup := TestLocal(t)
|
||||
defer cleanup()
|
||||
|
||||
schema := &terraform.ProviderSchema{
|
||||
Provider: &configschema.Block{
|
||||
Attributes: map[string]*configschema.Attribute{
|
||||
"value": {Type: cty.String, Optional: true},
|
||||
},
|
||||
},
|
||||
ResourceTypes: map[string]*configschema.Block{
|
||||
"test_instance": {
|
||||
Attributes: map[string]*configschema.Attribute{
|
||||
"id": {Type: cty.String, Computed: true},
|
||||
"ami": {Type: cty.String, Optional: true},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
p := TestLocalProvider(t, b, "test", schema)
|
||||
testStateFile(t, b.StatePath, testRefreshState())
|
||||
p.ReadResourceFn = nil
|
||||
p.ReadResourceResponse = &providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{
|
||||
"id": cty.StringVal("yes"),
|
||||
})}
|
||||
|
||||
// Enable validation
|
||||
b.OpValidation = true
|
||||
|
||||
op, configCleanup := testOperationRefresh(t, "./testdata/refresh-provider-config")
|
||||
defer configCleanup()
|
||||
|
||||
run, err := b.Operation(context.Background(), op)
|
||||
if err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
<-run.Done()
|
||||
|
||||
if !p.PrepareProviderConfigCalled {
|
||||
t.Fatal("Prepare provider config should be called")
|
||||
}
|
||||
|
|
|
@ -4,12 +4,14 @@ import (
"log"

"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/command/format"
)

// backend.CLI impl.
func (b *Local) CLIInit(opts *backend.CLIOpts) error {
b.CLI = opts.CLI
b.CLIColor = opts.CLIColor
b.Streams = opts.Streams
b.ShowDiagnostics = opts.ShowDiagnostics
b.ContextOpts = opts.ContextOpts
b.OpInput = opts.Input

@ -34,3 +36,45 @@ func (b *Local) CLIInit(opts *backend.CLIOpts) error {

return nil
}

// outputColumns returns the number of text character cells any non-error
// output should be wrapped to.
//
// This is the number of columns to use if you are calling b.CLI.Output or
// b.CLI.Info.
func (b *Local) outputColumns() int {
if b.Streams == nil {
// We can potentially get here in tests, if they don't populate the
// CLIOpts fully.
return 78 // placeholder just so we don't panic
}
return b.Streams.Stdout.Columns()
}

// errorColumns returns the number of text character cells any error
// output should be wrapped to.
//
// This is the number of columns to use if you are calling b.CLI.Error or
// b.CLI.Warn.
func (b *Local) errorColumns() int {
if b.Streams == nil {
// We can potentially get here in tests, if they don't populate the
// CLIOpts fully.
return 78 // placeholder just so we don't panic
}
return b.Streams.Stderr.Columns()
}

// outputHorizRule will call b.CLI.Output with enough horizontal line
// characters to fill an entire row of output.
//
// This function does nothing if the backend doesn't have a CLI attached.
//
// If UI color is enabled, the rule will get a dark grey coloring to try to
// visually de-emphasize it.
func (b *Local) outputHorizRule() {
if b.CLI == nil {
return
}
b.CLI.Output(format.HorizontalRule(b.CLIColor, b.outputColumns()))
}

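For illustration, the pattern above is: take the detected stdout/stderr width when a terminal is attached, otherwise fall back to 78 columns, and size everything (wrapped text, horizontal rules) off that number. A small hedged sketch, with terminalColumns standing in for b.Streams.Stdout.Columns():

package main

import (
	"fmt"
	"strings"
)

// terminalColumns stands in for b.Streams.Stdout.Columns(): it reports the
// width to wrap to, falling back to 78 when no terminal size is known.
func terminalColumns(detected int) int {
	if detected <= 0 {
		return 78
	}
	return detected
}

func main() {
	width := terminalColumns(0) // no terminal attached, so we get the 78-column fallback
	// A full-width horizontal rule, similar in spirit to format.HorizontalRule
	// as used by outputHorizRule.
	fmt.Println(strings.Repeat("-", width))
}
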
@ -1,25 +0,0 @@
|
|||
// Code generated by "stringer -type=countHookAction hook_count_action.go"; DO NOT EDIT.
|
||||
|
||||
package local
|
||||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[countHookActionAdd-0]
|
||||
_ = x[countHookActionChange-1]
|
||||
_ = x[countHookActionRemove-2]
|
||||
}
|
||||
|
||||
const _countHookAction_name = "countHookActionAddcountHookActionChangecountHookActionRemove"
|
||||
|
||||
var _countHookAction_index = [...]uint8{0, 18, 39, 60}
|
||||
|
||||
func (i countHookAction) String() string {
|
||||
if i >= countHookAction(len(_countHookAction_index)-1) {
|
||||
return "countHookAction(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
return _countHookAction_name[_countHookAction_index[i]:_countHookAction_index[i+1]]
|
||||
}
|
|
@ -1,11 +0,0 @@
|
|||
package local
|
||||
|
||||
//go:generate go run golang.org/x/tools/cmd/stringer -type=countHookAction hook_count_action.go
|
||||
|
||||
type countHookAction byte
|
||||
|
||||
const (
|
||||
countHookActionAdd countHookAction = iota
|
||||
countHookActionChange
|
||||
countHookActionRemove
|
||||
)
|
|
@ -1,4 +1,5 @@
|
|||
resource "test_instance" "foo" {
|
||||
count = 1
|
||||
ami = "bar"
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,3 @@
|
|||
module "mod" {
|
||||
source = "./mod"
|
||||
}
|
|
@ -0,0 +1,3 @@
|
|||
output "changed" {
|
||||
value = "after"
|
||||
}
|
|
@ -1,3 +1,7 @@
|
|||
module "submodule" {
|
||||
source = "./submodule"
|
||||
}
|
||||
|
||||
output "changed" {
|
||||
value = "after"
|
||||
}
|
||||
|
|
|
@ -0,0 +1,3 @@
|
|||
output "foo" {
|
||||
value = "bar"
|
||||
}
|
|
@ -1,10 +0,0 @@
|
|||
resource "test_instance" "foo" {
|
||||
count = 2
|
||||
ami = "bar"
|
||||
|
||||
# This is here because at some point it caused a test failure
|
||||
network_interface {
|
||||
device_index = 0
|
||||
description = "Main network interface"
|
||||
}
|
||||
}
|
|
@ -0,0 +1,7 @@
|
|||
resource "test_instance" "foo" {
|
||||
ami = "bar"
|
||||
}
|
||||
|
||||
provider "test" {
|
||||
value = "foo"
|
||||
}
|
|
@ -72,7 +72,21 @@ func TestLocalProvider(t *testing.T, b *Local, name string, schema *terraform.Pr
|
|||
if schema == nil {
|
||||
schema = &terraform.ProviderSchema{} // default schema is empty
|
||||
}
|
||||
p.GetSchemaReturn = schema
|
||||
p.GetSchemaResponse = &providers.GetSchemaResponse{
|
||||
Provider: providers.Schema{Block: schema.Provider},
|
||||
ProviderMeta: providers.Schema{Block: schema.ProviderMeta},
|
||||
ResourceTypes: map[string]providers.Schema{},
|
||||
DataSources: map[string]providers.Schema{},
|
||||
}
|
||||
for name, res := range schema.ResourceTypes {
|
||||
p.GetSchemaResponse.ResourceTypes[name] = providers.Schema{
|
||||
Block: res,
|
||||
Version: int64(schema.ResourceTypeSchemaVersions[name]),
|
||||
}
|
||||
}
|
||||
for name, dat := range schema.DataSources {
|
||||
p.GetSchemaResponse.DataSources[name] = providers.Schema{Block: dat}
|
||||
}
|
||||
|
||||
p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
|
||||
rSchema, _ := schema.SchemaForResourceType(addrs.ManagedResourceMode, req.TypeName)
|
||||
|
@ -111,7 +125,7 @@ func TestLocalProvider(t *testing.T, b *Local, name string, schema *terraform.Pr
|
|||
b.ContextOpts = &terraform.ContextOpts{}
|
||||
}
|
||||
|
||||
// Setup our provider
|
||||
// Set up our provider
|
||||
b.ContextOpts.Providers = map[addrs.Provider]providers.Factory{
|
||||
addrs.NewDefaultProvider(name): providers.FactoryFixed(p),
|
||||
}
|
||||
|
|
|
@ -5,7 +5,7 @@ import (
|
|||
|
||||
cleanhttp "github.com/hashicorp/go-cleanhttp"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/internal/legacy/helper/schema"
|
||||
"github.com/hashicorp/terraform/states/remote"
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
artifactory "github.com/lusis/go-artifactory/src/artifactory.v401"
|
||||
|
|
|
@ -5,7 +5,7 @@ import (
|
|||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/internal/legacy/helper/schema"
|
||||
)
|
||||
|
||||
// New creates a new backend for Azure remote state.
|
||||
|
|
|
@ -6,7 +6,7 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/internal/legacy/helper/acctest"
|
||||
)
|
||||
|
||||
func TestBackend_impl(t *testing.T) {
|
||||
|
|
|
@ -6,7 +6,7 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/internal/legacy/helper/acctest"
|
||||
"github.com/hashicorp/terraform/states/remote"
|
||||
"github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/blobs"
|
||||
)
|
||||
|
|
|
@ -8,7 +8,7 @@ import (
|
|||
|
||||
consulapi "github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/internal/legacy/helper/schema"
|
||||
)
|
||||
|
||||
// New creates a new backend for Consul remote state.
|
||||
|
|
|
@ -9,7 +9,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/internal/legacy/helper/schema"
|
||||
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common"
|
||||
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile"
|
||||
tag "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tag/v20180813"
|
||||
|
|
|
@ -8,7 +8,7 @@ import (
|
|||
|
||||
etcdapi "github.com/coreos/etcd/client"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/internal/legacy/helper/schema"
|
||||
"github.com/hashicorp/terraform/states/remote"
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
)
|
||||
|
|
|
@ -6,7 +6,7 @@ import (
|
|||
etcdv3 "github.com/coreos/etcd/clientv3"
|
||||
"github.com/coreos/etcd/pkg/transport"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/internal/legacy/helper/schema"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
|
@ -11,8 +11,8 @@ import (
|
|||
|
||||
"cloud.google.com/go/storage"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/httpclient"
|
||||
"github.com/hashicorp/terraform/internal/legacy/helper/schema"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/jwt"
|
||||
"google.golang.org/api/option"
|
||||
|
@ -27,9 +27,8 @@ type Backend struct {
|
|||
storageClient *storage.Client
|
||||
storageContext context.Context
|
||||
|
||||
bucketName string
|
||||
prefix string
|
||||
defaultStateFile string
|
||||
bucketName string
|
||||
prefix string
|
||||
|
||||
encryptionKey []byte
|
||||
}
|
||||
|
@ -45,13 +44,6 @@ func New() backend.Backend {
|
|||
Description: "The name of the Google Cloud Storage bucket",
|
||||
},
|
||||
|
||||
"path": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "Path of the default state file",
|
||||
Deprecated: "Use the \"prefix\" option instead",
|
||||
},
|
||||
|
||||
"prefix": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
|
@ -74,6 +66,22 @@ func New() backend.Backend {
|
|||
Description: "An OAuth2 token used for GCP authentication",
|
||||
},
|
||||
|
||||
"impersonate_service_account": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
DefaultFunc: schema.MultiEnvDefaultFunc([]string{
|
||||
"GOOGLE_IMPERSONATE_SERVICE_ACCOUNT",
|
||||
}, nil),
|
||||
Description: "The service account to impersonate for all Google API Calls",
|
||||
},
|
||||
|
||||
"impersonate_service_account_delegates": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
Description: "The delegation chain for the impersonated service account",
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
},
|
||||
|
||||
"encryption_key": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
|
@ -121,8 +129,6 @@ func (b *Backend) configure(ctx context.Context) error {
|
|||
b.prefix = b.prefix + "/"
|
||||
}
|
||||
|
||||
b.defaultStateFile = strings.TrimLeft(data.Get("path").(string), "/")
|
||||
|
||||
var opts []option.ClientOption
|
||||
|
||||
// Add credential source
|
||||
|
@ -168,6 +174,24 @@ func (b *Backend) configure(ctx context.Context) error {
opts = append(opts, option.WithScopes(storage.ScopeReadWrite))
}

// Service Account Impersonation
if v, ok := data.GetOk("impersonate_service_account"); ok {
ServiceAccount := v.(string)
opts = append(opts, option.ImpersonateCredentials(ServiceAccount))

if v, ok := data.GetOk("impersonate_service_account_delegates"); ok {
var delegates []string
d := v.([]interface{})
if len(delegates) > 0 {
delegates = make([]string, len(d))
}
for _, delegate := range d {
delegates = append(delegates, delegate.(string))
}
opts = append(opts, option.ImpersonateCredentials(ServiceAccount, delegates...))
}
}

opts = append(opts, option.WithUserAgent(httpclient.UserAgentString()))
client, err := storage.NewClient(b.storageContext, opts...)
if err != nil {

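For illustration, the delegate handling above amounts to converting the []interface{} produced by the legacy schema layer into a []string of delegate service accounts before passing it to the impersonation option. A minimal sketch of just that conversion (the example account names are hypothetical):

package main

import "fmt"

// toStringSlice mirrors what the loop above does: each element of the raw
// schema list is asserted to a string and collected in order.
func toStringSlice(raw []interface{}) []string {
	out := make([]string, 0, len(raw))
	for _, v := range raw {
		out = append(out, v.(string))
	}
	return out
}

func main() {
	raw := []interface{}{"sa-1@example.iam.gserviceaccount.com", "sa-2@example.iam.gserviceaccount.com"}
	fmt.Println(toStringSlice(raw))
}
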
@ -146,15 +146,9 @@ func (b *Backend) StateMgr(name string) (statemgr.Full, error) {
|
|||
}
|
||||
|
||||
func (b *Backend) stateFile(name string) string {
|
||||
if name == backend.DefaultStateName && b.defaultStateFile != "" {
|
||||
return b.defaultStateFile
|
||||
}
|
||||
return path.Join(b.prefix, name+stateFileSuffix)
|
||||
}
|
||||
|
||||
func (b *Backend) lockFile(name string) string {
|
||||
if name == backend.DefaultStateName && b.defaultStateFile != "" {
|
||||
return strings.TrimSuffix(b.defaultStateFile, stateFileSuffix) + lockFileSuffix
|
||||
}
|
||||
return path.Join(b.prefix, name+lockFileSuffix)
|
||||
}
|
||||
|
|
|
@ -25,23 +25,19 @@ func TestStateFile(t *testing.T) {
|
|||
t.Parallel()
|
||||
|
||||
cases := []struct {
|
||||
prefix string
|
||||
defaultStateFile string
|
||||
name string
|
||||
wantStateFile string
|
||||
wantLockFile string
|
||||
prefix string
|
||||
name string
|
||||
wantStateFile string
|
||||
wantLockFile string
|
||||
}{
|
||||
{"state", "", "default", "state/default.tfstate", "state/default.tflock"},
|
||||
{"state", "", "test", "state/test.tfstate", "state/test.tflock"},
|
||||
{"state", "legacy.tfstate", "default", "legacy.tfstate", "legacy.tflock"},
|
||||
{"state", "legacy.tfstate", "test", "state/test.tfstate", "state/test.tflock"},
|
||||
{"state", "legacy.state", "default", "legacy.state", "legacy.state.tflock"},
|
||||
{"state", "legacy.state", "test", "state/test.tfstate", "state/test.tflock"},
|
||||
{"state", "default", "state/default.tfstate", "state/default.tflock"},
|
||||
{"state", "test", "state/test.tfstate", "state/test.tflock"},
|
||||
{"state", "test", "state/test.tfstate", "state/test.tflock"},
|
||||
{"state", "test", "state/test.tfstate", "state/test.tflock"},
|
||||
}
|
||||
for _, c := range cases {
|
||||
b := &Backend{
|
||||
prefix: c.prefix,
|
||||
defaultStateFile: c.defaultStateFile,
|
||||
prefix: c.prefix,
|
||||
}
|
||||
|
||||
if got := b.stateFile(c.name); got != c.wantStateFile {
|
||||
|
|
|
@ -11,7 +11,7 @@ import (
|
|||
"github.com/hashicorp/go-cleanhttp"
|
||||
"github.com/hashicorp/go-retryablehttp"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/internal/legacy/helper/schema"
|
||||
"github.com/hashicorp/terraform/states/remote"
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
)
|
||||
|
|
|
@ -49,7 +49,7 @@ func (c *httpClient) httpRequest(method string, url *url.URL, data *[]byte, what
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to make %s HTTP request: %s", what, err)
|
||||
}
|
||||
// Setup basic auth
|
||||
// Set up basic auth
|
||||
if c.Username != "" {
|
||||
req.SetBasicAuth(c.Username, c.Password)
|
||||
}
|
||||
|
|
|
@ -9,7 +9,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/internal/legacy/helper/schema"
|
||||
statespkg "github.com/hashicorp/terraform/states"
|
||||
"github.com/hashicorp/terraform/states/remote"
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
|
|
|
@ -8,7 +8,7 @@ import (
|
|||
"os"
|
||||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/internal/legacy/helper/schema"
|
||||
"github.com/hashicorp/terraform/version"
|
||||
"github.com/mitchellh/cli"
|
||||
"github.com/mitchellh/go-homedir"
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -11,7 +11,7 @@ import (
|
|||
"github.com/hashicorp/errwrap"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/internal/legacy/helper/schema"
|
||||
triton "github.com/joyent/triton-go"
|
||||
"github.com/joyent/triton-go/authentication"
|
||||
"github.com/joyent/triton-go/storage"
|
||||
|
|
|
@ -25,7 +25,7 @@ import (
|
|||
"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore"
|
||||
"github.com/hashicorp/go-cleanhttp"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/internal/legacy/helper/schema"
|
||||
"github.com/hashicorp/terraform/version"
|
||||
"github.com/jmespath/go-jmespath"
|
||||
"github.com/mitchellh/go-homedir"
|
||||
|
|
|
@ -6,7 +6,7 @@ import (
|
|||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/internal/legacy/helper/schema"
|
||||
"github.com/lib/pq"
|
||||
)
|
||||
|
||||
|
|
|
@ -12,7 +12,7 @@ import (
|
|||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
awsbase "github.com/hashicorp/aws-sdk-go-base"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/internal/legacy/helper/schema"
|
||||
"github.com/hashicorp/terraform/internal/logging"
|
||||
"github.com/hashicorp/terraform/version"
|
||||
)
|
||||
|
@ -327,7 +327,7 @@ func (b *Backend) configure(ctx context.Context) error {
|
|||
AssumeRoleExternalID: data.Get("external_id").(string),
|
||||
AssumeRolePolicy: data.Get("assume_role_policy").(string),
|
||||
AssumeRoleSessionName: data.Get("session_name").(string),
|
||||
CallerDocumentationURL: "https://www.terraform.io/docs/backends/types/s3.html",
|
||||
CallerDocumentationURL: "https://www.terraform.io/docs/language/settings/backends/s3.html",
|
||||
CallerName: "S3 Backend",
|
||||
CredsFilename: data.Get("shared_credentials_file").(string),
|
||||
DebugLogging: logging.IsDebugOrHigher(),
|
||||
|
|
|
@ -12,7 +12,7 @@ import (
|
|||
"github.com/gophercloud/utils/terraform/auth"
|
||||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/internal/legacy/helper/schema"
|
||||
"github.com/hashicorp/terraform/version"
|
||||
)
|
||||
|
||||
|
|
|
@ -85,6 +85,12 @@ type Remote struct {

// opLock locks operations
opLock sync.Mutex

// ignoreVersionConflict, if true, will disable the requirement that the
// local Terraform version matches the remote workspace's configured
// version. This will also cause VerifyWorkspaceTerraformVersion to return
// a warning diagnostic instead of an error.
ignoreVersionConflict bool
}

var _ backend.Backend = (*Remote)(nil)

@ -629,6 +635,20 @@ func (b *Remote) StateMgr(name string) (statemgr.Full, error) {
}
}

// This is a fallback error check. Most code paths should use other
// mechanisms to check the version, then set the ignoreVersionConflict
// field to true. This check is only in place to ensure that we don't
// accidentally upgrade state with a new code path, and the version check
// logic is coarser and simpler.
if !b.ignoreVersionConflict {
wsv := workspace.TerraformVersion
// Explicitly ignore the pseudo-version "latest" here, as it will cause
// plan and apply to always fail.
if wsv != tfversion.String() && wsv != "latest" {
return nil, fmt.Errorf("Remote workspace Terraform version %q does not match local Terraform version %q", workspace.TerraformVersion, tfversion.String())
}
}

client := &remoteClient{
client: b.client,
organization: b.organization,

@ -674,6 +694,17 @@ func (b *Remote) Operation(ctx context.Context, op *backend.Operation) (*backend
}
}

// Terraform remote version conflicts are not a concern for operations. We
// are in one of three states:
//
// - Running remotely, in which case the local version is irrelevant;
// - Workspace configured for local operations, in which case the remote
// version is meaningless;
// - Forcing local operations with a remote backend, which should only
// happen in the Terraform Cloud worker, in which case the Terraform
// versions by definition match.
b.IgnoreVersionConflict()

// Check if we need to use the local backend to run the operation.
if b.forceLocal || !w.Operations {
return b.local.Operation(ctx, op)

@ -837,6 +868,114 @@ func (b *Remote) ReportResult(op *backend.RunningOperation, err error) {
}
}

// IgnoreVersionConflict allows commands to disable the fall-back check that
// the local Terraform version matches the remote workspace's configured
// Terraform version. This should be called by commands where this check is
// unnecessary, such as those performing remote operations, or read-only
// operations. It will also be called if the user uses a command-line flag to
// override this check.
func (b *Remote) IgnoreVersionConflict() {
b.ignoreVersionConflict = true
}

// VerifyWorkspaceTerraformVersion compares the local Terraform version against
// the workspace's configured Terraform version. If they are equal, this means
// that there are no compatibility concerns, so it returns no diagnostics.
//
// If the versions differ, it returns a diagnostic describing the mismatch: an
// error by default, or a warning if IgnoreVersionConflict was called.
func (b *Remote) VerifyWorkspaceTerraformVersion(workspaceName string) tfdiags.Diagnostics {
var diags tfdiags.Diagnostics

workspace, err := b.getRemoteWorkspace(context.Background(), workspaceName)
if err != nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Error looking up workspace",
fmt.Sprintf("Workspace read failed: %s", err),
))
return diags
}

// If the workspace has the pseudo-version "latest", all bets are off. We
// cannot reasonably determine what the intended Terraform version is, so
// we'll skip version verification.
if workspace.TerraformVersion == "latest" {
return nil
}

// If the workspace has remote operations disabled, the remote Terraform
// version is effectively meaningless, so we'll skip version verification.
if workspace.Operations == false {
return nil
}

remoteVersion, err := version.NewSemver(workspace.TerraformVersion)
if err != nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Error looking up workspace",
fmt.Sprintf("Invalid Terraform version: %s", err),
))
return diags
}

v014 := version.Must(version.NewSemver("0.14.0"))
if tfversion.SemVer.LessThan(v014) || remoteVersion.LessThan(v014) {
// Versions of Terraform prior to 0.14.0 will refuse to load state files
// written by a newer version of Terraform, even if it is only a patch
// level difference. As a result we require an exact match.
if tfversion.SemVer.Equal(remoteVersion) {
return diags
}
}
if tfversion.SemVer.GreaterThanOrEqual(v014) && remoteVersion.GreaterThanOrEqual(v014) {
// Versions of Terraform after 0.14.0 should be compatible with each
// other. At the time this code was written, the only constraints we
// are aware of are:
//
// - 0.14.0 is guaranteed to be compatible with versions up to but not
// including 1.1.0
v110 := version.Must(version.NewSemver("1.1.0"))
if tfversion.SemVer.LessThan(v110) && remoteVersion.LessThan(v110) {
return diags
}
// - Any new Terraform state version will require at least minor patch
// increment, so x.y.* will always be compatible with each other
tfvs := tfversion.SemVer.Segments64()
rwvs := remoteVersion.Segments64()
if len(tfvs) == 3 && len(rwvs) == 3 && tfvs[0] == rwvs[0] && tfvs[1] == rwvs[1] {
return diags
}
}

// Even if ignoring version conflicts, it may still be useful to call this
// method and warn the user about a mismatch between the local and remote
// Terraform versions.
severity := tfdiags.Error
if b.ignoreVersionConflict {
severity = tfdiags.Warning
}

suggestion := " If you're sure you want to upgrade the state, you can force Terraform to continue using the -ignore-remote-version flag. This may result in an unusable workspace."
if b.ignoreVersionConflict {
suggestion = ""
}
diags = diags.Append(tfdiags.Sourceless(
severity,
"Terraform version mismatch",
fmt.Sprintf(
"The local Terraform version (%s) does not match the configured version for remote workspace %s/%s (%s).%s",
tfversion.String(),
b.organization,
workspace.Name,
workspace.TerraformVersion,
suggestion,
),
))

return diags
}
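For illustration, the compatibility rules documented in the comments above can be restated as a small standalone check. This is a trimmed-down sketch using github.com/hashicorp/go-version, not the function itself: below 0.14.0 an exact match is required, 0.14.x through 1.0.x are treated as mutually compatible, and otherwise the versions must share the same major.minor.

package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

// compatible restates the rules above for illustration only.
func compatible(local, remote *version.Version) bool {
	v014 := version.Must(version.NewSemver("0.14.0"))
	v110 := version.Must(version.NewSemver("1.1.0"))

	if local.LessThan(v014) || remote.LessThan(v014) {
		// Pre-0.14 state formats require an exact version match.
		return local.Equal(remote)
	}
	if local.LessThan(v110) && remote.LessThan(v110) {
		// 0.14.x through 1.0.x are mutually compatible.
		return true
	}
	// Otherwise only the same major.minor series is considered compatible.
	lv, rv := local.Segments64(), remote.Segments64()
	return lv[0] == rv[0] && lv[1] == rv[1]
}

func main() {
	local := version.Must(version.NewSemver("0.14.3"))
	for _, s := range []string{"0.14.0", "1.0.11", "1.1.0", "0.13.5"} {
		remote := version.Must(version.NewSemver(s))
		fmt.Printf("local %s vs remote %s: compatible=%v\n", local, remote, compatible(local, remote))
	}
}
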

// Colorize returns the Colorize structure that can be used for colorizing
// output. This is guaranteed to always return a non-nil value and so useful
// as a helper to wrap any potentially colored strings.

@ -170,8 +170,7 @@ func (b *Remote) opApply(stopCtx, cancelCtx context.Context, op *backend.Operati
|
|||
return r, diags.Err()
|
||||
}
|
||||
|
||||
mustConfirm := (op.UIIn != nil && op.UIOut != nil) &&
|
||||
((op.Destroy && (!op.DestroyForce && !op.AutoApprove)) || (!op.Destroy && !op.AutoApprove))
|
||||
mustConfirm := (op.UIIn != nil && op.UIOut != nil) && !op.AutoApprove
|
||||
|
||||
if !w.AutoApply {
|
||||
if mustConfirm {
|
||||
|
|
|
@ -11,12 +11,14 @@ import (
|
|||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
tfe "github.com/hashicorp/go-tfe"
|
||||
version "github.com/hashicorp/go-version"
|
||||
"github.com/hashicorp/terraform/addrs"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/internal/initwd"
|
||||
"github.com/hashicorp/terraform/plans/planfile"
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
tfversion "github.com/hashicorp/terraform/version"
|
||||
"github.com/mitchellh/cli"
|
||||
)
|
||||
|
||||
|
@ -542,8 +544,8 @@ func TestRemote_applyApprovedExternally(t *testing.T) {
|
|||
t.Fatalf("error starting operation: %v", err)
|
||||
}
|
||||
|
||||
// Wait 2 seconds to make sure the run started.
|
||||
time.Sleep(2 * time.Second)
|
||||
// Wait 50 milliseconds to make sure the run started.
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
wl, err := b.client.Workspaces.List(
|
||||
ctx,
|
||||
|
@ -617,8 +619,8 @@ func TestRemote_applyDiscardedExternally(t *testing.T) {
|
|||
t.Fatalf("error starting operation: %v", err)
|
||||
}
|
||||
|
||||
// Wait 2 seconds to make sure the run started.
|
||||
time.Sleep(2 * time.Second)
|
||||
// Wait 50 milliseconds to make sure the run started.
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
wl, err := b.client.Workspaces.List(
|
||||
ctx,
|
||||
|
@ -773,8 +775,8 @@ func TestRemote_applyForceLocal(t *testing.T) {
|
|||
if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
|
||||
t.Fatalf("expected plan summery in output: %s", output)
|
||||
}
|
||||
if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") {
|
||||
t.Fatalf("expected apply summery in output: %s", output)
|
||||
if !run.State.HasResources() {
|
||||
t.Fatalf("expected resources in state")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -831,8 +833,8 @@ func TestRemote_applyWorkspaceWithoutOperations(t *testing.T) {
|
|||
if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
|
||||
t.Fatalf("expected plan summery in output: %s", output)
|
||||
}
|
||||
if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") {
|
||||
t.Fatalf("expected apply summery in output: %s", output)
|
||||
if !run.State.HasResources() {
|
||||
t.Fatalf("expected resources in state")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -871,7 +873,7 @@ func TestRemote_applyLockTimeout(t *testing.T) {
|
|||
"approve": "yes",
|
||||
})
|
||||
|
||||
op.StateLockTimeout = 5 * time.Second
|
||||
op.StateLockTimeout = 50 * time.Millisecond
|
||||
op.UIIn = input
|
||||
op.UIOut = b.CLI
|
||||
op.Workspace = backend.DefaultStateName
|
||||
|
@ -887,8 +889,8 @@ func TestRemote_applyLockTimeout(t *testing.T) {
|
|||
case <-sigint:
|
||||
// Stop redirecting SIGINT signals.
|
||||
signal.Stop(sigint)
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Fatalf("expected lock timeout after 5 seconds, waited 10 seconds")
|
||||
case <-time.After(200 * time.Millisecond):
|
||||
t.Fatalf("expected lock timeout after 50 milliseconds, waited 200 milliseconds")
|
||||
}
|
||||
|
||||
if len(input.answers) != 2 {
|
||||
|
@ -1277,3 +1279,141 @@ func TestRemote_applyWithRemoteError(t *testing.T) {
|
|||
t.Fatalf("expected apply error in output: %s", output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemote_applyVersionCheck(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
localVersion string
|
||||
remoteVersion string
|
||||
forceLocal bool
|
||||
hasOperations bool
|
||||
wantErr string
|
||||
}{
|
||||
"versions can be different for remote apply": {
|
||||
localVersion: "0.14.0",
|
||||
remoteVersion: "0.13.5",
|
||||
hasOperations: true,
|
||||
},
|
||||
"versions can be different for local apply": {
|
||||
localVersion: "0.14.0",
|
||||
remoteVersion: "0.13.5",
|
||||
hasOperations: false,
|
||||
},
|
||||
"force local with remote operations and different versions is acceptable": {
|
||||
localVersion: "0.14.0",
|
||||
remoteVersion: "0.14.0-acme-provider-bundle",
|
||||
forceLocal: true,
|
||||
hasOperations: true,
|
||||
},
|
||||
"no error if versions are identical": {
|
||||
localVersion: "0.14.0",
|
||||
remoteVersion: "0.14.0",
|
||||
forceLocal: true,
|
||||
hasOperations: true,
|
||||
},
|
||||
"no error if force local but workspace has remote operations disabled": {
|
||||
localVersion: "0.14.0",
|
||||
remoteVersion: "0.13.5",
|
||||
forceLocal: true,
|
||||
hasOperations: false,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
b, bCleanup := testBackendDefault(t)
|
||||
defer bCleanup()
|
||||
|
||||
// SETUP: Save original local version state and restore afterwards
|
||||
p := tfversion.Prerelease
|
||||
v := tfversion.Version
|
||||
s := tfversion.SemVer
|
||||
defer func() {
|
||||
tfversion.Prerelease = p
|
||||
tfversion.Version = v
|
||||
tfversion.SemVer = s
|
||||
}()
|
||||
|
||||
// SETUP: Set local version for the test case
|
||||
tfversion.Prerelease = ""
|
||||
tfversion.Version = tc.localVersion
|
||||
tfversion.SemVer = version.Must(version.NewSemver(tc.localVersion))
|
||||
|
||||
// SETUP: Set force local for the test case
|
||||
b.forceLocal = tc.forceLocal
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// SETUP: set the operations and Terraform Version fields on the
|
||||
// remote workspace
|
||||
_, err := b.client.Workspaces.Update(
|
||||
ctx,
|
||||
b.organization,
|
||||
b.workspace,
|
||||
tfe.WorkspaceUpdateOptions{
|
||||
Operations: tfe.Bool(tc.hasOperations),
|
||||
TerraformVersion: tfe.String(tc.remoteVersion),
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating named workspace: %v", err)
|
||||
}
|
||||
|
||||
// RUN: prepare the apply operation and run it
|
||||
op, configCleanup := testOperationApply(t, "./testdata/apply")
|
||||
defer configCleanup()
|
||||
|
||||
input := testInput(t, map[string]string{
|
||||
"approve": "yes",
|
||||
})
|
||||
|
||||
op.UIIn = input
|
||||
op.UIOut = b.CLI
|
||||
op.Workspace = backend.DefaultStateName
|
||||
|
||||
run, err := b.Operation(ctx, op)
|
||||
if err != nil {
|
||||
t.Fatalf("error starting operation: %v", err)
|
||||
}
|
||||
|
||||
// RUN: wait for completion
|
||||
<-run.Done()
|
||||
|
||||
if tc.wantErr != "" {
|
||||
// ASSERT: if the test case wants an error, check for failure
|
||||
// and the error message
|
||||
if run.Result != backend.OperationFailure {
|
||||
t.Fatalf("expected run to fail, but result was %#v", run.Result)
|
||||
}
|
||||
errOutput := b.CLI.(*cli.MockUi).ErrorWriter.String()
|
||||
if !strings.Contains(errOutput, tc.wantErr) {
|
||||
t.Fatalf("missing error %q\noutput: %s", tc.wantErr, errOutput)
|
||||
}
|
||||
} else {
|
||||
// ASSERT: otherwise, check for success and appropriate output
|
||||
// based on whether the run should be local or remote
|
||||
if run.Result != backend.OperationSuccess {
|
||||
t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String())
|
||||
}
|
||||
output := b.CLI.(*cli.MockUi).OutputWriter.String()
|
||||
hasRemote := strings.Contains(output, "Running apply in the remote backend")
|
||||
hasSummary := strings.Contains(output, "1 added, 0 changed, 0 destroyed")
|
||||
hasResources := run.State.HasResources()
|
||||
if !tc.forceLocal && tc.hasOperations {
|
||||
if !hasRemote {
|
||||
t.Errorf("missing remote backend header in output: %s", output)
|
||||
}
|
||||
if !hasSummary {
|
||||
t.Errorf("expected apply summary in output: %s", output)
|
||||
}
|
||||
} else {
|
||||
if hasRemote {
|
||||
t.Errorf("unexpected remote backend header in output: %s", output)
|
||||
}
|
||||
if !hasResources {
|
||||
t.Errorf("expected resources in state")
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -24,6 +24,13 @@ var (
errRunOverridden = errors.New("overridden using the UI or API")
)

var (
backoffMin = 1000.0
backoffMax = 3000.0

runPollInterval = 3 * time.Second
)

// backoff will perform exponential backoff based on the iteration and
// limited by the provided min and max (in milliseconds) durations.
func backoff(min, max float64, iter int) time.Duration {
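The body of backoff is not part of this hunk. As a rough sketch of the kind of capped exponential backoff the comment describes (an assumption about the behavior, not the actual implementation):

package main

import (
	"fmt"
	"math"
	"time"
)

// backoffSketch grows the delay exponentially with the iteration count and
// clamps it between min and max, both given in milliseconds. This mirrors the
// documented contract of backoff above, not its exact implementation.
func backoffSketch(min, max float64, iter int) time.Duration {
	d := min * math.Pow(2, float64(iter))
	if d > max {
		d = max
	}
	return time.Duration(d) * time.Millisecond
}

func main() {
	for i := 0; i < 4; i++ {
		// With min=1000 and max=3000 the delays are 1s, 2s, 3s, 3s.
		fmt.Println(backoffSketch(1000, 3000, i))
	}
}
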
@ -43,7 +50,7 @@ func (b *Remote) waitForRun(stopCtx, cancelCtx context.Context, op *backend.Oper
|
|||
return r, stopCtx.Err()
|
||||
case <-cancelCtx.Done():
|
||||
return r, cancelCtx.Err()
|
||||
case <-time.After(backoff(1000, 3000, i)):
|
||||
case <-time.After(backoff(backoffMin, backoffMax, i)):
|
||||
// Timer up, show status
|
||||
}
|
||||
|
||||
|
@ -243,15 +250,7 @@ func (b *Remote) costEstimate(stopCtx, cancelCtx context.Context, op *backend.Op
|
|||
return nil
|
||||
}
|
||||
|
||||
if b.CLI != nil {
|
||||
b.CLI.Output("\n------------------------------------------------------------------------\n")
|
||||
}
|
||||
|
||||
msgPrefix := "Cost estimation"
|
||||
if b.CLI != nil {
|
||||
b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n"))
|
||||
}
|
||||
|
||||
started := time.Now()
|
||||
updated := started
|
||||
for i := 0; ; i++ {
|
||||
|
@ -260,7 +259,7 @@ func (b *Remote) costEstimate(stopCtx, cancelCtx context.Context, op *backend.Op
|
|||
return stopCtx.Err()
|
||||
case <-cancelCtx.Done():
|
||||
return cancelCtx.Err()
|
||||
case <-time.After(1 * time.Second):
|
||||
case <-time.After(backoff(backoffMin, backoffMax, i)):
|
||||
}
|
||||
|
||||
// Retrieve the cost estimate to get its current status.
|
||||
|
@ -277,6 +276,12 @@ func (b *Remote) costEstimate(stopCtx, cancelCtx context.Context, op *backend.Op
|
|||
}
|
||||
}
|
||||
|
||||
// checking if i == 0 so as to avoid printing this starting horizontal-rule
|
||||
// every retry, and that it only prints it on the first (i=0) attempt.
|
||||
if b.CLI != nil && i == 0 {
|
||||
b.CLI.Output("\n------------------------------------------------------------------------\n")
|
||||
}
|
||||
|
||||
switch ce.Status {
|
||||
case tfe.CostEstimateFinished:
|
||||
delta, err := strconv.ParseFloat(ce.DeltaMonthlyCost, 64)
|
||||
|
@ -292,6 +297,7 @@ func (b *Remote) costEstimate(stopCtx, cancelCtx context.Context, op *backend.Op
|
|||
deltaRepr := strings.Replace(ce.DeltaMonthlyCost, "-", "", 1)
|
||||
|
||||
if b.CLI != nil {
|
||||
b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n"))
|
||||
b.CLI.Output(b.Colorize().Color(fmt.Sprintf("Resources: %d of %d estimated", ce.MatchedResourcesCount, ce.ResourcesCount)))
|
||||
b.CLI.Output(b.Colorize().Color(fmt.Sprintf(" $%s/mo %s$%s", ce.ProposedMonthlyCost, sign, deltaRepr)))
|
||||
|
||||
|
@ -313,16 +319,17 @@ func (b *Remote) costEstimate(stopCtx, cancelCtx context.Context, op *backend.Op
|
|||
elapsed = fmt.Sprintf(
|
||||
" (%s elapsed)", current.Sub(started).Truncate(30*time.Second))
|
||||
}
|
||||
b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n"))
|
||||
b.CLI.Output(b.Colorize().Color("Waiting for cost estimate to complete..." + elapsed + "\n"))
|
||||
}
|
||||
continue
|
||||
case tfe.CostEstimateSkippedDueToTargeting:
|
||||
b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n"))
|
||||
b.CLI.Output("Not available for this plan, because it was created with the -target option.")
|
||||
b.CLI.Output("\n------------------------------------------------------------------------")
|
||||
return nil
|
||||
case tfe.CostEstimateErrored:
|
||||
b.CLI.Output(msgPrefix + " errored:\n")
|
||||
b.CLI.Output(ce.ErrorMessage)
|
||||
b.CLI.Output(msgPrefix + " errored.\n")
|
||||
b.CLI.Output("\n------------------------------------------------------------------------")
|
||||
return nil
|
||||
case tfe.CostEstimateCanceled:
|
||||
|
@ -455,7 +462,7 @@ func (b *Remote) confirm(stopCtx context.Context, op *backend.Operation, opts *t
|
|||
return
|
||||
case <-stopCtx.Done():
|
||||
return
|
||||
case <-time.After(3 * time.Second):
|
||||
case <-time.After(runPollInterval):
|
||||
// Retrieve the run again to get its current status.
|
||||
r, err := b.client.Runs.Read(stopCtx, r.ID)
|
||||
if err != nil {
|
||||
|
@ -489,10 +496,10 @@ func (b *Remote) confirm(stopCtx context.Context, op *backend.Operation, opts *t
|
|||
}
|
||||
|
||||
if err == errRunDiscarded {
|
||||
err = errApplyDiscarded
|
||||
if op.Destroy {
|
||||
err = errDestroyDiscarded
|
||||
}
|
||||
err = errApplyDiscarded
|
||||
}
|
||||
|
||||
result <- err
|
||||
|
|
|
@ -156,11 +156,20 @@ func (b *Remote) getRemoteWorkspaceName(localWorkspaceName string) string {
|
|||
}
|
||||
}
|
||||
|
||||
func (b *Remote) getRemoteWorkspaceID(ctx context.Context, localWorkspaceName string) (string, error) {
|
||||
func (b *Remote) getRemoteWorkspace(ctx context.Context, localWorkspaceName string) (*tfe.Workspace, error) {
|
||||
remoteWorkspaceName := b.getRemoteWorkspaceName(localWorkspaceName)
|
||||
|
||||
log.Printf("[TRACE] backend/remote: looking up workspace id for %s/%s", b.organization, remoteWorkspaceName)
|
||||
log.Printf("[TRACE] backend/remote: looking up workspace for %s/%s", b.organization, remoteWorkspaceName)
|
||||
remoteWorkspace, err := b.client.Workspaces.Read(ctx, b.organization, remoteWorkspaceName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return remoteWorkspace, nil
|
||||
}
|
||||
|
||||
func (b *Remote) getRemoteWorkspaceID(ctx context.Context, localWorkspaceName string) (string, error) {
|
||||
remoteWorkspace, err := b.getRemoteWorkspace(ctx, localWorkspaceName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
|
|
@ -195,7 +195,7 @@ func TestRemoteContextWithVars(t *testing.T) {
|
|||
key := "key"
|
||||
v.Key = &key
|
||||
}
|
||||
b.client.Variables.Create(nil, workspaceID, *v)
|
||||
b.client.Variables.Create(context.TODO(), workspaceID, *v)
|
||||
|
||||
_, _, diags := b.Context(op)
|
||||
|
||||
|
|
|
@ -17,6 +17,7 @@ import (
|
|||
|
||||
tfe "github.com/hashicorp/go-tfe"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
tfversion "github.com/hashicorp/terraform/version"
|
||||
"github.com/mitchellh/copystructure"
|
||||
)
|
||||
|
||||
|
@ -360,7 +361,7 @@ func (m *mockLogReader) Read(l []byte) (int, error) {
|
|||
if written, err := m.read(l); err != io.ErrNoProgress {
|
||||
return written, err
|
||||
}
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
time.Sleep(1 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1124,10 +1125,15 @@ func (m *mockWorkspaces) List(ctx context.Context, organization string, options
|
|||
}
|
||||
|
||||
func (m *mockWorkspaces) Create(ctx context.Context, organization string, options tfe.WorkspaceCreateOptions) (*tfe.Workspace, error) {
|
||||
if strings.HasSuffix(*options.Name, "no-operations") {
|
||||
options.Operations = tfe.Bool(false)
|
||||
} else if options.Operations == nil {
|
||||
options.Operations = tfe.Bool(true)
|
||||
}
|
||||
w := &tfe.Workspace{
|
||||
ID: generateID("ws-"),
|
||||
Name: *options.Name,
|
||||
Operations: !strings.HasSuffix(*options.Name, "no-operations"),
|
||||
Operations: *options.Operations,
|
||||
Permissions: &tfe.WorkspacePermissions{
|
||||
CanQueueApply: true,
|
||||
CanQueueRun: true,
|
||||
|
@ -1139,6 +1145,11 @@ func (m *mockWorkspaces) Create(ctx context.Context, organization string, option
|
|||
if options.VCSRepo != nil {
|
||||
w.VCSRepo = &tfe.VCSRepo{}
|
||||
}
|
||||
if options.TerraformVersion != nil {
|
||||
w.TerraformVersion = *options.TerraformVersion
|
||||
} else {
|
||||
w.TerraformVersion = tfversion.String()
|
||||
}
|
||||
m.workspaceIDs[w.ID] = w
|
||||
m.workspaceNames[w.Name] = w
|
||||
return w, nil
|
||||
|
@ -1171,6 +1182,9 @@ func (m *mockWorkspaces) Update(ctx context.Context, organization, workspace str
|
|||
return nil, tfe.ErrResourceNotFound
|
||||
}
|
||||
|
||||
if options.Operations != nil {
|
||||
w.Operations = *options.Operations
|
||||
}
|
||||
if options.Name != nil {
|
||||
w.Name = *options.Name
|
||||
}
|
||||
|
|
|
@ -20,6 +20,8 @@ import (
|
|||
"github.com/hashicorp/terraform/tfdiags"
|
||||
)
|
||||
|
||||
var planConfigurationVersionsPollInterval = 500 * time.Millisecond
|
||||
|
||||
func (b *Remote) opPlan(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) {
|
||||
log.Printf("[INFO] backend/remote: starting Plan operation")
|
||||
|
||||
|
@ -213,7 +215,7 @@ in order to capture the filesystem context the remote workspace expects:
|
|||
return nil, context.Canceled
|
||||
case <-cancelCtx.Done():
|
||||
return nil, context.Canceled
|
||||
case <-time.After(500 * time.Millisecond):
|
||||
case <-time.After(planConfigurationVersionsPollInterval):
|
||||
cv, err = b.client.ConfigurationVersions.Read(stopCtx, cv.ID)
|
||||
if err != nil {
|
||||
return nil, generalError("Failed to retrieve configuration version", err)
|
||||
|
|
|
@ -620,7 +620,7 @@ func TestRemote_planLockTimeout(t *testing.T) {
|
|||
"approve": "yes",
|
||||
})
|
||||
|
||||
op.StateLockTimeout = 5 * time.Second
|
||||
op.StateLockTimeout = 50 * time.Millisecond
|
||||
op.UIIn = input
|
||||
op.UIOut = b.CLI
|
||||
op.Workspace = backend.DefaultStateName
|
||||
|
@ -636,8 +636,8 @@ func TestRemote_planLockTimeout(t *testing.T) {
|
|||
case <-sigint:
|
||||
// Stop redirecting SIGINT signals.
|
||||
signal.Stop(sigint)
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Fatalf("expected lock timeout after 5 seconds, waited 10 seconds")
|
||||
case <-time.After(200 * time.Millisecond):
|
||||
t.Fatalf("expected lock timeout after 50 milliseconds, waited 200 milliseconds")
|
||||
}
|
||||
|
||||
if len(input.answers) != 2 {
|
||||
|
|
|
@ -1,13 +1,18 @@
|
|||
package remote
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
tfe "github.com/hashicorp/go-tfe"
|
||||
version "github.com/hashicorp/go-version"
|
||||
"github.com/hashicorp/terraform-svchost/disco"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/version"
|
||||
"github.com/hashicorp/terraform/tfdiags"
|
||||
tfversion "github.com/hashicorp/terraform/version"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
|
||||
backendLocal "github.com/hashicorp/terraform/backend/local"
|
||||
|
@ -196,11 +201,11 @@ func TestRemote_versionConstraints(t *testing.T) {
|
|||
}
|
||||
|
||||
// Save and restore the actual version.
|
||||
p := version.Prerelease
|
||||
v := version.Version
|
||||
p := tfversion.Prerelease
|
||||
v := tfversion.Version
|
||||
defer func() {
|
||||
version.Prerelease = p
|
||||
version.Version = v
|
||||
tfversion.Prerelease = p
|
||||
tfversion.Version = v
|
||||
}()
|
||||
|
||||
for name, tc := range cases {
@@ -208,8 +213,8 @@ func TestRemote_versionConstraints(t *testing.T) {
|
|||
b := New(testDisco(s))
|
||||
|
||||
// Set the version for this test.
|
||||
version.Prerelease = tc.prerelease
|
||||
version.Version = tc.version
|
||||
tfversion.Prerelease = tc.prerelease
|
||||
tfversion.Version = tc.version
|
||||
|
||||
// Validate
|
||||
_, valDiags := b.PrepareConfig(tc.config)
@@ -428,17 +433,17 @@ func TestRemote_checkConstraints(t *testing.T) {
|
|||
}
|
||||
|
||||
// Save and restore the actual version.
|
||||
p := version.Prerelease
|
||||
v := version.Version
|
||||
p := tfversion.Prerelease
|
||||
v := tfversion.Version
|
||||
defer func() {
|
||||
version.Prerelease = p
|
||||
version.Version = v
|
||||
tfversion.Prerelease = p
|
||||
tfversion.Version = v
|
||||
}()
|
||||
|
||||
for name, tc := range cases {
|
||||
// Set the version for this test.
|
||||
version.Prerelease = tc.prerelease
|
||||
version.Version = tc.version
|
||||
tfversion.Prerelease = tc.prerelease
|
||||
tfversion.Version = tc.version
|
||||
|
||||
// Check the constraints.
|
||||
diags := b.checkConstraints(tc.constraints)
@@ -448,3 +453,264 @@ func TestRemote_checkConstraints(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemote_StateMgr_versionCheck(t *testing.T) {
|
||||
b, bCleanup := testBackendDefault(t)
|
||||
defer bCleanup()
|
||||
|
||||
// Some fixed versions for testing with. This logic is a simple string
|
||||
// comparison, so we don't need many test cases.
|
||||
v0135 := version.Must(version.NewSemver("0.13.5"))
|
||||
v0140 := version.Must(version.NewSemver("0.14.0"))
|
||||
|
||||
// Save original local version state and restore afterwards
|
||||
p := tfversion.Prerelease
|
||||
v := tfversion.Version
|
||||
s := tfversion.SemVer
|
||||
defer func() {
|
||||
tfversion.Prerelease = p
|
||||
tfversion.Version = v
|
||||
tfversion.SemVer = s
|
||||
}()
|
||||
|
||||
// For this test, the local Terraform version is set to 0.14.0
|
||||
tfversion.Prerelease = ""
|
||||
tfversion.Version = v0140.String()
|
||||
tfversion.SemVer = v0140
|
||||
|
||||
// Update the mock remote workspace Terraform version to match the local
|
||||
// Terraform version
|
||||
if _, err := b.client.Workspaces.Update(
|
||||
context.Background(),
|
||||
b.organization,
|
||||
b.workspace,
|
||||
tfe.WorkspaceUpdateOptions{
|
||||
TerraformVersion: tfe.String(v0140.String()),
|
||||
},
|
||||
); err != nil {
|
||||
t.Fatalf("error: %v", err)
|
||||
}
|
||||
|
||||
// This should succeed
|
||||
if _, err := b.StateMgr(backend.DefaultStateName); err != nil {
|
||||
t.Fatalf("expected no error, got %v", err)
|
||||
}
|
||||
|
||||
// Now change the remote workspace to a different Terraform version
|
||||
if _, err := b.client.Workspaces.Update(
|
||||
context.Background(),
|
||||
b.organization,
|
||||
b.workspace,
|
||||
tfe.WorkspaceUpdateOptions{
|
||||
TerraformVersion: tfe.String(v0135.String()),
|
||||
},
|
||||
); err != nil {
|
||||
t.Fatalf("error: %v", err)
|
||||
}
|
||||
|
||||
// This should fail
|
||||
want := `Remote workspace Terraform version "0.13.5" does not match local Terraform version "0.14.0"`
|
||||
if _, err := b.StateMgr(backend.DefaultStateName); err.Error() != want {
|
||||
t.Fatalf("wrong error\n got: %v\nwant: %v", err.Error(), want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemote_StateMgr_versionCheckLatest(t *testing.T) {
|
||||
b, bCleanup := testBackendDefault(t)
|
||||
defer bCleanup()
|
||||
|
||||
v0140 := version.Must(version.NewSemver("0.14.0"))
|
||||
|
||||
// Save original local version state and restore afterwards
|
||||
p := tfversion.Prerelease
|
||||
v := tfversion.Version
|
||||
s := tfversion.SemVer
|
||||
defer func() {
|
||||
tfversion.Prerelease = p
|
||||
tfversion.Version = v
|
||||
tfversion.SemVer = s
|
||||
}()
|
||||
|
||||
// For this test, the local Terraform version is set to 0.14.0
|
||||
tfversion.Prerelease = ""
|
||||
tfversion.Version = v0140.String()
|
||||
tfversion.SemVer = v0140
|
||||
|
||||
// Update the remote workspace to the pseudo-version "latest"
|
||||
if _, err := b.client.Workspaces.Update(
|
||||
context.Background(),
|
||||
b.organization,
|
||||
b.workspace,
|
||||
tfe.WorkspaceUpdateOptions{
|
||||
TerraformVersion: tfe.String("latest"),
|
||||
},
|
||||
); err != nil {
|
||||
t.Fatalf("error: %v", err)
|
||||
}
|
||||
|
||||
// This should succeed despite not being a string match
|
||||
if _, err := b.StateMgr(backend.DefaultStateName); err != nil {
|
||||
t.Fatalf("expected no error, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemote_VerifyWorkspaceTerraformVersion(t *testing.T) {
|
||||
testCases := []struct {
|
||||
local string
|
||||
remote string
|
||||
operations bool
|
||||
wantErr bool
|
||||
}{
|
||||
{"0.13.5", "0.13.5", true, false},
|
||||
{"0.14.0", "0.13.5", true, true},
|
||||
{"0.14.0", "0.13.5", false, false},
|
||||
{"0.14.0", "0.14.1", true, false},
|
||||
{"0.14.0", "1.0.99", true, false},
|
||||
{"0.14.0", "1.1.0", true, true},
|
||||
{"1.2.0", "1.2.99", true, false},
|
||||
{"1.2.0", "1.3.0", true, true},
|
||||
{"0.15.0", "latest", true, false},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(fmt.Sprintf("local %s, remote %s", tc.local, tc.remote), func(t *testing.T) {
|
||||
b, bCleanup := testBackendDefault(t)
|
||||
defer bCleanup()
|
||||
|
||||
local := version.Must(version.NewSemver(tc.local))
|
||||
|
||||
// Save original local version state and restore afterwards
|
||||
p := tfversion.Prerelease
|
||||
v := tfversion.Version
|
||||
s := tfversion.SemVer
|
||||
defer func() {
|
||||
tfversion.Prerelease = p
|
||||
tfversion.Version = v
|
||||
tfversion.SemVer = s
|
||||
}()
|
||||
|
||||
// Override local version as specified
|
||||
tfversion.Prerelease = ""
|
||||
tfversion.Version = local.String()
|
||||
tfversion.SemVer = local
|
||||
|
||||
// Update the mock remote workspace Terraform version to the
|
||||
// specified remote version
|
||||
if _, err := b.client.Workspaces.Update(
|
||||
context.Background(),
|
||||
b.organization,
|
||||
b.workspace,
|
||||
tfe.WorkspaceUpdateOptions{
|
||||
Operations: tfe.Bool(tc.operations),
|
||||
TerraformVersion: tfe.String(tc.remote),
|
||||
},
|
||||
); err != nil {
|
||||
t.Fatalf("error: %v", err)
|
||||
}
|
||||
|
||||
diags := b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName)
|
||||
if tc.wantErr {
|
||||
if len(diags) != 1 {
|
||||
t.Fatal("expected diag, but none returned")
|
||||
}
|
||||
if got := diags.Err().Error(); !strings.Contains(got, "Terraform version mismatch") {
|
||||
t.Fatalf("unexpected error: %s", got)
|
||||
}
|
||||
} else {
|
||||
if len(diags) != 0 {
|
||||
t.Fatalf("unexpected diags: %s", diags.Err())
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemote_VerifyWorkspaceTerraformVersion_workspaceErrors(t *testing.T) {
|
||||
b, bCleanup := testBackendDefault(t)
|
||||
defer bCleanup()
|
||||
|
||||
// Attempting to check the version against a workspace which doesn't exist
|
||||
// should fail
|
||||
diags := b.VerifyWorkspaceTerraformVersion("invalid-workspace")
|
||||
if len(diags) != 1 {
|
||||
t.Fatal("expected diag, but none returned")
|
||||
}
|
||||
if got := diags.Err().Error(); !strings.Contains(got, "Error looking up workspace: Workspace read failed") {
|
||||
t.Fatalf("unexpected error: %s", got)
|
||||
}
|
||||
|
||||
// Update the mock remote workspace Terraform version to an invalid version
|
||||
if _, err := b.client.Workspaces.Update(
|
||||
context.Background(),
|
||||
b.organization,
|
||||
b.workspace,
|
||||
tfe.WorkspaceUpdateOptions{
|
||||
TerraformVersion: tfe.String("1.0.cheetarah"),
|
||||
},
|
||||
); err != nil {
|
||||
t.Fatalf("error: %v", err)
|
||||
}
|
||||
diags = b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName)
|
||||
|
||||
if len(diags) != 1 {
|
||||
t.Fatal("expected diag, but none returned")
|
||||
}
|
||||
if got := diags.Err().Error(); !strings.Contains(got, "Error looking up workspace: Invalid Terraform version") {
|
||||
t.Fatalf("unexpected error: %s", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemote_VerifyWorkspaceTerraformVersion_ignoreFlagSet(t *testing.T) {
|
||||
b, bCleanup := testBackendDefault(t)
|
||||
defer bCleanup()
|
||||
|
||||
// If the ignore flag is set, the behaviour changes
|
||||
b.IgnoreVersionConflict()
|
||||
|
||||
// Different local & remote versions to cause an error
|
||||
local := version.Must(version.NewSemver("0.14.0"))
|
||||
remote := version.Must(version.NewSemver("0.13.5"))
|
||||
|
||||
// Save original local version state and restore afterwards
|
||||
p := tfversion.Prerelease
|
||||
v := tfversion.Version
|
||||
s := tfversion.SemVer
|
||||
defer func() {
|
||||
tfversion.Prerelease = p
|
||||
tfversion.Version = v
|
||||
tfversion.SemVer = s
|
||||
}()
|
||||
|
||||
// Override local version as specified
|
||||
tfversion.Prerelease = ""
|
||||
tfversion.Version = local.String()
|
||||
tfversion.SemVer = local
|
||||
|
||||
// Update the mock remote workspace Terraform version to the
|
||||
// specified remote version
|
||||
if _, err := b.client.Workspaces.Update(
|
||||
context.Background(),
|
||||
b.organization,
|
||||
b.workspace,
|
||||
tfe.WorkspaceUpdateOptions{
|
||||
TerraformVersion: tfe.String(remote.String()),
|
||||
},
|
||||
); err != nil {
|
||||
t.Fatalf("error: %v", err)
|
||||
}
|
||||
|
||||
diags := b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName)
|
||||
if len(diags) != 1 {
|
||||
t.Fatal("expected diag, but none returned")
|
||||
}
|
||||
|
||||
if got, want := diags[0].Severity(), tfdiags.Warning; got != want {
|
||||
t.Errorf("wrong severity: got %#v, want %#v", got, want)
|
||||
}
|
||||
if got, want := diags[0].Description().Summary, "Terraform version mismatch"; got != want {
|
||||
t.Errorf("wrong summary: got %s, want %s", got, want)
|
||||
}
|
||||
wantDetail := "The local Terraform version (0.14.0) does not match the configured version for remote workspace hashicorp/prod (0.13.5)."
|
||||
if got := diags[0].Description().Detail; got != wantDetail {
|
||||
t.Errorf("wrong summary: got %s, want %s", got, wantDetail)
|
||||
}
|
||||
}
@@ -4,6 +4,7 @@ import (
|
|||
"flag"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
_ "github.com/hashicorp/terraform/internal/logging"
|
||||
)
@@ -14,5 +15,11 @@ func TestMain(m *testing.M) {
|
|||
// Make sure TF_FORCE_LOCAL_BACKEND is unset
|
||||
os.Unsetenv("TF_FORCE_LOCAL_BACKEND")
|
||||
|
||||
// Reduce delays to make tests run faster
|
||||
backoffMin = 1.0
|
||||
backoffMax = 1.0
|
||||
planConfigurationVersionsPollInterval = 1 * time.Millisecond
|
||||
runPollInterval = 1 * time.Millisecond
|
||||
|
||||
os.Exit(m.Run())
|
||||
}
@@ -175,7 +175,7 @@ func testLocalBackend(t *testing.T, remote *Remote) backend.Enhanced {
|
|||
},
|
||||
},
|
||||
})
|
||||
p.ApplyResourceChangeResponse = providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{
|
||||
p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{
|
||||
"id": cty.StringVal("yes"),
|
||||
})}
@@ -1,15 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform/builtin/providers/test"
|
||||
"github.com/hashicorp/terraform/plugin"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func main() {
|
||||
plugin.Serve(&plugin.ServeOpts{
|
||||
ProviderFunc: func() terraform.ResourceProvider {
|
||||
return test.Provider()
|
||||
},
|
||||
})
|
||||
}
@@ -1,12 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform/builtin/provisioners/chef"
|
||||
"github.com/hashicorp/terraform/plugin"
|
||||
)
|
||||
|
||||
func main() {
|
||||
plugin.Serve(&plugin.ServeOpts{
|
||||
ProvisionerFunc: chef.Provisioner,
|
||||
})
|
||||
}
@@ -1,12 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform/builtin/provisioners/file"
|
||||
"github.com/hashicorp/terraform/plugin"
|
||||
)
|
||||
|
||||
func main() {
|
||||
plugin.Serve(&plugin.ServeOpts{
|
||||
ProvisionerFunc: file.Provisioner,
|
||||
})
|
||||
}
@@ -1,12 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform/builtin/provisioners/habitat"
|
||||
"github.com/hashicorp/terraform/plugin"
|
||||
)
|
||||
|
||||
func main() {
|
||||
plugin.Serve(&plugin.ServeOpts{
|
||||
ProvisionerFunc: habitat.Provisioner,
|
||||
})
|
||||
}
@@ -1,12 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform/builtin/provisioners/local-exec"
|
||||
"github.com/hashicorp/terraform/plugin"
|
||||
)
|
||||
|
||||
func main() {
|
||||
plugin.Serve(&plugin.ServeOpts{
|
||||
ProvisionerFunc: localexec.Provisioner,
|
||||
})
|
||||
}
@@ -1,12 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform/builtin/provisioners/puppet"
|
||||
"github.com/hashicorp/terraform/plugin"
|
||||
)
|
||||
|
||||
func main() {
|
||||
plugin.Serve(&plugin.ServeOpts{
|
||||
ProvisionerFunc: puppet.Provisioner,
|
||||
})
|
||||
}
@@ -1,12 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform/builtin/provisioners/remote-exec"
|
||||
"github.com/hashicorp/terraform/plugin"
|
||||
)
|
||||
|
||||
func main() {
|
||||
plugin.Serve(&plugin.ServeOpts{
|
||||
ProvisionerFunc: remoteexec.Provisioner,
|
||||
})
|
||||
}
@@ -1,12 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform/builtin/provisioners/salt-masterless"
|
||||
"github.com/hashicorp/terraform/plugin"
|
||||
)
|
||||
|
||||
func main() {
|
||||
plugin.Serve(&plugin.ServeOpts{
|
||||
ProvisionerFunc: saltmasterless.Provisioner,
|
||||
})
|
||||
}
@@ -5,6 +5,7 @@ import (
|
|||
"log"
|
||||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/backend/remote"
|
||||
"github.com/hashicorp/terraform/configs/configschema"
|
||||
"github.com/hashicorp/terraform/providers"
|
||||
"github.com/hashicorp/terraform/tfdiags"
@@ -18,24 +19,42 @@ func dataSourceRemoteStateGetSchema() providers.Schema {
|
|||
Block: &configschema.Block{
|
||||
Attributes: map[string]*configschema.Attribute{
|
||||
"backend": {
|
||||
Type: cty.String,
|
||||
Required: true,
|
||||
Type: cty.String,
|
||||
Description: "The remote backend to use, e.g. `remote` or `http`.",
|
||||
DescriptionKind: configschema.StringMarkdown,
|
||||
Required: true,
|
||||
},
|
||||
"config": {
|
||||
Type: cty.DynamicPseudoType,
|
||||
Optional: true,
|
||||
Type: cty.DynamicPseudoType,
|
||||
Description: "The configuration of the remote backend. " +
|
||||
"Although this is optional, most backends require " +
|
||||
"some configuration.\n\n" +
|
||||
"The object can use any arguments that would be valid " +
|
||||
"in the equivalent `terraform { backend \"<TYPE>\" { ... } }` " +
|
||||
"block.",
|
||||
DescriptionKind: configschema.StringMarkdown,
|
||||
Optional: true,
|
||||
},
|
||||
"defaults": {
|
||||
Type: cty.DynamicPseudoType,
|
||||
Optional: true,
|
||||
Type: cty.DynamicPseudoType,
|
||||
Description: "Default values for outputs, in case " +
|
||||
"the state file is empty or lacks a required output.",
|
||||
DescriptionKind: configschema.StringMarkdown,
|
||||
Optional: true,
|
||||
},
|
||||
"outputs": {
|
||||
Type: cty.DynamicPseudoType,
|
||||
Computed: true,
|
||||
Type: cty.DynamicPseudoType,
|
||||
Description: "An object containing every root-level " +
|
||||
"output in the remote state.",
|
||||
DescriptionKind: configschema.StringMarkdown,
|
||||
Computed: true,
|
||||
},
|
||||
"workspace": {
|
||||
Type: cty.String,
|
||||
Optional: true,
|
||||
Type: cty.String,
|
||||
Description: "The Terraform workspace to use, if " +
|
||||
"the backend supports workspaces.",
|
||||
DescriptionKind: configschema.StringMarkdown,
|
||||
Optional: true,
|
||||
},
|
||||
},
|
||||
},
@@ -215,6 +234,12 @@ func getBackend(cfg cty.Value) (backend.Backend, cty.Value, tfdiags.Diagnostics)
|
|||
return nil, cty.NilVal, diags
|
||||
}
|
||||
|
||||
// If this is the enhanced remote backend, we want to disable the version
|
||||
// check, because this is a read-only operation
|
||||
if rb, ok := b.(*remote.Remote); ok {
|
||||
rb.IgnoreVersionConflict()
|
||||
}
|
||||
|
||||
return b, newVal, diags
|
||||
}
|
@@ -1,76 +0,0 @@
|
|||
package terraform
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// remoteStateFlatten takes a structure and turns into a flat map[string]string.
|
||||
//
|
||||
// Within the "thing" parameter, only primitive values are allowed. Structs are
|
||||
// not supported. Therefore, it can only be slices, maps, primitives, and
|
||||
// any combination of those together.
|
||||
//
|
||||
// The difference between this version and the version in package flatmap is that
|
||||
// we add the count key for maps in this version, and return a normal
|
||||
// map[string]string instead of a flatmap.Map
|
||||
func remoteStateFlatten(thing map[string]interface{}) map[string]string {
|
||||
result := make(map[string]string)
|
||||
|
||||
for k, raw := range thing {
|
||||
flatten(result, k, reflect.ValueOf(raw))
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func flatten(result map[string]string, prefix string, v reflect.Value) {
|
||||
if v.Kind() == reflect.Interface {
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
switch v.Kind() {
|
||||
case reflect.Bool:
|
||||
if v.Bool() {
|
||||
result[prefix] = "true"
|
||||
} else {
|
||||
result[prefix] = "false"
|
||||
}
|
||||
case reflect.Int:
|
||||
result[prefix] = fmt.Sprintf("%d", v.Int())
|
||||
case reflect.Map:
|
||||
flattenMap(result, prefix, v)
|
||||
case reflect.Slice:
|
||||
flattenSlice(result, prefix, v)
|
||||
case reflect.String:
|
||||
result[prefix] = v.String()
|
||||
default:
|
||||
panic(fmt.Sprintf("Unknown: %s", v))
|
||||
}
|
||||
}
|
||||
|
||||
func flattenMap(result map[string]string, prefix string, v reflect.Value) {
|
||||
mapKeys := v.MapKeys()
|
||||
|
||||
result[fmt.Sprintf("%s.%%", prefix)] = fmt.Sprintf("%d", len(mapKeys))
|
||||
for _, k := range mapKeys {
|
||||
if k.Kind() == reflect.Interface {
|
||||
k = k.Elem()
|
||||
}
|
||||
|
||||
if k.Kind() != reflect.String {
|
||||
panic(fmt.Sprintf("%s: map key is not string: %s", prefix, k))
|
||||
}
|
||||
|
||||
flatten(result, fmt.Sprintf("%s.%s", prefix, k.String()), v.MapIndex(k))
|
||||
}
|
||||
}
|
||||
|
||||
func flattenSlice(result map[string]string, prefix string, v reflect.Value) {
|
||||
prefix = prefix + "."
|
||||
|
||||
result[prefix+"#"] = fmt.Sprintf("%d", v.Len())
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
flatten(result, fmt.Sprintf("%s%d", prefix, i), v.Index(i))
|
||||
}
|
||||
}
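For context, the remoteStateFlatten helper removed above turns a nested map into a flat map[string]string, writing a ".#" count key for each slice and a ".%" count key for each map. An illustrative input and output (hypothetical values, derived from the removed code):

    input:  map[string]interface{}{"ports": []interface{}{80, 443}, "tags": map[string]interface{}{"env": "prod"}}
    output: map[string]string{"ports.#": "2", "ports.0": "80", "ports.1": "443", "tags.%": "1", "tags.env": "prod"}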
@@ -17,7 +17,7 @@ type Provider struct {
|
|||
}
|
||||
|
||||
// NewProvider returns a new terraform provider
|
||||
func NewProvider() *Provider {
|
||||
func NewProvider() providers.Interface {
|
||||
return &Provider{}
|
||||
}
@@ -1,29 +1,10 @@
|
|||
package terraform
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/providers"
|
||||
|
||||
backendInit "github.com/hashicorp/terraform/backend/init"
|
||||
)
|
||||
|
||||
var testAccProviders map[string]*Provider
|
||||
var testAccProvider *Provider
|
||||
|
||||
func init() {
|
||||
// Initialize the backends
|
||||
backendInit.Init(nil)
|
||||
|
||||
testAccProvider = NewProvider()
|
||||
testAccProviders = map[string]*Provider{
|
||||
"terraform": testAccProvider,
|
||||
}
|
||||
}
|
||||
|
||||
func TestProvider_impl(t *testing.T) {
|
||||
var _ providers.Interface = NewProvider()
|
||||
}
|
||||
|
||||
func testAccPreCheck(t *testing.T) {
|
||||
}
@@ -1,63 +0,0 @@
|
|||
package test
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
func testDataSource() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Read: testDataSourceRead,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"list": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
},
|
||||
|
||||
"input": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"output": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
// this attribute is computed, but never set by the provider
|
||||
"nil": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"input_map": {
|
||||
Type: schema.TypeMap,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Optional: true,
|
||||
},
|
||||
"output_map": {
|
||||
Type: schema.TypeMap,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func testDataSourceRead(d *schema.ResourceData, meta interface{}) error {
|
||||
d.SetId(time.Now().UTC().String())
|
||||
d.Set("list", []interface{}{"one", "two", "three"})
|
||||
|
||||
if input, hasInput := d.GetOk("input"); hasInput {
|
||||
d.Set("output", input)
|
||||
} else {
|
||||
d.Set("output", "some output")
|
||||
}
|
||||
|
||||
if inputMap, hasInput := d.GetOk("input_map"); hasInput {
|
||||
d.Set("output_map", inputMap)
|
||||
}
|
||||
return nil
|
||||
}
@@ -1,25 +0,0 @@
|
|||
package test
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
func providerLabelDataSource() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Read: providerLabelDataSourceRead,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"label": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func providerLabelDataSourceRead(d *schema.ResourceData, meta interface{}) error {
|
||||
label := meta.(string)
|
||||
d.SetId(label)
|
||||
d.Set("label", label)
|
||||
return nil
|
||||
}
@@ -1,45 +0,0 @@
|
|||
package test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestProviderLabelDataSource(t *testing.T) {
|
||||
resource.UnitTest(t, resource.TestCase{
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: func(s *terraform.State) error {
|
||||
return nil
|
||||
},
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: strings.TrimSpace(`
|
||||
provider "test" {
|
||||
label = "foo"
|
||||
}
|
||||
|
||||
data "test_provider_label" "test" {
|
||||
}
|
||||
`),
|
||||
Check: func(s *terraform.State) error {
|
||||
res, hasRes := s.RootModule().Resources["data.test_provider_label.test"]
|
||||
if !hasRes {
|
||||
return errors.New("No test_provider_label in state")
|
||||
}
|
||||
if got, want := res.Primary.ID, "foo"; got != want {
|
||||
return fmt.Errorf("wrong id %q; want %q", got, want)
|
||||
}
|
||||
if got, want := res.Primary.Attributes["label"], "foo"; got != want {
|
||||
return fmt.Errorf("wrong id %q; want %q", got, want)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
@@ -1,291 +0,0 @@
|
|||
package test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestDataSource_dataSourceCount(t *testing.T) {
|
||||
resource.UnitTest(t, resource.TestCase{
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: func(s *terraform.State) error {
|
||||
return nil
|
||||
},
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: strings.TrimSpace(`
|
||||
data "test_data_source" "test" {
|
||||
count = 3
|
||||
input = "count-${count.index}"
|
||||
}
|
||||
|
||||
resource "test_resource" "foo" {
|
||||
required = "yep"
|
||||
required_map = {
|
||||
key = "value"
|
||||
}
|
||||
|
||||
list = "${data.test_data_source.test.*.output}"
|
||||
}
|
||||
`),
|
||||
Check: func(s *terraform.State) error {
|
||||
res, hasRes := s.RootModule().Resources["test_resource.foo"]
|
||||
if !hasRes {
|
||||
return errors.New("No test_resource.foo in state")
|
||||
}
|
||||
if res.Primary.Attributes["list.#"] != "3" {
|
||||
return errors.New("Wrong list.#, expected 3")
|
||||
}
|
||||
if res.Primary.Attributes["list.0"] != "count-0" {
|
||||
return errors.New("Wrong list.0, expected count-0")
|
||||
}
|
||||
if res.Primary.Attributes["list.1"] != "count-1" {
|
||||
return errors.New("Wrong list.0, expected count-1")
|
||||
}
|
||||
if res.Primary.Attributes["list.2"] != "count-2" {
|
||||
return errors.New("Wrong list.0, expected count-2")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Test that the output of a data source can be used as the value for
|
||||
// a "count" in a real resource. This would fail with "count cannot be computed"
|
||||
// at some point.
|
||||
func TestDataSource_valueAsResourceCount(t *testing.T) {
|
||||
resource.UnitTest(t, resource.TestCase{
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: func(s *terraform.State) error {
|
||||
return nil
|
||||
},
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: strings.TrimSpace(`
|
||||
data "test_data_source" "test" {
|
||||
input = "4"
|
||||
}
|
||||
|
||||
resource "test_resource" "foo" {
|
||||
count = "${data.test_data_source.test.output}"
|
||||
|
||||
required = "yep"
|
||||
required_map = {
|
||||
key = "value"
|
||||
}
|
||||
}
|
||||
`),
|
||||
Check: func(s *terraform.State) error {
|
||||
count := 0
|
||||
for k, _ := range s.RootModule().Resources {
|
||||
if strings.HasPrefix(k, "test_resource.foo.") {
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
if count != 4 {
|
||||
return fmt.Errorf("bad count: %d", count)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// TestDataSource_dataSourceCountGrandChild tests that a grandchild data source
|
||||
// that is based off of count works, ie: dependency chain foo -> bar -> baz.
|
||||
// This was failing because CountBoundaryTransformer is being run during apply
|
||||
// instead of plan, which meant that it wasn't firing after data sources were
|
||||
// potentially changing state and causing diff/interpolation issues.
|
||||
//
|
||||
// This happens after the initial apply, after state is saved.
|
||||
func TestDataSource_dataSourceCountGrandChild(t *testing.T) {
|
||||
resource.UnitTest(t, resource.TestCase{
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: func(s *terraform.State) error {
|
||||
return nil
|
||||
},
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: dataSourceCountGrandChildConfig,
|
||||
},
|
||||
{
|
||||
Config: dataSourceCountGrandChildConfig,
|
||||
Check: func(s *terraform.State) error {
|
||||
for _, v := range []string{"foo", "bar", "baz"} {
|
||||
count := 0
|
||||
for k := range s.RootModule().Resources {
|
||||
if strings.HasPrefix(k, fmt.Sprintf("data.test_data_source.%s.", v)) {
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
if count != 2 {
|
||||
return fmt.Errorf("bad count for data.test_data_source.%s: %d", v, count)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
const dataSourceCountGrandChildConfig = `
|
||||
data "test_data_source" "foo" {
|
||||
count = 2
|
||||
input = "one"
|
||||
}
|
||||
|
||||
data "test_data_source" "bar" {
|
||||
count = "${length(data.test_data_source.foo.*.id)}"
|
||||
input = "${data.test_data_source.foo.*.output[count.index]}"
|
||||
}
|
||||
|
||||
data "test_data_source" "baz" {
|
||||
count = "${length(data.test_data_source.bar.*.id)}"
|
||||
input = "${data.test_data_source.bar.*.output[count.index]}"
|
||||
}
|
||||
`
|
||||
|
||||
func TestDataSource_nilComputedValues(t *testing.T) {
|
||||
check := func(s *terraform.State) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
resource.UnitTest(t, resource.TestCase{
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Check: check,
|
||||
Config: `
|
||||
variable "index" {
|
||||
default = "d"
|
||||
}
|
||||
|
||||
locals {
|
||||
name = {
|
||||
a = "something"
|
||||
b = "else"
|
||||
}
|
||||
}
|
||||
|
||||
data "test_data_source" "x" {
|
||||
input = "${lookup(local.name, var.index, local.name["a"])}"
|
||||
}
|
||||
|
||||
data "test_data_source" "y" {
|
||||
input = data.test_data_source.x.nil == "something" ? "something" : "else"
|
||||
}`,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// referencing test_data_source.one.output_map["a"] should produce an error when
|
||||
// there's a count.
|
||||
func TestDataSource_indexedCountOfOne(t *testing.T) {
|
||||
resource.UnitTest(t, resource.TestCase{
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: strings.TrimSpace(`
|
||||
data "test_data_source" "one" {
|
||||
count = 1
|
||||
input_map = {
|
||||
"a" = "b"
|
||||
}
|
||||
}
|
||||
|
||||
data "test_data_source" "two" {
|
||||
input_map = {
|
||||
"x" = data.test_data_source.one.output_map["a"]
|
||||
}
|
||||
}
|
||||
`),
|
||||
ExpectError: regexp.MustCompile("Because data.test_data_source.one has \"count\" set, its attributes must be accessed on specific instances"),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Verify that we can destroy when a data source references something with a
|
||||
// count of 1.
|
||||
func TestDataSource_countRefDestroyError(t *testing.T) {
|
||||
resource.UnitTest(t, resource.TestCase{
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: strings.TrimSpace(`
|
||||
data "test_data_source" "one" {
|
||||
count = 1
|
||||
input = "a"
|
||||
}
|
||||
|
||||
data "test_data_source" "two" {
|
||||
input = data.test_data_source.one[0].output
|
||||
}
|
||||
`),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestDataSource_planUpdate(t *testing.T) {
|
||||
resource.UnitTest(t, resource.TestCase{
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: strings.TrimSpace(`
|
||||
resource "test_resource" "a" {
|
||||
required = "first"
|
||||
required_map = {
|
||||
key = "1"
|
||||
}
|
||||
optional_force_new = "first"
|
||||
}
|
||||
|
||||
data "test_data_source" "a" {
|
||||
input = "${test_resource.a.computed_from_required}"
|
||||
}
|
||||
|
||||
output "out" {
|
||||
value = "${data.test_data_source.a.output}"
|
||||
}
|
||||
`),
|
||||
},
|
||||
{
|
||||
Config: strings.TrimSpace(`
|
||||
resource "test_resource" "a" {
|
||||
required = "second"
|
||||
required_map = {
|
||||
key = "1"
|
||||
}
|
||||
optional_force_new = "second"
|
||||
}
|
||||
|
||||
data "test_data_source" "a" {
|
||||
input = "${test_resource.a.computed_from_required}"
|
||||
}
|
||||
|
||||
output "out" {
|
||||
value = "${data.test_data_source.a.output}"
|
||||
}
|
||||
`),
|
||||
Check: resource.ComposeAggregateTestCheckFunc(
|
||||
resource.TestCheckResourceAttr("data.test_data_source.a", "output", "second"),
|
||||
resource.TestCheckOutput("out", "second"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
@@ -1,144 +0,0 @@
|
|||
package test
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestDiffApply_set(t *testing.T) {
|
||||
priorAttrs := map[string]string{
|
||||
"id": "testID",
|
||||
"egress.#": "1",
|
||||
"egress.2129912301.cidr_blocks.#": "1",
|
||||
"egress.2129912301.cidr_blocks.0": "10.0.0.0/8",
|
||||
"egress.2129912301.description": "Egress description",
|
||||
"egress.2129912301.from_port": "80",
|
||||
"egress.2129912301.ipv6_cidr_blocks.#": "0",
|
||||
"egress.2129912301.prefix_list_ids.#": "0",
|
||||
"egress.2129912301.protocol": "tcp",
|
||||
"egress.2129912301.security_groups.#": "0",
|
||||
"egress.2129912301.self": "false",
|
||||
"egress.2129912301.to_port": "8000",
|
||||
}
|
||||
|
||||
diff := &terraform.InstanceDiff{
|
||||
Attributes: map[string]*terraform.ResourceAttrDiff{
|
||||
"egress.2129912301.cidr_blocks.#": {Old: "1", New: "0", NewComputed: false, NewRemoved: false},
|
||||
"egress.2129912301.cidr_blocks.0": {Old: "10.0.0.0/8", New: "", NewComputed: false, NewRemoved: true},
|
||||
"egress.2129912301.description": {Old: "Egress description", New: "", NewComputed: false, NewRemoved: true},
|
||||
"egress.2129912301.from_port": {Old: "80", New: "0", NewComputed: false, NewRemoved: true},
|
||||
"egress.2129912301.ipv6_cidr_blocks.#": {Old: "0", New: "0", NewComputed: false, NewRemoved: false},
|
||||
"egress.2129912301.prefix_list_ids.#": {Old: "0", New: "0", NewComputed: false, NewRemoved: false},
|
||||
"egress.2129912301.protocol": {Old: "tcp", New: "", NewComputed: false, NewRemoved: true},
|
||||
"egress.2129912301.security_groups.#": {Old: "0", New: "0", NewComputed: false, NewRemoved: false},
|
||||
"egress.2129912301.self": {Old: "false", New: "false", NewComputed: false, NewRemoved: true},
|
||||
"egress.2129912301.to_port": {Old: "8000", New: "0", NewComputed: false, NewRemoved: true},
|
||||
"egress.746197026.cidr_blocks.#": {Old: "", New: "1", NewComputed: false, NewRemoved: false},
|
||||
"egress.746197026.cidr_blocks.0": {Old: "", New: "10.0.0.0/8", NewComputed: false, NewRemoved: false},
|
||||
"egress.746197026.description": {Old: "", New: "New egress description", NewComputed: false, NewRemoved: false},
|
||||
"egress.746197026.from_port": {Old: "", New: "80", NewComputed: false, NewRemoved: false},
|
||||
"egress.746197026.ipv6_cidr_blocks.#": {Old: "", New: "0", NewComputed: false, NewRemoved: false},
|
||||
"egress.746197026.prefix_list_ids.#": {Old: "", New: "0", NewComputed: false, NewRemoved: false},
|
||||
"egress.746197026.protocol": {Old: "", New: "tcp", NewComputed: false, NewRemoved: false, NewExtra: "tcp"},
|
||||
"egress.746197026.security_groups.#": {Old: "", New: "0", NewComputed: false, NewRemoved: false},
|
||||
"egress.746197026.self": {Old: "", New: "false", NewComputed: false, NewRemoved: false},
|
||||
"egress.746197026.to_port": {Old: "", New: "8000", NewComputed: false, NewRemoved: false},
|
||||
// an erroneous nil diff should do nothing
|
||||
"egress.111111111.to_port": nil,
|
||||
},
|
||||
}
|
||||
|
||||
resSchema := map[string]*schema.Schema{
|
||||
"egress": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ConfigMode: schema.SchemaConfigModeAttr,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"from_port": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"to_port": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"protocol": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"cidr_blocks": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
},
|
||||
|
||||
"ipv6_cidr_blocks": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
},
|
||||
|
||||
"prefix_list_ids": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
},
|
||||
|
||||
"security_groups": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
|
||||
"self": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Default: false,
|
||||
},
|
||||
|
||||
"description": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
expected := map[string]string{
|
||||
"egress.#": "1",
|
||||
"egress.746197026.cidr_blocks.#": "1",
|
||||
"egress.746197026.cidr_blocks.0": "10.0.0.0/8",
|
||||
"egress.746197026.description": "New egress description",
|
||||
"egress.746197026.from_port": "80", "egress.746197026.ipv6_cidr_blocks.#": "0",
|
||||
"egress.746197026.prefix_list_ids.#": "0",
|
||||
"egress.746197026.protocol": "tcp",
|
||||
"egress.746197026.security_groups.#": "0",
|
||||
"egress.746197026.self": "false",
|
||||
"egress.746197026.to_port": "8000",
|
||||
"id": "testID",
|
||||
}
|
||||
|
||||
attrs, err := diff.Apply(priorAttrs, (&schema.Resource{Schema: resSchema}).CoreConfigSchema())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(attrs, expected) {
|
||||
t.Fatalf("wrong result\ngot: %s\nwant: %s\n", spew.Sdump(attrs), spew.Sdump(expected))
|
||||
}
|
||||
}
@@ -1,59 +0,0 @@
|
|||
package test
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func Provider() terraform.ResourceProvider {
|
||||
return &schema.Provider{
|
||||
Schema: map[string]*schema.Schema{
|
||||
// Optional attribute to label a particular instance for a test
|
||||
// that has multiple instances of this provider, so that they
|
||||
// can be distinguished using the test_provider_label data source.
|
||||
"label": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
},
|
||||
ProviderMetaSchema: map[string]*schema.Schema{
|
||||
// Optionally allow specifying information at a module-level
|
||||
"foo": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
},
|
||||
ResourcesMap: map[string]*schema.Resource{
|
||||
"test_resource": testResource(),
|
||||
"test_resource_gh12183": testResourceGH12183(),
|
||||
"test_resource_with_custom_diff": testResourceCustomDiff(),
|
||||
"test_resource_timeout": testResourceTimeout(),
|
||||
"test_resource_diff_suppress": testResourceDiffSuppress(),
|
||||
"test_resource_force_new": testResourceForceNew(),
|
||||
"test_resource_nested": testResourceNested(),
|
||||
"test_resource_nested_set": testResourceNestedSet(),
|
||||
"test_resource_state_func": testResourceStateFunc(),
|
||||
"test_resource_deprecated": testResourceDeprecated(),
|
||||
"test_resource_defaults": testResourceDefaults(),
|
||||
"test_resource_list": testResourceList(),
|
||||
"test_resource_list_set": testResourceListSet(),
|
||||
"test_resource_map": testResourceMap(),
|
||||
"test_resource_computed_set": testResourceComputedSet(),
|
||||
"test_resource_config_mode": testResourceConfigMode(),
|
||||
"test_resource_nested_id": testResourceNestedId(),
|
||||
"test_resource_provider_meta": testResourceProviderMeta(),
|
||||
"test_resource_signal": testResourceSignal(),
|
||||
"test_undeleteable": testResourceUndeleteable(),
|
||||
"test_resource_required_min": testResourceRequiredMin(),
|
||||
},
|
||||
DataSourcesMap: map[string]*schema.Resource{
|
||||
"test_data_source": testDataSource(),
|
||||
"test_provider_label": providerLabelDataSource(),
|
||||
},
|
||||
ConfigureFunc: providerConfigure,
|
||||
}
|
||||
}
|
||||
|
||||
func providerConfigure(d *schema.ResourceData) (interface{}, error) {
|
||||
return d.Get("label"), nil
|
||||
}
@@ -1,24 +0,0 @@
|
|||
package test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
var testAccProviders map[string]terraform.ResourceProvider
|
||||
var testAccProvider *schema.Provider
|
||||
|
||||
func TestProvider(t *testing.T) {
|
||||
if err := Provider().(*schema.Provider).InternalValidate(); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
testAccProvider = Provider().(*schema.Provider)
|
||||
testAccProviders = map[string]terraform.ResourceProvider{
|
||||
"test": testAccProvider,
|
||||
}
|
||||
}
@@ -1,233 +0,0 @@
|
|||
package test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
func testResource() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: testResourceCreate,
|
||||
Read: testResourceRead,
|
||||
Update: testResourceUpdate,
|
||||
Delete: testResourceDelete,
|
||||
|
||||
Importer: &schema.ResourceImporter{
|
||||
State: schema.ImportStatePassthrough,
|
||||
},
|
||||
|
||||
CustomizeDiff: func(d *schema.ResourceDiff, _ interface{}) error {
|
||||
if d.HasChange("optional") {
|
||||
d.SetNewComputed("planned_computed")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"required": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
"optional": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"optional_bool": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
},
|
||||
"optional_force_new": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"optional_computed_map": {
|
||||
Type: schema.TypeMap,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
"optional_computed_force_new": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"optional_computed": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
"computed_read_only": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"computed_from_required": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"computed_read_only_force_new": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"computed_list": {
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
},
|
||||
"set": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
"computed_set": {
|
||||
Type: schema.TypeSet,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
"map": {
|
||||
Type: schema.TypeMap,
|
||||
Optional: true,
|
||||
},
|
||||
"optional_map": {
|
||||
Type: schema.TypeMap,
|
||||
Optional: true,
|
||||
},
|
||||
"required_map": {
|
||||
Type: schema.TypeMap,
|
||||
Required: true,
|
||||
},
|
||||
"map_that_look_like_set": {
|
||||
Type: schema.TypeMap,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
},
|
||||
"computed_map": {
|
||||
Type: schema.TypeMap,
|
||||
Computed: true,
|
||||
},
|
||||
"list": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
},
|
||||
"list_of_map": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeMap,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
},
|
||||
},
|
||||
"apply_error": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "return and error during apply",
|
||||
},
|
||||
"planned_computed": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
Description: "copied the required field during apply, and plans computed when changed",
|
||||
},
|
||||
// this should return unset from GetOkExists
|
||||
"get_ok_exists_false": {
|
||||
Type: schema.TypeBool,
|
||||
Computed: true,
|
||||
Optional: true,
|
||||
Description: "do not set in config",
|
||||
},
|
||||
"int": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func testResourceCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
d.SetId("testId")
|
||||
|
||||
errMsg, _ := d.Get("apply_error").(string)
|
||||
if errMsg != "" {
|
||||
return errors.New(errMsg)
|
||||
}
|
||||
|
||||
// Required must make it through to Create
|
||||
if _, ok := d.GetOk("required"); !ok {
|
||||
return fmt.Errorf("Missing attribute 'required', but it's required!")
|
||||
}
|
||||
if _, ok := d.GetOk("required_map"); !ok {
|
||||
return fmt.Errorf("Missing attribute 'required_map', but it's required!")
|
||||
}
|
||||
|
||||
d.Set("computed_from_required", d.Get("required"))
|
||||
|
||||
return testResourceRead(d, meta)
|
||||
}
|
||||
|
||||
func testResourceRead(d *schema.ResourceData, meta interface{}) error {
|
||||
d.Set("computed_read_only", "value_from_api")
|
||||
d.Set("computed_read_only_force_new", "value_from_api")
|
||||
if _, ok := d.GetOk("optional_computed_map"); !ok {
|
||||
d.Set("optional_computed_map", map[string]string{})
|
||||
}
|
||||
d.Set("computed_map", map[string]string{"key1": "value1"})
|
||||
d.Set("computed_list", []string{"listval1", "listval2"})
|
||||
d.Set("computed_set", []string{"setval1", "setval2"})
|
||||
|
||||
d.Set("planned_computed", d.Get("optional"))
|
||||
|
||||
// if there is no "set" value, erroneously set it to an empty set. This
|
||||
// might change a null value to an empty set, but we should be able to
|
||||
// ignore that.
|
||||
s := d.Get("set")
|
||||
if s == nil || s.(*schema.Set).Len() == 0 {
|
||||
d.Set("set", []interface{}{})
|
||||
}
|
||||
|
||||
// This mimics many providers always setting a *string value.
|
||||
// The existing behavior is that this will appear in the state as an empty
|
||||
// string, which we have to maintain.
|
||||
o := d.Get("optional")
|
||||
if o == "" {
|
||||
d.Set("optional", nil)
|
||||
}
|
||||
|
||||
// This should not show as set unless it's set in the config
|
||||
_, ok := d.GetOkExists("get_ok_exists_false")
|
||||
if ok {
|
||||
return errors.New("get_ok_exists_false should not be set")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testResourceUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
errMsg, _ := d.Get("apply_error").(string)
|
||||
if errMsg != "" {
|
||||
return errors.New(errMsg)
|
||||
}
|
||||
return testResourceRead(d, meta)
|
||||
}
|
||||
|
||||
func testResourceDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
@@ -1,123 +0,0 @@
|
|||
package test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/hashcode"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
func testResourceComputedSet() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: testResourceComputedSetCreate,
|
||||
Read: testResourceComputedSetRead,
|
||||
Delete: testResourceComputedSetDelete,
|
||||
Update: testResourceComputedSetUpdate,
|
||||
|
||||
CustomizeDiff: func(d *schema.ResourceDiff, _ interface{}) error {
|
||||
o, n := d.GetChange("set_count")
|
||||
if o != n {
|
||||
d.SetNewComputed("string_set")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
|
||||
Importer: &schema.ResourceImporter{
|
||||
State: schema.ImportStatePassthrough,
|
||||
},
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"set_count": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"string_set": {
|
||||
Type: schema.TypeSet,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
|
||||
"rule": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"id": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"ip_protocol": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: false,
|
||||
},
|
||||
|
||||
"cidr": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: false,
|
||||
StateFunc: func(v interface{}) string {
|
||||
return strings.ToLower(v.(string))
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"optional_set": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func computeSecGroupV2RuleHash(v interface{}) int {
|
||||
var buf bytes.Buffer
|
||||
m := v.(map[string]interface{})
|
||||
buf.WriteString(fmt.Sprintf("%s-", m["ip_protocol"].(string)))
|
||||
buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["cidr"].(string))))
|
||||
|
||||
return hashcode.String(buf.String())
|
||||
}
|
||||
|
||||
func testResourceComputedSetCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
d.SetId(fmt.Sprintf("%x", rand.Int63()))
|
||||
return testResourceComputedSetRead(d, meta)
|
||||
}
|
||||
|
||||
func testResourceComputedSetRead(d *schema.ResourceData, meta interface{}) error {
|
||||
count := 3
|
||||
v, ok := d.GetOk("set_count")
|
||||
if ok {
|
||||
count = v.(int)
|
||||
}
|
||||
|
||||
var set []interface{}
|
||||
for i := 0; i < count; i++ {
|
||||
set = append(set, fmt.Sprintf("%d", i))
|
||||
}
|
||||
|
||||
d.Set("string_set", schema.NewSet(schema.HashString, set))
|
||||
|
||||
// This isn't computed, but we should be able to ignore without issues.
|
||||
d.Set("optional_set", []interface{}{})
|
||||
return nil
|
||||
}
|
||||
|
||||
func testResourceComputedSetUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
return testResourceComputedSetRead(d, meta)
|
||||
}
|
||||
|
||||
func testResourceComputedSetDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
@@ -1,71 +0,0 @@
|
|||
package test
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestResourceComputedSet_update(t *testing.T) {
|
||||
resource.UnitTest(t, resource.TestCase{
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckResourceDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: strings.TrimSpace(`
|
||||
resource "test_resource_computed_set" "foo" {
|
||||
}
|
||||
`),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr(
|
||||
"test_resource_computed_set.foo", "string_set.#", "3",
|
||||
),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
Config: strings.TrimSpace(`
|
||||
resource "test_resource_computed_set" "foo" {
|
||||
set_count = 5
|
||||
}
|
||||
`),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr(
|
||||
"test_resource_computed_set.foo", "string_set.#", "5",
|
||||
),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
Config: strings.TrimSpace(`
|
||||
resource "test_resource_computed_set" "foo" {
|
||||
set_count = 2
|
||||
}
|
||||
`),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr(
|
||||
"test_resource_computed_set.foo", "string_set.#", "2",
|
||||
),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestResourceComputedSet_ruleTest(t *testing.T) {
|
||||
resource.UnitTest(t, resource.TestCase{
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckResourceDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: strings.TrimSpace(`
|
||||
resource "test_resource_computed_set" "foo" {
|
||||
rule {
|
||||
ip_protocol = "udp"
|
||||
cidr = "0.0.0.0/0"
|
||||
}
|
||||
}
|
||||
`),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
@@ -1,78 +0,0 @@
|
|||
package test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
func testResourceConfigMode() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: testResourceConfigModeCreate,
|
||||
Read: testResourceConfigModeRead,
|
||||
Delete: testResourceConfigModeDelete,
|
||||
Update: testResourceConfigModeUpdate,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"resource_as_attr": {
|
||||
Type: schema.TypeList,
|
||||
ConfigMode: schema.SchemaConfigModeAttr,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"foo": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"nested_set": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
ConfigMode: schema.SchemaConfigModeAttr,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"value": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"set": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func testResourceConfigModeCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
d.SetId("placeholder")
|
||||
return testResourceConfigModeRead(d, meta)
|
||||
}
|
||||
|
||||
func testResourceConfigModeRead(d *schema.ResourceData, meta interface{}) error {
|
||||
if l, ok := d.Get("resource_as_attr").([]interface{}); !ok {
|
||||
return fmt.Errorf("resource_as_attr should appear as []interface{}, not %T", l)
|
||||
} else {
|
||||
for i, item := range l {
|
||||
if _, ok := item.(map[string]interface{}); !ok {
|
||||
return fmt.Errorf("resource_as_attr[%d] should appear as map[string]interface{}, not %T", i, item)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func testResourceConfigModeUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
return testResourceConfigModeRead(d, meta)
|
||||
}
|
||||
|
||||
func testResourceConfigModeDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
@@ -1,120 +0,0 @@
|
|||
package test
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestResourceConfigMode(t *testing.T) {
|
||||
resource.UnitTest(t, resource.TestCase{
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckResourceDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: strings.TrimSpace(`
|
||||
resource "test_resource_config_mode" "foo" {
|
||||
resource_as_attr = [
|
||||
{
|
||||
foo = "resource_as_attr 0"
|
||||
},
|
||||
{
|
||||
foo = "resource_as_attr 1"
|
||||
},
|
||||
]
|
||||
}
|
||||
`),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr("test_resource_config_mode.foo", "resource_as_attr.#", "2"),
|
||||
resource.TestCheckResourceAttr("test_resource_config_mode.foo", "resource_as_attr.0.foo", "resource_as_attr 0"),
|
||||
resource.TestCheckResourceAttr("test_resource_config_mode.foo", "resource_as_attr.1.foo", "resource_as_attr 1"),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
Config: strings.TrimSpace(`
|
||||
resource "test_resource_config_mode" "foo" {
|
||||
# Due to a preprocessing fixup we do in lang.EvalBlock, it's allowed
|
||||
# to specify resource_as_attr members using one or more nested blocks
|
||||
# instead of attribute syntax, if desired. This should be equivalent
|
||||
# to the previous config.
|
||||
#
|
||||
# This allowance is made for backward-compatibility with existing providers
|
||||
# before Terraform v0.12 that were expecting nested block types to also
|
||||
# support attribute syntax; it should not be used for any new use-cases.
|
||||
resource_as_attr {
|
||||
foo = "resource_as_attr 0"
|
||||
}
|
||||
resource_as_attr {
|
||||
foo = "resource_as_attr 1"
|
||||
}
|
||||
}
|
||||
`),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr("test_resource_config_mode.foo", "resource_as_attr.#", "2"),
|
||||
resource.TestCheckResourceAttr("test_resource_config_mode.foo", "resource_as_attr.0.foo", "resource_as_attr 0"),
|
||||
resource.TestCheckResourceAttr("test_resource_config_mode.foo", "resource_as_attr.1.foo", "resource_as_attr 1"),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
Config: strings.TrimSpace(`
|
||||
resource "test_resource_config_mode" "foo" {
|
||||
resource_as_attr = [
|
||||
{
|
||||
foo = "resource_as_attr 0 updated"
|
||||
},
|
||||
]
|
||||
}
|
||||
`),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr("test_resource_config_mode.foo", "resource_as_attr.#", "1"),
|
||||
resource.TestCheckResourceAttr("test_resource_config_mode.foo", "resource_as_attr.0.foo", "resource_as_attr 0 updated"),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
Config: strings.TrimSpace(`
|
||||
resource "test_resource_config_mode" "foo" {
|
||||
resource_as_attr = []
|
||||
}
|
||||
`),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr("test_resource_config_mode.foo", "resource_as_attr.#", "0"),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
Config: strings.TrimSpace(`
|
||||
resource "test_resource_config_mode" "foo" {
|
||||
}
|
||||
`),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckNoResourceAttr("test_resource_config_mode.foo", "resource_as_attr.#"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestResourceConfigMode_nestedSet(t *testing.T) {
|
||||
resource.UnitTest(t, resource.TestCase{
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckResourceDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: strings.TrimSpace(`
|
||||
resource "test_resource_config_mode" "foo" {
|
||||
resource_as_attr = []
|
||||
|
||||
nested_set {
|
||||
value = "a"
|
||||
}
|
||||
nested_set {
|
||||
value = "b"
|
||||
set = []
|
||||
}
|
||||
}
|
||||
`),
|
||||
Check: resource.ComposeTestCheckFunc(),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
@ -1,224 +0,0 @@
package test

import (
	"fmt"
	"testing"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

// TestResourceDataDep_alignedCountScaleOut tests to make sure interpolation
// works (namely without index errors) when a data source and a resource share
// the same count variable during scale-out with an existing state.
func TestResourceDataDep_alignedCountScaleOut(t *testing.T) {
	resource.UnitTest(t, resource.TestCase{
		Providers: testAccProviders,
		CheckDestroy: func(s *terraform.State) error {
			return nil
		},
		Steps: []resource.TestStep{
			{
				Config: testResourceDataDepConfig(2),
			},
			{
				Config: testResourceDataDepConfig(4),
				Check: resource.TestCheckOutput("out", "value_from_api,value_from_api,value_from_api,value_from_api"),
			},
		},
	})
}

// TestResourceDataDep_alignedCountScaleIn tests to make sure interpolation
// works (namely without index errors) when a data source and a resource share
// the same count variable during scale-in with an existing state.
func TestResourceDataDep_alignedCountScaleIn(t *testing.T) {
	resource.UnitTest(t, resource.TestCase{
		Providers: testAccProviders,
		CheckDestroy: func(s *terraform.State) error {
			return nil
		},
		Steps: []resource.TestStep{
			{
				Config: testResourceDataDepConfig(4),
			},
			{
				Config: testResourceDataDepConfig(2),
				Check: resource.TestCheckOutput("out", "value_from_api,value_from_api"),
			},
		},
	})
}

// TestDataResourceDep_alignedCountScaleOut functions like
// TestResourceDataDep_alignedCountScaleOut, but with the dependencies swapped
// (resource now depends on data source, a pretty regular use case, but
// included here to check for regressions).
func TestDataResourceDep_alignedCountScaleOut(t *testing.T) {
	resource.UnitTest(t, resource.TestCase{
		Providers: testAccProviders,
		CheckDestroy: func(s *terraform.State) error {
			return nil
		},
		Steps: []resource.TestStep{
			{
				Config: testDataResourceDepConfig(2),
			},
			{
				Config: testDataResourceDepConfig(4),
				Check: resource.TestCheckOutput("out", "test,test,test,test"),
			},
		},
	})
}

// TestDataResourceDep_alignedCountScaleIn functions like
// TestResourceDataDep_alignedCountScaleIn, but with the dependencies swapped
// (resource now depends on data source, a pretty regular use case, but
// included here to check for regressions).
func TestDataResourceDep_alignedCountScaleIn(t *testing.T) {
	resource.UnitTest(t, resource.TestCase{
		Providers: testAccProviders,
		CheckDestroy: func(s *terraform.State) error {
			return nil
		},
		Steps: []resource.TestStep{
			{
				Config: testDataResourceDepConfig(4),
			},
			{
				Config: testDataResourceDepConfig(2),
				Check: resource.TestCheckOutput("out", "test,test"),
			},
		},
	})
}

// TestResourceResourceDep_alignedCountScaleOut functions like
// TestResourceDataDep_alignedCountScaleOut, but with a resource-to-resource
// dependency instead, a pretty regular use case, but included here to check
// for regressions.
func TestResourceResourceDep_alignedCountScaleOut(t *testing.T) {
	resource.UnitTest(t, resource.TestCase{
		Providers: testAccProviders,
		CheckDestroy: func(s *terraform.State) error {
			return nil
		},
		Steps: []resource.TestStep{
			{
				Config: testResourceResourceDepConfig(2),
			},
			{
				Config: testResourceResourceDepConfig(4),
				Check: resource.TestCheckOutput("out", "test,test,test,test"),
			},
		},
	})
}

// TestResourceResourceDep_alignedCountScaleIn functions like
// TestResourceDataDep_alignedCountScaleIn, but with a resource-to-resource
// dependency instead, a pretty regular use case, but included here to check
// for regressions.
func TestResourceResourceDep_alignedCountScaleIn(t *testing.T) {
	resource.UnitTest(t, resource.TestCase{
		Providers: testAccProviders,
		CheckDestroy: func(s *terraform.State) error {
			return nil
		},
		Steps: []resource.TestStep{
			{
				Config: testResourceResourceDepConfig(4),
			},
			{
				Config: testResourceResourceDepConfig(2),
				Check: resource.TestCheckOutput("out", "test,test"),
			},
		},
	})
}

func testResourceDataDepConfig(count int) string {
	return fmt.Sprintf(`
variable num {
  default = "%d"
}

resource "test_resource" "foo" {
  count = "${var.num}"
  required = "yes"

  required_map = {
    "foo" = "bar"
  }
}

data "test_data_source" "bar" {
  count = "${var.num}"
  input = "${test_resource.foo.*.computed_read_only[count.index]}"
}

output "out" {
  value = "${join(",", data.test_data_source.bar.*.output)}"
}
`, count)
}

func testDataResourceDepConfig(count int) string {
	return fmt.Sprintf(`
variable num {
  default = "%d"
}

data "test_data_source" "foo" {
  count = "${var.num}"
  input = "test"
}

resource "test_resource" "bar" {
  count = "${var.num}"
  required = "yes"
  optional = "${data.test_data_source.foo.*.output[count.index]}"

  required_map = {
    "foo" = "bar"
  }
}

output "out" {
  value = "${join(",", test_resource.bar.*.optional)}"
}
`, count)
}

func testResourceResourceDepConfig(count int) string {
	return fmt.Sprintf(`
variable num {
  default = "%d"
}

resource "test_resource" "foo" {
  count = "${var.num}"
  required = "yes"
  optional = "test"

  required_map = {
    "foo" = "bar"
  }
}

resource "test_resource" "bar" {
  count = "${var.num}"
  required = "yes"
  optional = "${test_resource.foo.*.optional[count.index]}"

  required_map = {
    "foo" = "bar"
  }
}

output "out" {
  value = "${join(",", test_resource.bar.*.optional)}"
}
`, count)
}
@ -1,491 +0,0 @@
package test

import (
	"reflect"
	"testing"

	"github.com/google/go-cmp/cmp"

	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/terraform"
)

var dataprocClusterSchema = map[string]*schema.Schema{
	"name": {
		Type: schema.TypeString,
		Required: true,
		ForceNew: true,
	},

	"project": {
		Type: schema.TypeString,
		Optional: true,
		Computed: true,
		ForceNew: true,
	},

	"region": {
		Type: schema.TypeString,
		Optional: true,
		Default: "global",
		ForceNew: true,
	},

	"labels": {
		Type: schema.TypeMap,
		Optional: true,
		Elem: &schema.Schema{Type: schema.TypeString},
		// GCP automatically adds two labels
		//    'goog-dataproc-cluster-uuid'
		//    'goog-dataproc-cluster-name'
		Computed: true,
		DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool {
			if old != "" {
				return true
			}
			return false
		},
	},

	"tag_set": {
		Type: schema.TypeSet,
		Optional: true,
		Elem: &schema.Schema{Type: schema.TypeString},
		Set: schema.HashString,
	},

	"cluster_config": {
		Type: schema.TypeList,
		Optional: true,
		Computed: true,
		MaxItems: 1,
		Elem: &schema.Resource{
			Schema: map[string]*schema.Schema{

				"delete_autogen_bucket": {
					Type: schema.TypeBool,
					Optional: true,
					Default: false,
					Removed: "If you need a bucket that can be deleted, please create " +
						"a new one and set the `staging_bucket` field",
				},

				"staging_bucket": {
					Type: schema.TypeString,
					Optional: true,
					ForceNew: true,
				},
				"bucket": {
					Type: schema.TypeString,
					Computed: true,
				},

				"gce_cluster_config": {
					Type: schema.TypeList,
					Optional: true,
					Computed: true,
					MaxItems: 1,
					Elem: &schema.Resource{
						Schema: map[string]*schema.Schema{

							"zone": {
								Type: schema.TypeString,
								Optional: true,
								Computed: true,
								ForceNew: true,
							},

							"network": {
								Type: schema.TypeString,
								Optional: true,
								Computed: true,
								ForceNew: true,
								ConflictsWith: []string{"cluster_config.0.gce_cluster_config.0.subnetwork"},
							},

							"subnetwork": {
								Type: schema.TypeString,
								Optional: true,
								ForceNew: true,
								ConflictsWith: []string{"cluster_config.0.gce_cluster_config.0.network"},
							},

							"tags": {
								Type: schema.TypeSet,
								Optional: true,
								ForceNew: true,
								Elem: &schema.Schema{Type: schema.TypeString},
							},

							"service_account": {
								Type: schema.TypeString,
								Optional: true,
								ForceNew: true,
							},

							"service_account_scopes": {
								Type: schema.TypeSet,
								Optional: true,
								Computed: true,
								ForceNew: true,
								Elem: &schema.Schema{
									Type: schema.TypeString,
								},
							},

							"internal_ip_only": {
								Type: schema.TypeBool,
								Optional: true,
								ForceNew: true,
								Default: false,
							},

							"metadata": {
								Type: schema.TypeMap,
								Optional: true,
								Elem: &schema.Schema{Type: schema.TypeString},
								ForceNew: true,
							},
						},
					},
				},

				"master_config": &schema.Schema{
					Type: schema.TypeList,
					Optional: true,
					Computed: true,
					MaxItems: 1,
					Elem: &schema.Resource{
						Schema: map[string]*schema.Schema{
							"num_instances": {
								Type: schema.TypeInt,
								Optional: true,
								Computed: true,
							},

							"image_uri": {
								Type: schema.TypeString,
								Optional: true,
								Computed: true,
								ForceNew: true,
							},

							"machine_type": {
								Type: schema.TypeString,
								Optional: true,
								Computed: true,
								ForceNew: true,
							},

							"disk_config": {
								Type: schema.TypeList,
								Optional: true,
								Computed: true,
								MaxItems: 1,

								Elem: &schema.Resource{
									Schema: map[string]*schema.Schema{
										"num_local_ssds": {
											Type: schema.TypeInt,
											Optional: true,
											Computed: true,
											ForceNew: true,
										},

										"boot_disk_size_gb": {
											Type: schema.TypeInt,
											Optional: true,
											Computed: true,
											ForceNew: true,
										},

										"boot_disk_type": {
											Type: schema.TypeString,
											Optional: true,
											ForceNew: true,
											Default: "pd-standard",
										},
									},
								},
							},
							"accelerators": {
								Type: schema.TypeSet,
								Optional: true,
								ForceNew: true,
								Elem: &schema.Resource{
									Schema: map[string]*schema.Schema{
										"accelerator_type": {
											Type: schema.TypeString,
											Required: true,
											ForceNew: true,
										},

										"accelerator_count": {
											Type: schema.TypeInt,
											Required: true,
											ForceNew: true,
										},
									},
								},
							},
							"instance_names": {
								Type: schema.TypeList,
								Computed: true,
								Elem: &schema.Schema{Type: schema.TypeString},
							},
						},
					},
				},
				"preemptible_worker_config": {
					Type: schema.TypeList,
					Optional: true,
					Computed: true,
					MaxItems: 1,
					Elem: &schema.Resource{
						Schema: map[string]*schema.Schema{
							"num_instances": {
								Type: schema.TypeInt,
								Optional: true,
								Computed: true,
							},
							"disk_config": {
								Type: schema.TypeList,
								Optional: true,
								Computed: true,
								MaxItems: 1,

								Elem: &schema.Resource{
									Schema: map[string]*schema.Schema{
										"num_local_ssds": {
											Type: schema.TypeInt,
											Optional: true,
											Computed: true,
											ForceNew: true,
										},

										"boot_disk_size_gb": {
											Type: schema.TypeInt,
											Optional: true,
											Computed: true,
											ForceNew: true,
										},

										"boot_disk_type": {
											Type: schema.TypeString,
											Optional: true,
											ForceNew: true,
											Default: "pd-standard",
										},
									},
								},
							},

							"instance_names": {
								Type: schema.TypeList,
								Computed: true,
								Elem: &schema.Schema{Type: schema.TypeString},
							},
						},
					},
				},

				"software_config": {
					Type: schema.TypeList,
					Optional: true,
					Computed: true,
					MaxItems: 1,

					Elem: &schema.Resource{
						Schema: map[string]*schema.Schema{
							"image_version": {
								Type: schema.TypeString,
								Optional: true,
								Computed: true,
								ForceNew: true,
							},

							"override_properties": {
								Type: schema.TypeMap,
								Optional: true,
								ForceNew: true,
								Elem: &schema.Schema{Type: schema.TypeString},
							},

							"properties": {
								Type: schema.TypeMap,
								Computed: true,
							},
						},
					},
				},

				"initialization_action": {
					Type: schema.TypeList,
					Optional: true,
					ForceNew: true,
					Elem: &schema.Resource{
						Schema: map[string]*schema.Schema{
							"script": {
								Type: schema.TypeString,
								Required: true,
								ForceNew: true,
							},

							"timeout_sec": {
								Type: schema.TypeInt,
								Optional: true,
								Default: 300,
								ForceNew: true,
							},
						},
					},
				},
				"encryption_config": {
					Type: schema.TypeList,
					Optional: true,
					MaxItems: 1,
					Elem: &schema.Resource{
						Schema: map[string]*schema.Schema{
							"kms_key_name": {
								Type: schema.TypeString,
								Required: true,
							},
						},
					},
				},
			},
		},
	},
}

func TestDiffApply_dataprocCluster(t *testing.T) {
	priorAttrs := map[string]string{
		"cluster_config.#": "1",
		"cluster_config.0.bucket": "dataproc-1dc18cb2-116e-4e92-85ea-ff63a1bf2745-us-central1",
		"cluster_config.0.delete_autogen_bucket": "false",
		"cluster_config.0.encryption_config.#": "0",
		"cluster_config.0.gce_cluster_config.#": "1",
		"cluster_config.0.gce_cluster_config.0.internal_ip_only": "false",
		"cluster_config.0.gce_cluster_config.0.metadata.%": "0",
		"cluster_config.0.gce_cluster_config.0.network": "https://www.googleapis.com/compute/v1/projects/hc-terraform-testing/global/networks/default",
		"cluster_config.0.gce_cluster_config.0.service_account": "",
		"cluster_config.0.gce_cluster_config.0.service_account_scopes.#": "7",
		"cluster_config.0.gce_cluster_config.0.service_account_scopes.1245378569": "https://www.googleapis.com/auth/bigtable.admin.table",
		"cluster_config.0.gce_cluster_config.0.service_account_scopes.1328717722": "https://www.googleapis.com/auth/devstorage.read_write",
		"cluster_config.0.gce_cluster_config.0.service_account_scopes.1693978638": "https://www.googleapis.com/auth/devstorage.full_control",
		"cluster_config.0.gce_cluster_config.0.service_account_scopes.172152165": "https://www.googleapis.com/auth/logging.write",
		"cluster_config.0.gce_cluster_config.0.service_account_scopes.2401844655": "https://www.googleapis.com/auth/bigquery",
		"cluster_config.0.gce_cluster_config.0.service_account_scopes.299921284": "https://www.googleapis.com/auth/bigtable.data",
		"cluster_config.0.gce_cluster_config.0.service_account_scopes.3804780973": "https://www.googleapis.com/auth/cloud.useraccounts.readonly",
		"cluster_config.0.gce_cluster_config.0.subnetwork": "",
		"cluster_config.0.gce_cluster_config.0.tags.#": "0",
		"cluster_config.0.gce_cluster_config.0.zone": "us-central1-f",
		"cluster_config.0.initialization_action.#": "0",
		"cluster_config.0.master_config.#": "1",
		"cluster_config.0.master_config.0.accelerators.#": "0",
		"cluster_config.0.master_config.0.disk_config.#": "1",
		"cluster_config.0.master_config.0.disk_config.0.boot_disk_size_gb": "500",
		"cluster_config.0.master_config.0.disk_config.0.boot_disk_type": "pd-standard",
		"cluster_config.0.master_config.0.disk_config.0.num_local_ssds": "0",
		"cluster_config.0.master_config.0.image_uri": "https://www.googleapis.com/compute/v1/projects/cloud-dataproc/global/images/dataproc-1-3-deb9-20190228-000000-rc01",
		"cluster_config.0.master_config.0.instance_names.#": "1",
		"cluster_config.0.master_config.0.instance_names.0": "dproc-cluster-test-2ww3c60iww-m",
		"cluster_config.0.master_config.0.machine_type": "n1-standard-4",
		"cluster_config.0.master_config.0.num_instances": "1",
		"cluster_config.0.preemptible_worker_config.#": "1",
		"cluster_config.0.preemptible_worker_config.0.disk_config.#": "1",
		"cluster_config.0.preemptible_worker_config.0.instance_names.#": "0",
		"cluster_config.0.preemptible_worker_config.0.num_instances": "0",
		"cluster_config.0.software_config.#": "1",
		"cluster_config.0.software_config.0.image_version": "1.3.28-deb9",
		"cluster_config.0.software_config.0.override_properties.%": "0",
		"cluster_config.0.software_config.0.properties.%": "14",
		"cluster_config.0.software_config.0.properties.capacity-scheduler:yarn.scheduler.capacity.root.default.ordering-policy": "fair",
		"cluster_config.0.software_config.0.properties.core:fs.gs.block.size": "134217728",
		"cluster_config.0.software_config.0.properties.core:fs.gs.metadata.cache.enable": "false",
		"cluster_config.0.software_config.0.properties.core:hadoop.ssl.enabled.protocols": "TLSv1,TLSv1.1,TLSv1.2",
		"cluster_config.0.software_config.0.properties.distcp:mapreduce.map.java.opts": "-Xmx768m",
		"cluster_config.0.software_config.0.properties.distcp:mapreduce.map.memory.mb": "1024",
		"cluster_config.0.software_config.0.properties.distcp:mapreduce.reduce.java.opts": "-Xmx768m",
		"cluster_config.0.software_config.0.properties.distcp:mapreduce.reduce.memory.mb": "1024",
		"cluster_config.0.software_config.0.properties.hdfs:dfs.datanode.address": "0.0.0.0:9866",
		"cluster_config.0.software_config.0.properties.hdfs:dfs.datanode.http.address": "0.0.0.0:9864",
		"cluster_config.0.software_config.0.properties.hdfs:dfs.datanode.https.address": "0.0.0.0:9865",
		"cluster_config.0.software_config.0.properties.hdfs:dfs.datanode.ipc.address": "0.0.0.0:9867",
		"cluster_config.0.software_config.0.properties.hdfs:dfs.namenode.handler.count": "20",
		"cluster_config.0.software_config.0.properties.hdfs:dfs.namenode.http-address": "0.0.0.0:9870",
		"cluster_config.0.software_config.0.properties.hdfs:dfs.namenode.https-address": "0.0.0.0:9871",
		"cluster_config.0.software_config.0.properties.hdfs:dfs.namenode.lifeline.rpc-address": "dproc-cluster-test-2ww3c60iww-m:8050",
		"cluster_config.0.software_config.0.properties.hdfs:dfs.namenode.secondary.http-address": "0.0.0.0:9868",
		"cluster_config.0.software_config.0.properties.hdfs:dfs.namenode.secondary.https-address": "0.0.0.0:9869",
		"cluster_config.0.software_config.0.properties.hdfs:dfs.namenode.service.handler.count": "10",
		"cluster_config.0.software_config.0.properties.hdfs:dfs.namenode.servicerpc-address": "dproc-cluster-test-2ww3c60iww-m:8051",
		"cluster_config.0.software_config.0.properties.mapred-env:HADOOP_JOB_HISTORYSERVER_HEAPSIZE": "3840",
		"cluster_config.0.software_config.0.properties.mapred:mapreduce.job.maps": "21",
		"cluster_config.0.software_config.0.properties.mapred:mapreduce.job.reduce.slowstart.completedmaps": "0.95",
		"cluster_config.0.software_config.0.properties.mapred:mapreduce.job.reduces": "7",
		"cluster_config.0.software_config.0.properties.mapred:mapreduce.map.cpu.vcores": "1",
		"cluster_config.0.software_config.0.properties.mapred:mapreduce.map.java.opts": "-Xmx2457m",
		"cluster_config.0.software_config.0.properties.mapred:mapreduce.map.memory.mb": "3072",
		"cluster_config.0.software_config.0.properties.mapred:mapreduce.reduce.cpu.vcores": "1",
		"cluster_config.0.software_config.0.properties.mapred:mapreduce.reduce.java.opts": "-Xmx2457m",
		"cluster_config.0.software_config.0.properties.mapred:mapreduce.reduce.memory.mb": "3072",
		"cluster_config.0.software_config.0.properties.mapred:mapreduce.task.io.sort.mb": "256",
		"cluster_config.0.software_config.0.properties.mapred:yarn.app.mapreduce.am.command-opts": "-Xmx2457m",
		"cluster_config.0.software_config.0.properties.mapred:yarn.app.mapreduce.am.resource.cpu-vcores": "1",
		"cluster_config.0.software_config.0.properties.mapred:yarn.app.mapreduce.am.resource.mb": "3072",
		"cluster_config.0.software_config.0.properties.presto-jvm:MaxHeapSize": "12288m",
		"cluster_config.0.software_config.0.properties.presto:query.max-memory-per-node": "7372MB",
		"cluster_config.0.software_config.0.properties.presto:query.max-total-memory-per-node": "7372MB",
		"cluster_config.0.software_config.0.properties.spark-env:SPARK_DAEMON_MEMORY": "3840m",
		"cluster_config.0.software_config.0.properties.spark:spark.driver.maxResultSize": "1920m",
		"cluster_config.0.software_config.0.properties.spark:spark.driver.memory": "3840m",
		"cluster_config.0.software_config.0.properties.spark:spark.executor.cores": "2",
		"cluster_config.0.software_config.0.properties.spark:spark.executor.instances": "2",
		"cluster_config.0.software_config.0.properties.spark:spark.executor.memory": "5586m",
		"cluster_config.0.software_config.0.properties.spark:spark.executorEnv.OPENBLAS_NUM_THREADS": "1",
		"cluster_config.0.software_config.0.properties.spark:spark.scheduler.mode": "FAIR",
		"cluster_config.0.software_config.0.properties.spark:spark.sql.cbo.enabled": "true",
		"cluster_config.0.software_config.0.properties.spark:spark.yarn.am.memory": "640m",
		"cluster_config.0.software_config.0.properties.yarn-env:YARN_TIMELINESERVER_HEAPSIZE": "3840",
		"cluster_config.0.software_config.0.properties.yarn:yarn.nodemanager.resource.memory-mb": "12288",
		"cluster_config.0.software_config.0.properties.yarn:yarn.resourcemanager.nodemanager-graceful-decommission-timeout-secs": "86400",
		"cluster_config.0.software_config.0.properties.yarn:yarn.scheduler.maximum-allocation-mb": "12288",
		"cluster_config.0.software_config.0.properties.yarn:yarn.scheduler.minimum-allocation-mb": "1024",
		"cluster_config.0.staging_bucket": "",
		"id": "dproc-cluster-test-ktbyrniu4e",
		"labels.%": "4",
		"labels.goog-dataproc-cluster-name": "dproc-cluster-test-ktbyrniu4e",
		"labels.goog-dataproc-cluster-uuid": "d576c4e0-8fda-4ad1-abf5-ec951ab25855",
		"labels.goog-dataproc-location": "us-central1",
		"labels.key1": "value1",
		"tag_set.#": "0",
	}

	diff := &terraform.InstanceDiff{
		Attributes: map[string]*terraform.ResourceAttrDiff{
			"labels.%": &terraform.ResourceAttrDiff{Old: "4", New: "1", NewComputed: false, NewRemoved: false, NewExtra: interface{}(nil), RequiresNew: false, Sensitive: false, Type: 0x0},
			"labels.goog-dataproc-cluster-name": &terraform.ResourceAttrDiff{Old: "dproc-cluster-test-ktbyrniu4e", New: "", NewComputed: false, NewRemoved: true, NewExtra: interface{}(nil), RequiresNew: false, Sensitive: false, Type: 0x0},
			"labels.goog-dataproc-cluster-uuid": &terraform.ResourceAttrDiff{Old: "d576c4e0-8fda-4ad1-abf5-ec951ab25855", New: "", NewComputed: false, NewRemoved: true, NewExtra: interface{}(nil), RequiresNew: false, Sensitive: false, Type: 0x0},
			"labels.goog-dataproc-location": &terraform.ResourceAttrDiff{Old: "us-central1", New: "", NewComputed: false, NewRemoved: true, NewExtra: interface{}(nil), RequiresNew: false, Sensitive: false, Type: 0x0},
		},
	}

	newAttrs, err := diff.Apply(priorAttrs, (&schema.Resource{Schema: dataprocClusterSchema}).CoreConfigSchema())
	if err != nil {
		t.Fatal(err)
	}

	// the diff'ed label elements should be removed
	delete(priorAttrs, "labels.goog-dataproc-cluster-name")
	delete(priorAttrs, "labels.goog-dataproc-cluster-uuid")
	delete(priorAttrs, "labels.goog-dataproc-location")
	priorAttrs["labels.%"] = "1"

	// the missing required "name" should be added
	priorAttrs["name"] = ""

	if !reflect.DeepEqual(priorAttrs, newAttrs) {
		t.Fatal(cmp.Diff(priorAttrs, newAttrs))
	}
}
@ -1,70 +0,0 @@
package test

import (
	"fmt"
	"math/rand"

	"github.com/hashicorp/terraform/helper/schema"
)

func testResourceDefaults() *schema.Resource {
	return &schema.Resource{
		Create: testResourceDefaultsCreate,
		Read: testResourceDefaultsRead,
		Delete: testResourceDefaultsDelete,
		Update: testResourceDefaultsUpdate,

		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},

		Schema: map[string]*schema.Schema{
			"default_string": {
				Type: schema.TypeString,
				Optional: true,
				Default: "default string",
			},
			"default_bool": {
				Type: schema.TypeString,
				Optional: true,
				Default: true,
			},
			"nested": {
				Type: schema.TypeSet,
				Optional: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"string": {
							Type: schema.TypeString,
							Optional: true,
							Default: "default nested",
						},
						"optional": {
							Type: schema.TypeString,
							Optional: true,
						},
					},
				},
			},
		},
	}
}

func testResourceDefaultsCreate(d *schema.ResourceData, meta interface{}) error {
	d.SetId(fmt.Sprintf("%x", rand.Int63()))
	return testResourceDefaultsRead(d, meta)
}

func testResourceDefaultsUpdate(d *schema.ResourceData, meta interface{}) error {
	return testResourceDefaultsRead(d, meta)
}

func testResourceDefaultsRead(d *schema.ResourceData, meta interface{}) error {
	return nil
}

func testResourceDefaultsDelete(d *schema.ResourceData, meta interface{}) error {
	d.SetId("")
	return nil
}
@ -1,168 +0,0 @@
package test

import (
	"strings"
	"testing"

	"github.com/hashicorp/terraform/helper/resource"
)

func TestResourceDefaults_basic(t *testing.T) {
	resource.UnitTest(t, resource.TestCase{
		Providers: testAccProviders,
		CheckDestroy: testAccCheckResourceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: strings.TrimSpace(`
resource "test_resource_defaults" "foo" {
}
`),
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttr(
						"test_resource_defaults.foo", "default_string", "default string",
					),
					resource.TestCheckResourceAttr(
						"test_resource_defaults.foo", "default_bool", "1",
					),
					resource.TestCheckNoResourceAttr(
						"test_resource_defaults.foo", "nested.#",
					),
				),
			},
		},
	})
}

func TestResourceDefaults_change(t *testing.T) {
	resource.UnitTest(t, resource.TestCase{
		Providers: testAccProviders,
		CheckDestroy: testAccCheckResourceDestroy,
		Steps: []resource.TestStep{
			{
				Config: strings.TrimSpace(`
resource "test_resource_defaults" "foo" {
}
`),
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttr(
						"test_resource_defaults.foo", "default_string", "default string",
					),
					resource.TestCheckResourceAttr(
						"test_resource_defaults.foo", "default_bool", "1",
					),
					resource.TestCheckNoResourceAttr(
						"test_resource_defaults.foo", "nested.#",
					),
				),
			},
			{
				Config: strings.TrimSpace(`
resource "test_resource_defaults" "foo" {
  default_string = "new"
  default_bool = false
  nested {
    optional = "nested"
  }
}
`),
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttr(
						"test_resource_defaults.foo", "default_string", "new",
					),
					resource.TestCheckResourceAttr(
						"test_resource_defaults.foo", "default_bool", "false",
					),
					resource.TestCheckResourceAttr(
						"test_resource_defaults.foo", "nested.#", "1",
					),
					resource.TestCheckResourceAttr(
						"test_resource_defaults.foo", "nested.2950978312.optional", "nested",
					),
					resource.TestCheckResourceAttr(
						"test_resource_defaults.foo", "nested.2950978312.string", "default nested",
					),
				),
			},
			{
				Config: strings.TrimSpace(`
resource "test_resource_defaults" "foo" {
  default_string = "new"
  default_bool = false
  nested {
    optional = "nested"
    string = "new"
  }
}
`),
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttr(
						"test_resource_defaults.foo", "default_string", "new",
					),
					resource.TestCheckResourceAttr(
						"test_resource_defaults.foo", "default_bool", "false",
					),
					resource.TestCheckResourceAttr(
						"test_resource_defaults.foo", "nested.#", "1",
					),
					resource.TestCheckResourceAttr(
						"test_resource_defaults.foo", "nested.782850362.optional", "nested",
					),
					resource.TestCheckResourceAttr(
						"test_resource_defaults.foo", "nested.782850362.string", "new",
					),
				),
			},
		},
	})
}

func TestResourceDefaults_inSet(t *testing.T) {
	resource.UnitTest(t, resource.TestCase{
		Providers: testAccProviders,
		CheckDestroy: testAccCheckResourceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: strings.TrimSpace(`
resource "test_resource_defaults" "foo" {
  nested {
    optional = "val"
  }
}
`),
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttr(
						"test_resource_defaults.foo", "default_string", "default string",
					),
					resource.TestCheckResourceAttr(
						"test_resource_defaults.foo", "default_bool", "1",
					),
					resource.TestCheckResourceAttr(
						"test_resource_defaults.foo", "nested.2826070548.optional", "val",
					),
					resource.TestCheckResourceAttr(
						"test_resource_defaults.foo", "nested.2826070548.string", "default nested",
					),
				),
			},
		},
	})
}

func TestDefaults_emptyString(t *testing.T) {
	config := `
resource "test_resource_defaults" "test" {
  default_string = ""
}
`
	resource.UnitTest(t, resource.TestCase{
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				Config: config,
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttr("test_resource_defaults.test", "default_string", ""),
				),
			},
		},
	})
}
@ -1,119 +0,0 @@
package test

import (
	"github.com/hashicorp/terraform/helper/schema"
)

func testResourceDeprecated() *schema.Resource {
	return &schema.Resource{
		Create: testResourceDeprecatedCreate,
		Read: testResourceDeprecatedRead,
		Update: testResourceDeprecatedUpdate,
		Delete: testResourceDeprecatedDelete,

		Schema: map[string]*schema.Schema{
			"map_deprecated": {
				Type: schema.TypeMap,
				Optional: true,
				Deprecated: "deprecated",
			},
			"map_removed": {
				Type: schema.TypeMap,
				Optional: true,
				Removed: "removed",
			},
			"set_block_deprecated": {
				Type: schema.TypeSet,
				Optional: true,
				MaxItems: 1,
				Deprecated: "deprecated",
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"value": {
							Type: schema.TypeString,
							Required: true,
							Deprecated: "deprecated",
						},
						"optional": {
							Type: schema.TypeString,
							ForceNew: true,
							Optional: true,
							Deprecated: "deprecated",
						},
					},
				},
			},
			"set_block_removed": {
				Type: schema.TypeSet,
				Optional: true,
				MaxItems: 1,
				Removed: "Removed",
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"optional": {
							Type: schema.TypeString,
							ForceNew: true,
							Optional: true,
							Computed: true,
							Removed: "removed",
						},
					},
				},
			},
			"list_block_deprecated": {
				Type: schema.TypeList,
				Optional: true,
				Deprecated: "deprecated",
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"value": {
							Type: schema.TypeString,
							Required: true,
							Deprecated: "deprecated",
						},
						"optional": {
							Type: schema.TypeString,
							ForceNew: true,
							Optional: true,
							Deprecated: "deprecated",
						},
					},
				},
			},
			"list_block_removed": {
				Type: schema.TypeList,
				Optional: true,
				Removed: "removed",
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"optional": {
							Type: schema.TypeString,
							ForceNew: true,
							Optional: true,
							Removed: "removed",
						},
					},
				},
			},
		},
	}
}

func testResourceDeprecatedCreate(d *schema.ResourceData, meta interface{}) error {
	d.SetId("testId")
	return nil
}

func testResourceDeprecatedRead(d *schema.ResourceData, meta interface{}) error {
	return nil
}

func testResourceDeprecatedUpdate(d *schema.ResourceData, meta interface{}) error {
	return nil
}

func testResourceDeprecatedDelete(d *schema.ResourceData, meta interface{}) error {
	d.SetId("")
	return nil
}
@ -1,71 +0,0 @@
package test

import (
	"regexp"
	"strings"
	"testing"

	"github.com/hashicorp/terraform/helper/resource"
)

// an empty config should be ok, because no deprecated/removed fields are set.
func TestResourceDeprecated_empty(t *testing.T) {
	resource.UnitTest(t, resource.TestCase{
		Providers: testAccProviders,
		CheckDestroy: testAccCheckResourceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: strings.TrimSpace(`
resource "test_resource_deprecated" "foo" {
}
`),
			},
		},
	})
}

// Deprecated fields should still work
func TestResourceDeprecated_deprecatedOK(t *testing.T) {
	resource.UnitTest(t, resource.TestCase{
		Providers: testAccProviders,
		CheckDestroy: testAccCheckResourceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: strings.TrimSpace(`
resource "test_resource_deprecated" "foo" {
  map_deprecated = {
    "a" = "b",
  }
  set_block_deprecated {
    value = "1"
  }
  list_block_deprecated {
    value = "2"
  }
}
`),
			},
		},
	})
}

// Declaring an empty block should trigger the error
func TestResourceDeprecated_removedBlocks(t *testing.T) {
	resource.UnitTest(t, resource.TestCase{
		Providers: testAccProviders,
		CheckDestroy: testAccCheckResourceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: strings.TrimSpace(`
resource "test_resource_deprecated" "foo" {
  set_block_removed {
  }
  list_block_removed {
  }
}
`),
				ExpectError: regexp.MustCompile("REMOVED"),
			},
		},
	})
}
@ -1,104 +0,0 @@
package test

import (
	"fmt"
	"math/rand"
	"strings"

	"github.com/hashicorp/terraform/helper/schema"
)

func testResourceDiffSuppress() *schema.Resource {
	diffSuppress := func(k, old, new string, d *schema.ResourceData) bool {
		if old == "" || strings.Contains(new, "replace") {
			return false
		}
		return true
	}

	return &schema.Resource{
		Create: testResourceDiffSuppressCreate,
		Read: testResourceDiffSuppressRead,
		Delete: testResourceDiffSuppressDelete,
		Update: testResourceDiffSuppressUpdate,

		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},

		Schema: map[string]*schema.Schema{
			"optional": {
				Type: schema.TypeString,
				Optional: true,
			},
			"val_to_upper": {
				Type: schema.TypeString,
				Required: true,
				ForceNew: true,
				StateFunc: func(val interface{}) string {
					return strings.ToUpper(val.(string))
				},
				DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool {
					return strings.ToUpper(old) == strings.ToUpper(new)
				},
			},
			"network": {
				Type: schema.TypeString,
				Optional: true,
				Default: "default",
				ForceNew: true,
				DiffSuppressFunc: diffSuppress,
			},
			"subnetwork": {
				Type: schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
				DiffSuppressFunc: diffSuppress,
			},

			"node_pool": {
				Type: schema.TypeList,
				Optional: true,
				Computed: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"name": &schema.Schema{
							Type: schema.TypeString,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},
					},
				},
			},
		},
	}
}

func testResourceDiffSuppressCreate(d *schema.ResourceData, meta interface{}) error {
	d.Set("network", "modified")
	d.Set("subnetwork", "modified")

	if _, ok := d.GetOk("node_pool"); !ok {
		d.Set("node_pool", []string{})
	}

	id := fmt.Sprintf("%x", rand.Int63())
	d.SetId(id)
	return nil
}

func testResourceDiffSuppressRead(d *schema.ResourceData, meta interface{}) error {
	return nil
}

func testResourceDiffSuppressUpdate(d *schema.ResourceData, meta interface{}) error {
	return nil
}

func testResourceDiffSuppressDelete(d *schema.ResourceData, meta interface{}) error {
	d.SetId("")
	return nil
}
@ -1,126 +0,0 @@
package test

import (
	"errors"
	"strings"
	"testing"

	"github.com/hashicorp/terraform/addrs"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

func TestResourceDiffSuppress_create(t *testing.T) {
	resource.UnitTest(t, resource.TestCase{
		Providers: testAccProviders,
		CheckDestroy: testAccCheckResourceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: strings.TrimSpace(`
resource "test_resource_diff_suppress" "foo" {
  val_to_upper = "foo"
}
`),
			},
		},
	})
}

func TestResourceDiffSuppress_update(t *testing.T) {
	resource.UnitTest(t, resource.TestCase{
		Providers: testAccProviders,
		CheckDestroy: testAccCheckResourceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: strings.TrimSpace(`
resource "test_resource_diff_suppress" "foo" {
  val_to_upper = "foo"
}
`),
			},
			resource.TestStep{
				Config: strings.TrimSpace(`
resource "test_resource_diff_suppress" "foo" {
  val_to_upper = "bar"
  optional = "more"
}
`),
			},
		},
	})
}

func TestResourceDiffSuppress_updateIgnoreChanges(t *testing.T) {
	// None of these steps should replace the instance
	id := ""
	checkFunc := func(s *terraform.State) error {
		root := s.ModuleByPath(addrs.RootModuleInstance)
		res := root.Resources["test_resource_diff_suppress.foo"]
		if id != "" && res.Primary.ID != id {
			return errors.New("expected no resource replacement")
		}
		id = res.Primary.ID
		return nil
	}

	resource.UnitTest(t, resource.TestCase{
		Providers: testAccProviders,
		CheckDestroy: testAccCheckResourceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: strings.TrimSpace(`
resource "test_resource_diff_suppress" "foo" {
  val_to_upper = "foo"

  network = "foo"
  subnetwork = "foo"

  node_pool {
    name = "default-pool"
  }
  lifecycle {
    ignore_changes = ["node_pool"]
  }
}
`),
				Check: checkFunc,
			},
			resource.TestStep{
				Config: strings.TrimSpace(`
resource "test_resource_diff_suppress" "foo" {
  val_to_upper = "foo"

  network = "ignored"
  subnetwork = "ignored"

  node_pool {
    name = "default-pool"
  }
  lifecycle {
    ignore_changes = ["node_pool"]
  }
}
`),
				Check: checkFunc,
			},
			resource.TestStep{
				Config: strings.TrimSpace(`
resource "test_resource_diff_suppress" "foo" {
  val_to_upper = "foo"

  network = "ignored"
  subnetwork = "ignored"

  node_pool {
    name = "ignored"
  }
  lifecycle {
    ignore_changes = ["node_pool"]
  }
}
`),
				Check: checkFunc,
			},
		},
	})
}
@ -1,39 +0,0 @@
package test

import (
	"github.com/hashicorp/terraform/helper/schema"
)

func testResourceForceNew() *schema.Resource {
	return &schema.Resource{
		Create: testResourceForceNewCreate,
		Read: testResourceForceNewRead,
		Delete: testResourceForceNewDelete,

		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},

		Schema: map[string]*schema.Schema{
			"triggers": {
				Type: schema.TypeMap,
				Optional: true,
				ForceNew: true,
			},
		},
	}
}

func testResourceForceNewCreate(d *schema.ResourceData, meta interface{}) error {
	d.SetId("testId")
	return testResourceForceNewRead(d, meta)
}

func testResourceForceNewRead(d *schema.ResourceData, meta interface{}) error {
	return nil
}

func testResourceForceNewDelete(d *schema.ResourceData, meta interface{}) error {
	d.SetId("")
	return nil
}
@ -1,79 +0,0 @@
package test

import (
	"strings"
	"testing"

	"github.com/hashicorp/terraform/helper/resource"
)

func TestResourceForceNew_create(t *testing.T) {
	resource.UnitTest(t, resource.TestCase{
		Providers: testAccProviders,
		CheckDestroy: testAccCheckResourceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: strings.TrimSpace(`
resource "test_resource_force_new" "foo" {
  triggers = {
    "a" = "foo"
  }
}`),
			},
		},
	})
}

func TestResourceForceNew_update(t *testing.T) {
	resource.UnitTest(t, resource.TestCase{
		Providers: testAccProviders,
		CheckDestroy: testAccCheckResourceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: strings.TrimSpace(`
resource "test_resource_force_new" "foo" {
  triggers = {
    "a" = "foo"
  }
}`),
			},
			resource.TestStep{
				Config: strings.TrimSpace(`
resource "test_resource_force_new" "foo" {
  triggers = {
    "a" = "bar"
  }
}`),
			},
			resource.TestStep{
				Config: strings.TrimSpace(`
resource "test_resource_force_new" "foo" {
  triggers = {
    "b" = "bar"
  }
}`),
			},
		},
	})
}

func TestResourceForceNew_remove(t *testing.T) {
	resource.UnitTest(t, resource.TestCase{
		Providers: testAccProviders,
		CheckDestroy: testAccCheckResourceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: strings.TrimSpace(`
resource "test_resource_force_new" "foo" {
  triggers = {
    "a" = "bar"
  }
}`),
			},
			resource.TestStep{
				Config: strings.TrimSpace(`
resource "test_resource_force_new" "foo" {
} `),
			},
		},
	})
}
@ -1,64 +0,0 @@
package test

import (
	"github.com/hashicorp/terraform/helper/schema"
)

// This is a test resource to help reproduce GH-12183. This issue came up
// as a complex mixing of core + helper/schema and while we added core tests
// to cover some of the cases, this test helps top it off with an end-to-end
// test.
func testResourceGH12183() *schema.Resource {
	return &schema.Resource{
		Create: testResourceCreate_gh12183,
		Read: testResourceRead_gh12183,
		Update: testResourceUpdate_gh12183,
		Delete: testResourceDelete_gh12183,
		Schema: map[string]*schema.Schema{
			"key": &schema.Schema{
				Type: schema.TypeString,
				Optional: true,
			},

			"config": &schema.Schema{
				Type: schema.TypeList,
				Optional: true,
				ForceNew: true,
				MinItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"name": {
							Type: schema.TypeString,
							Required: true,
						},

						"rules": {
							Type: schema.TypeSet,
							Computed: true,
							Elem: &schema.Schema{Type: schema.TypeString},
							Set: schema.HashString,
						},
					},
				},
			},
		},
	}
}

func testResourceCreate_gh12183(d *schema.ResourceData, meta interface{}) error {
	d.SetId("testId")
	return testResourceRead_gh12183(d, meta)
}

func testResourceRead_gh12183(d *schema.ResourceData, meta interface{}) error {
	return nil
}

func testResourceUpdate_gh12183(d *schema.ResourceData, meta interface{}) error {
	return nil
}

func testResourceDelete_gh12183(d *schema.ResourceData, meta interface{}) error {
	d.SetId("")
	return nil
}
@ -1,40 +0,0 @@
package test

import (
	"strings"
	"testing"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

// Tests GH-12183. This would previously cause a crash. More granular
// unit tests are scattered through helper/schema and terraform core for
// this.
func TestResourceGH12183_basic(t *testing.T) {
	resource.UnitTest(t, resource.TestCase{
		Providers: testAccProviders,
		CheckDestroy: testAccCheckResourceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: strings.TrimSpace(`
resource "test_resource_gh12183" "a" {
  config {
    name = "hello"
  }
}

resource "test_resource_gh12183" "b" {
  key = "${lookup(test_resource_gh12183.a.config[0], "name")}"
  config {
    name = "required"
  }
}
`),
				Check: func(s *terraform.State) error {
					return nil
				},
			},
		},
	})
}
@ -1,192 +0,0 @@
package test

import (
	"github.com/hashicorp/terraform/helper/schema"
)

func testResourceList() *schema.Resource {
	return &schema.Resource{
		Create: testResourceListCreate,
		Read: testResourceListRead,
		Update: testResourceListUpdate,
		Delete: testResourceListDelete,

		CustomizeDiff: func(d *schema.ResourceDiff, _ interface{}) error {
			if d.HasChange("dependent_list") {
				d.SetNewComputed("computed_list")
			}
			return nil
		},

		Schema: map[string]*schema.Schema{
			"list_block": {
				Type: schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"string": {
							Type: schema.TypeString,
							Optional: true,
						},
						"int": {
							Type: schema.TypeInt,
							Optional: true,
						},
						"force_new": {
							Type: schema.TypeString,
							Optional: true,
							ForceNew: true,
						},
						"sublist": {
							Type: schema.TypeList,
							Optional: true,
							Elem: &schema.Schema{
								Type: schema.TypeString,
							},
						},
						"sublist_block": {
							Type: schema.TypeList,
							Optional: true,
							Computed: true,
							ForceNew: true,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"string": {
										Type: schema.TypeString,
										Required: true,
									},
									"int": {
										Type: schema.TypeInt,
										Required: true,
									},
								},
							},
						},
						"sublist_block_optional": {
							Type: schema.TypeList,
							Optional: true,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"list": {
										Type: schema.TypeList,
										Optional: true,
										Elem: &schema.Schema{Type: schema.TypeString},
									},
								},
							},
						},
					},
				},
			},
			"dependent_list": {
				Type: schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"val": {
							Type: schema.TypeString,
							Required: true,
						},
					},
				},
			},
			"computed_list": {
				Type: schema.TypeList,
				Computed: true,
				Elem: &schema.Schema{Type: schema.TypeString},
			},
			"min_items": {
				Type: schema.TypeList,
				Optional: true,
				MinItems: 2,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"val": {
							Type: schema.TypeString,
							Required: true,
						},
					},
				},
			},
			"never_set": {
				Type: schema.TypeList,
				MaxItems: 1,
				Optional: true,
				Computed: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"sublist": {
							Type: schema.TypeList,
							MaxItems: 1,
							ForceNew: true,
							Optional: true,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"bool": {
										Type: schema.TypeBool,
										ForceNew: true,
										Required: true,
									},
									"string": {
										Type: schema.TypeString,
										Computed: true,
									},
								},
							},
						},
					},
				},
			},
			"map_list": {
				Type: schema.TypeList,
				Optional: true,
				Elem: &schema.Schema{Type: schema.TypeMap},
			},
		},
	}
}

func testResourceListCreate(d *schema.ResourceData, meta interface{}) error {
	d.SetId("testId")
	return testResourceListRead(d, meta)
}

func testResourceListRead(d *schema.ResourceData, meta interface{}) error {
	fixedIps := d.Get("dependent_list")

	// all_fixed_ips should be set as computed with a CustomizeDiff func, but
	// we're trying to emulate legacy provider behavior, and updating a
	// computed field was a common case.
	ips := []interface{}{}
	if fixedIps != nil {
		for _, v := range fixedIps.([]interface{}) {
			m := v.(map[string]interface{})
			ips = append(ips, m["val"])
		}
	}
	if err := d.Set("computed_list", ips); err != nil {
		return err
	}

	// "computing" these values should insert empty containers into the
	// never_set block.
	values := make(map[string]interface{})
	values["sublist"] = []interface{}{}
	d.Set("never_set", []interface{}{values})

	return nil
}

func testResourceListUpdate(d *schema.ResourceData, meta interface{}) error {
	block := d.Get("never_set").([]interface{})
	if len(block) > 0 {
		// if profiles contains any values, they should not be nil
		_ = block[0].(map[string]interface{})
	}
	return testResourceListRead(d, meta)
}

func testResourceListDelete(d *schema.ResourceData, meta interface{}) error {
	d.SetId("")
	return nil
}